Updated to Clang 3.5a.

Change-Id: I8127eb568f674c2e72635b639a3295381fe8af82
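
Note: when the migrator runs in output-file mode (IsOutputFile), the new
JSONEditWriter serializes each edit as a small record, and
arcmt::getFileRemappingsFromFileList() reads those records back through the
llvm::yaml-based RemapFileParser. A record looks roughly like the sketch
below; the path and values are illustrative only, and the "remove" and
"text" keys appear only for edits that delete or insert text, respectively:

  [
   {
    "file": "/hypothetical/path/Foo.h",
    "offset": 42,
    "remove": 7,
    "text": "NS_ENUM(NSInteger, Foo)",
   },
  ]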
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index 3e429be..178ec84 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -103,8 +103,8 @@
     : Diags(diags), DiagClient(client), CapturedDiags(capturedDiags),
       HasBegunSourceFile(false) { }
 
-  virtual void BeginSourceFile(const LangOptions &Opts,
-                               const Preprocessor *PP) {
+  void BeginSourceFile(const LangOptions &Opts,
+                       const Preprocessor *PP) override {
     // Pass BeginSourceFile message onto DiagClient on first call.
     // The corresponding EndSourceFile call will be made from an
     // explicit call to FinishCapture.
@@ -128,8 +128,8 @@
     assert(!HasBegunSourceFile && "FinishCapture not called!");
   }
 
-  virtual void HandleDiagnostic(DiagnosticsEngine::Level level,
-                                const Diagnostic &Info) {
+  void HandleDiagnostic(DiagnosticsEngine::Level level,
+                        const Diagnostic &Info) override {
     if (DiagnosticIDs::isARCDiagnostic(Info.getID()) ||
         level >= DiagnosticsEngine::Error || level == DiagnosticsEngine::Note) {
       if (Info.getLocation().isValid())
@@ -167,7 +167,7 @@
 
 static CompilerInvocation *
 createInvocationForMigration(CompilerInvocation &origCI) {
-  OwningPtr<CompilerInvocation> CInvok;
+  std::unique_ptr<CompilerInvocation> CInvok;
   CInvok.reset(new CompilerInvocation(origCI));
   PreprocessorOptions &PPOpts = CInvok->getPreprocessorOpts();
   if (!PPOpts.ImplicitPCHInclude.empty()) {
@@ -204,11 +204,11 @@
       WarnOpts.push_back(*I);
   }
   WarnOpts.push_back("error=arc-unsafe-retained-assign");
-  CInvok->getDiagnosticOpts().Warnings = llvm_move(WarnOpts);
+  CInvok->getDiagnosticOpts().Warnings = std::move(WarnOpts);
 
   CInvok->getLangOpts()->ObjCARCWeak = HasARCRuntime(origCI);
 
-  return CInvok.take();
+  return CInvok.release();
 }
 
 static void emitPremigrationErrors(const CapturedDiagList &arcDiags,
@@ -246,7 +246,7 @@
                                                                      NoFinalizeRemoval);
   assert(!transforms.empty());
 
-  OwningPtr<CompilerInvocation> CInvok;
+  std::unique_ptr<CompilerInvocation> CInvok;
   CInvok.reset(createInvocationForMigration(origCI));
   CInvok->getFrontendOpts().Inputs.clear();
   CInvok->getFrontendOpts().Inputs.push_back(Input);
@@ -263,8 +263,8 @@
   CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags);
   Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
 
-  OwningPtr<ASTUnit> Unit(
-      ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags));
+  std::unique_ptr<ASTUnit> Unit(
+      ASTUnit::LoadFromCompilerInvocationAction(CInvok.release(), Diags));
   if (!Unit) {
     errRec.FinishCapture();
     return true;
@@ -310,8 +310,11 @@
   TransformActions testAct(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
   MigrationPass pass(Ctx, OrigGCMode, Unit->getSema(), testAct, capturedDiags,
                      ARCMTMacroLocs);
-  pass.setNSAllocReallocError(NoNSAllocReallocError);
   pass.setNoFinalizeRemoval(NoFinalizeRemoval);
+  Diags->setDiagnosticMapping(diag::err_arcmt_nsalloc_realloc,
+                              NoNSAllocReallocError ? diag::MAP_WARNING
+                                                    : diag::MAP_ERROR,
+                              SourceLocation());
 
   for (unsigned i=0, e = transforms.size(); i != e; ++i)
     transforms[i](pass);
@@ -416,44 +419,6 @@
   return false;
 }
 
-bool arcmt::getFileRemappingsFromFileList(
-                        std::vector<std::pair<std::string,std::string> > &remap,
-                        ArrayRef<StringRef> remapFiles,
-                        DiagnosticConsumer *DiagClient) {
-  bool hasErrorOccurred = false;
-  llvm::StringMap<bool> Uniquer;
-
-  IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
-  IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
-      new DiagnosticsEngine(DiagID, new DiagnosticOptions,
-                            DiagClient, /*ShouldOwnClient=*/false));
-
-  for (ArrayRef<StringRef>::iterator
-         I = remapFiles.begin(), E = remapFiles.end(); I != E; ++I) {
-    StringRef file = *I;
-
-    FileRemapper remapper;
-    bool err = remapper.initFromFile(file, *Diags,
-                                     /*ignoreIfFilesChanged=*/true);
-    hasErrorOccurred = hasErrorOccurred || err;
-    if (err)
-      continue;
-
-    PreprocessorOptions PPOpts;
-    remapper.applyMappings(PPOpts);
-    for (PreprocessorOptions::remapped_file_iterator
-           RI = PPOpts.remapped_file_begin(), RE = PPOpts.remapped_file_end();
-           RI != RE; ++RI) {
-      bool &inserted = Uniquer[RI->first];
-      if (inserted)
-        continue;
-      inserted = true;
-      remap.push_back(*RI);
-    }
-  }
-
-  return hasErrorOccurred;
-}
 
 //===----------------------------------------------------------------------===//
 // CollectTransformActions.
@@ -468,8 +433,8 @@
   ARCMTMacroTrackerPPCallbacks(std::vector<SourceLocation> &ARCMTMacroLocs)
     : ARCMTMacroLocs(ARCMTMacroLocs) { }
 
-  virtual void MacroExpands(const Token &MacroNameTok, const MacroDirective *MD,
-                            SourceRange Range, const MacroArgs *Args) {
+  void MacroExpands(const Token &MacroNameTok, const MacroDirective *MD,
+                    SourceRange Range, const MacroArgs *Args) override {
     if (MacroNameTok.getIdentifierInfo()->getName() == getARCMTMacroName())
       ARCMTMacroLocs.push_back(MacroNameTok.getLocation());
   }
@@ -482,8 +447,8 @@
   ARCMTMacroTrackerAction(std::vector<SourceLocation> &ARCMTMacroLocs)
     : ARCMTMacroLocs(ARCMTMacroLocs) { }
 
-  virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
-                                         StringRef InFile) {
+  ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+                                 StringRef InFile) override {
     CI.getPreprocessor().addPPCallbacks(
                               new ARCMTMacroTrackerPPCallbacks(ARCMTMacroLocs));
     return new ASTConsumer();
@@ -506,14 +471,14 @@
       Listener->finish();
   }
 
-  virtual void insert(SourceLocation loc, StringRef text) {
+  void insert(SourceLocation loc, StringRef text) override {
     bool err = rewriter.InsertText(loc, text, /*InsertAfter=*/true,
                                    /*indentNewLines=*/true);
     if (!err && Listener)
       Listener->insert(loc, text);
   }
 
-  virtual void remove(CharSourceRange range) {
+  void remove(CharSourceRange range) override {
     Rewriter::RewriteOptions removeOpts;
     removeOpts.IncludeInsertsAtBeginOfRange = false;
     removeOpts.IncludeInsertsAtEndOfRange = false;
@@ -524,8 +489,8 @@
       Listener->remove(range);
   }
 
-  virtual void increaseIndentation(CharSourceRange range,
-                                    SourceLocation parentIndent) {
+  void increaseIndentation(CharSourceRange range,
+                            SourceLocation parentIndent) override {
     rewriter.IncreaseIndentation(range, parentIndent);
   }
 };
@@ -550,7 +515,7 @@
 
 bool MigrationProcess::applyTransform(TransformFn trans,
                                       RewriteListener *listener) {
-  OwningPtr<CompilerInvocation> CInvok;
+  std::unique_ptr<CompilerInvocation> CInvok;
   CInvok.reset(createInvocationForMigration(OrigCI));
   CInvok->getDiagnosticOpts().IgnoreWarnings = true;
 
@@ -569,12 +534,11 @@
   CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags);
   Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
 
-  OwningPtr<ARCMTMacroTrackerAction> ASTAction;
+  std::unique_ptr<ARCMTMacroTrackerAction> ASTAction;
   ASTAction.reset(new ARCMTMacroTrackerAction(ARCMTMacroLocs));
 
-  OwningPtr<ASTUnit> Unit(
-      ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags,
-                                                ASTAction.get()));
+  std::unique_ptr<ASTUnit> Unit(ASTUnit::LoadFromCompilerInvocationAction(
+      CInvok.release(), Diags, ASTAction.get()));
   if (!Unit) {
     errRec.FinishCapture();
     return true;
diff --git a/lib/ARCMigrate/Android.mk b/lib/ARCMigrate/Android.mk
index 6abddd8..834f573 100644
--- a/lib/ARCMigrate/Android.mk
+++ b/lib/ARCMigrate/Android.mk
@@ -9,6 +9,7 @@
   Attrs.inc \
   AttrList.inc \
   AttrParsedAttrList.inc    \
+  AttrVisitor.inc \
   CommentCommandList.inc \
   CommentNodes.inc \
   DeclNodes.inc \
diff --git a/lib/ARCMigrate/CMakeLists.txt b/lib/ARCMigrate/CMakeLists.txt
index c552612..e5ec607 100644
--- a/lib/ARCMigrate/CMakeLists.txt
+++ b/lib/ARCMigrate/CMakeLists.txt
@@ -1,3 +1,7 @@
+set(LLVM_LINK_COMPONENTS
+  Support
+  )
+
 add_clang_library(clangARCMigrate
   ARCMT.cpp
   ARCMTActions.cpp
@@ -19,26 +23,16 @@
   TransZeroOutPropsInDealloc.cpp
   TransformActions.cpp
   Transforms.cpp
-  )
 
-add_dependencies(clangARCMigrate
-  ClangAttrClasses
-  ClangAttrList
-  ClangAttrParsedAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticGroups
-  ClangDiagnosticSema
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangARCMigrate
-  clangBasic
+  LINK_LIBS
   clangAST
-  clangParse
+  clangAnalysis
+  clangBasic
+  clangEdit
   clangFrontend
+  clangLex
   clangRewriteCore
-  clangRewriteFrontend
+  clangSema
+  clangSerialization
   clangStaticAnalyzerCheckers
   )
diff --git a/lib/ARCMigrate/FileRemapper.cpp b/lib/ARCMigrate/FileRemapper.cpp
index a14226e..e1cebc7 100644
--- a/lib/ARCMigrate/FileRemapper.cpp
+++ b/lib/ARCMigrate/FileRemapper.cpp
@@ -36,8 +36,7 @@
   assert(ToFromMappings.empty());
   if (!outputDir.empty()) {
     std::string infoFile = getRemapInfoFile(outputDir);
-    bool existed;
-    llvm::sys::fs::remove(infoFile, existed);
+    llvm::sys::fs::remove(infoFile);
   }
 }
 
@@ -65,8 +64,8 @@
     return false;
 
   std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
-  
-  OwningPtr<llvm::MemoryBuffer> fileBuf;
+
+  std::unique_ptr<llvm::MemoryBuffer> fileBuf;
   if (llvm::MemoryBuffer::getFile(infoFile.c_str(), fileBuf))
     return report("Error opening file: " + infoFile, Diag);
   
@@ -112,8 +111,7 @@
 bool FileRemapper::flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag) {
   using namespace llvm::sys;
 
-  bool existed;
-  if (fs::create_directory(outputDir, existed) != llvm::errc::success)
+  if (fs::create_directory(outputDir) != llvm::errc::success)
     return report("Could not create directory: " + outputDir, Diag);
 
   std::string infoFile = getRemapInfoFile(outputDir);
@@ -125,8 +123,7 @@
 
   std::string errMsg;
   std::string infoFile = outputPath;
-  llvm::raw_fd_ostream infoOut(infoFile.c_str(), errMsg,
-                               llvm::sys::fs::F_Binary);
+  llvm::raw_fd_ostream infoOut(infoFile.c_str(), errMsg, llvm::sys::fs::F_None);
   if (!errMsg.empty())
     return report(errMsg, Diag);
 
@@ -182,8 +179,7 @@
                     Diag);
 
     std::string errMsg;
-    llvm::raw_fd_ostream Out(origFE->getName(), errMsg,
-                             llvm::sys::fs::F_Binary);
+    llvm::raw_fd_ostream Out(origFE->getName(), errMsg, llvm::sys::fs::F_None);
     if (!errMsg.empty())
       return report(errMsg, Diag);
 
@@ -272,9 +268,7 @@
 }
 
 bool FileRemapper::report(const Twine &err, DiagnosticsEngine &Diag) {
-  SmallString<128> buf;
-  unsigned ID = Diag.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Error,
-                                                         err.toStringRef(buf));
-  Diag.Report(ID);
+  Diag.Report(Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
+      << err.str();
   return true;
 }
diff --git a/lib/ARCMigrate/Internals.h b/lib/ARCMigrate/Internals.h
index 3690c83..d87d1ec 100644
--- a/lib/ARCMigrate/Internals.h
+++ b/lib/ARCMigrate/Internals.h
@@ -95,6 +95,8 @@
     return CapturedDiags.hasDiagnostic(IDs, range);
   }
 
+  DiagnosticBuilder report(SourceLocation loc, unsigned diagId,
+                           SourceRange range = SourceRange());
   void reportError(StringRef error, SourceLocation loc,
                    SourceRange range = SourceRange());
   void reportWarning(StringRef warning, SourceLocation loc,
@@ -161,8 +163,6 @@
   const CapturedDiagList &getDiags() const { return CapturedDiags; }
 
   bool isGCMigration() const { return OrigGCMode != LangOptions::NonGC; }
-  bool noNSAllocReallocError() const { return MigOptions.NoNSAllocReallocError; }
-  void setNSAllocReallocError(bool val) { MigOptions.NoNSAllocReallocError = val; }
   bool noFinalizeRemoval() const { return MigOptions.NoFinalizeRemoval; }
   void setNoFinalizeRemoval(bool val) {MigOptions.NoFinalizeRemoval = val; }
 
diff --git a/lib/ARCMigrate/ObjCMT.cpp b/lib/ARCMigrate/ObjCMT.cpp
index cac0fb0..3ae6724 100644
--- a/lib/ARCMigrate/ObjCMT.cpp
+++ b/lib/ARCMigrate/ObjCMT.cpp
@@ -8,12 +8,15 @@
 //===----------------------------------------------------------------------===//
 
 #include "Transforms.h"
+#include "clang/ARCMigrate/ARCMT.h"
 #include "clang/ARCMigrate/ARCMTActions.h"
 #include "clang/AST/ASTConsumer.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
 #include "clang/AST/NSAPI.h"
 #include "clang/AST/ParentMap.h"
 #include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
 #include "clang/Basic/FileManager.h"
 #include "clang/Edit/Commit.h"
 #include "clang/Edit/EditedSource.h"
@@ -24,11 +27,11 @@
 #include "clang/Lex/PPConditionalDirectiveRecord.h"
 #include "clang/Lex/Preprocessor.h"
 #include "clang/Rewrite/Core/Rewriter.h"
-#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
 #include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
-#include "clang/AST/Attr.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/Support/Path.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/YAMLParser.h"
 
 using namespace clang;
 using namespace arcmt;
@@ -45,7 +48,6 @@
   
   void migrateDecl(Decl *D);
   void migrateObjCInterfaceDecl(ASTContext &Ctx, ObjCContainerDecl *D);
-  void migrateDeprecatedAnnotation(ASTContext &Ctx, ObjCCategoryDecl *CatDecl);
   void migrateProtocolConformance(ASTContext &Ctx,
                                   const ObjCImplementationDecl *ImpDecl);
   void CacheObjCNSIntegerTypedefed(const TypedefDecl *TypedefDcl);
@@ -76,14 +78,18 @@
   
   void migrateAddMethodAnnotation(ASTContext &Ctx,
                                   const ObjCMethodDecl *MethodDecl);
+
+  void inferDesignatedInitializers(ASTContext &Ctx,
+                                   const ObjCImplementationDecl *ImplD);
+
 public:
   std::string MigrateDir;
   unsigned ASTMigrateActions;
   FileID FileId;
   const TypedefDecl *NSIntegerTypedefed;
   const TypedefDecl *NSUIntegerTypedefed;
-  OwningPtr<NSAPI> NSAPIObj;
-  OwningPtr<edit::EditedSource> Editor;
+  std::unique_ptr<NSAPI> NSAPIObj;
+  std::unique_ptr<edit::EditedSource> Editor;
   FileRemapper &Remapper;
   FileManager &FileMgr;
   const PPConditionalDirectiveRecord *PPRec;
@@ -114,26 +120,26 @@
   }
 
 protected:
-  virtual void Initialize(ASTContext &Context) {
+  void Initialize(ASTContext &Context) override {
     NSAPIObj.reset(new NSAPI(Context));
     Editor.reset(new edit::EditedSource(Context.getSourceManager(),
                                         Context.getLangOpts(),
                                         PPRec, false));
   }
 
-  virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
+  bool HandleTopLevelDecl(DeclGroupRef DG) override {
     for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
       migrateDecl(*I);
     return true;
   }
-  virtual void HandleInterestingDecl(DeclGroupRef DG) {
+  void HandleInterestingDecl(DeclGroupRef DG) override {
     // Ignore decls from the PCH.
   }
-  virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
+  void HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) override {
     ObjCMigrateASTConsumer::HandleTopLevelDecl(DG);
   }
 
-  virtual void HandleTranslationUnit(ASTContext &Ctx);
+  void HandleTranslationUnit(ASTContext &Ctx) override;
 
   bool canModifyFile(StringRef Path) {
     if (WhiteListFilenames.empty())
@@ -141,6 +147,30 @@
     return WhiteListFilenames.find(llvm::sys::path::filename(Path))
         != WhiteListFilenames.end();
   }
+  bool canModifyFile(const FileEntry *FE) {
+    if (!FE)
+      return false;
+    return canModifyFile(FE->getName());
+  }
+  bool canModifyFile(FileID FID) {
+    if (FID.isInvalid())
+      return false;
+    return canModifyFile(PP.getSourceManager().getFileEntryForID(FID));
+  }
+
+  bool canModify(const Decl *D) {
+    if (!D)
+      return false;
+    if (const ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(D))
+      return canModify(CatImpl->getCategoryDecl());
+    if (const ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D))
+      return canModify(Impl->getClassInterface());
+    if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+      return canModify(cast<Decl>(MD->getDeclContext()));
+
+    FileID FID = PP.getSourceManager().getFileID(D->getLocation());
+    return canModifyFile(FID);
+  }
 };
 
 }
@@ -223,7 +253,7 @@
 
 class BodyMigrator : public RecursiveASTVisitor<BodyMigrator> {
   ObjCMigrateASTConsumer &Consumer;
-  OwningPtr<ParentMap> PMap;
+  std::unique_ptr<ParentMap> PMap;
 
 public:
   BodyMigrator(ObjCMigrateASTConsumer &consumer) : Consumer(consumer) { }
@@ -294,7 +324,9 @@
 static const char *PropertyMemoryAttribute(ASTContext &Context, QualType ArgType) {
   Qualifiers::ObjCLifetime propertyLifetime = ArgType.getObjCLifetime();
   bool RetainableObject = ArgType->isObjCRetainableType();
-  if (RetainableObject && propertyLifetime == Qualifiers::OCL_Strong) {
+  if (RetainableObject &&
+      (propertyLifetime == Qualifiers::OCL_Strong
+       || propertyLifetime == Qualifiers::OCL_None)) {
     if (const ObjCObjectPointerType *ObjPtrTy =
         ArgType->getAs<ObjCObjectPointerType>()) {
       ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface();
@@ -302,7 +334,7 @@
           IDecl->lookupNestedProtocol(&Context.Idents.get("NSCopying")))
         return "copy";
       else
-        return "retain";
+        return "strong";
     }
     else if (ArgType->isBlockPointerType())
       return "copy";
@@ -311,7 +343,7 @@
     // looking into setter's implementation for backing weak ivar.
     return "weak";
   else if (RetainableObject)
-    return ArgType->isBlockPointerType() ? "copy" : "retain";
+    return ArgType->isBlockPointerType() ? "copy" : "strong";
   return 0;
 }
 
@@ -344,23 +376,23 @@
     PropertyString += PropertyNameString;
   }
   // Property with no setter may be suggested as a 'readonly' property.
-  if (!Setter) {
+  if (!Setter)
     append_attr(PropertyString, "readonly", LParenAdded);
-    QualType ResType = Context.getCanonicalType(Getter->getResultType());
-    if (const char *MemoryManagementAttr = PropertyMemoryAttribute(Context, ResType))
-      append_attr(PropertyString, MemoryManagementAttr, LParenAdded);
-  }
+  
   
   // Short circuit 'delegate' properties that contain the name "delegate" or
   // "dataSource", or have exact name "target" to have 'assign' attribute.
   if (PropertyName.equals("target") ||
       (PropertyName.find("delegate") != StringRef::npos) ||
       (PropertyName.find("dataSource") != StringRef::npos)) {
-    QualType QT = Getter->getResultType();
+    QualType QT = Getter->getReturnType();
     if (!QT->isRealType())
       append_attr(PropertyString, "assign", LParenAdded);
-  }
-  else if (Setter) {
+  } else if (!Setter) {
+    QualType ResType = Context.getCanonicalType(Getter->getReturnType());
+    if (const char *MemoryManagementAttr = PropertyMemoryAttribute(Context, ResType))
+      append_attr(PropertyString, MemoryManagementAttr, LParenAdded);
+  } else {
     const ParmVarDecl *argDecl = *Setter->param_begin();
     QualType ArgType = Context.getCanonicalType(argDecl->getType());
     if (const char *MemoryManagementAttr = PropertyMemoryAttribute(Context, ArgType))
@@ -368,7 +400,7 @@
   }
   if (LParenAdded)
     PropertyString += ')';
-  QualType RT = Getter->getResultType();
+  QualType RT = Getter->getReturnType();
   if (!isa<TypedefType>(RT)) {
     // strip off any ARC lifetime qualifier.
     QualType CanResultTy = Context.getCanonicalType(RT);
@@ -419,21 +451,27 @@
     // Get location past ';'
     EndLoc = EndLoc.getLocWithOffset(1);
     SourceLocation BeginOfSetterDclLoc = Setter->getLocStart();
-    // FIXME. This assumes that setter decl; is immediately preceeded by eoln.
+    // FIXME. This assumes that setter decl; is immediately preceded by eoln.
     // It is trying to remove the setter method decl. line entirely.
     BeginOfSetterDclLoc = BeginOfSetterDclLoc.getLocWithOffset(-1);
     commit.remove(SourceRange(BeginOfSetterDclLoc, EndLoc));
   }
 }
 
+static bool IsCategoryNameWithDeprecatedSuffix(ObjCContainerDecl *D) {
+  if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(D)) {
+    StringRef Name = CatDecl->getName();
+    return Name.endswith("Deprecated");
+  }
+  return false;
+}
+
 void ObjCMigrateASTConsumer::migrateObjCInterfaceDecl(ASTContext &Ctx,
                                                       ObjCContainerDecl *D) {
-  if (D->isDeprecated())
+  if (D->isDeprecated() || IsCategoryNameWithDeprecatedSuffix(D))
     return;
-  
-  for (ObjCContainerDecl::method_iterator M = D->meth_begin(), MEnd = D->meth_end();
-       M != MEnd; ++M) {
-    ObjCMethodDecl *Method = (*M);
+    
+  for (auto *Method : D->methods()) {
     if (Method->isDeprecated())
       continue;
     bool PropertyInferred = migrateProperty(Ctx, D, Method);
@@ -448,48 +486,13 @@
   if (!(ASTMigrateActions & FrontendOptions::ObjCMT_ReturnsInnerPointerProperty))
     return;
   
-  for (ObjCContainerDecl::prop_iterator P = D->prop_begin(),
-       E = D->prop_end(); P != E; ++P) {
-    ObjCPropertyDecl *Prop = *P;
+  for (auto *Prop : D->properties()) {
     if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) &&
         !Prop->isDeprecated())
       migratePropertyNsReturnsInnerPointer(Ctx, Prop);
   }
 }
 
-void ObjCMigrateASTConsumer::migrateDeprecatedAnnotation(ASTContext &Ctx,
-                                                           ObjCCategoryDecl *CatDecl) {
-  StringRef Name = CatDecl->getName();
-  if (!Name.endswith("Deprecated"))
-    return;
-  
-  if (!Ctx.Idents.get("DEPRECATED").hasMacroDefinition())
-    return;
-  
-  ObjCContainerDecl *D = cast<ObjCContainerDecl>(CatDecl);
-  
-  for (ObjCContainerDecl::method_iterator M = D->meth_begin(), MEnd = D->meth_end();
-       M != MEnd; ++M) {
-    ObjCMethodDecl *Method = (*M);
-    if (Method->isDeprecated() || Method->isImplicit())
-      continue;
-    // Annotate with DEPRECATED
-    edit::Commit commit(*Editor);
-    commit.insertBefore(Method->getLocEnd(), " DEPRECATED");
-    Editor->commit(commit);
-  }
-  for (ObjCContainerDecl::prop_iterator P = D->prop_begin(),
-       E = D->prop_end(); P != E; ++P) {
-    ObjCPropertyDecl *Prop = *P;
-    if (Prop->isDeprecated())
-      continue;
-    // Annotate with DEPRECATED
-    edit::Commit commit(*Editor);
-    commit.insertAfterToken(Prop->getLocEnd(), " DEPRECATED");
-    Editor->commit(commit);
-  }
-}
-
 static bool
 ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
                                       const ObjCImplementationDecl *ImpDecl,
@@ -500,9 +503,7 @@
   // in class interface.
   bool HasAtleastOneRequiredProperty = false;
   if (const ObjCProtocolDecl *PDecl = Protocol->getDefinition())
-    for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
-         E = PDecl->prop_end(); P != E; ++P) {
-      ObjCPropertyDecl *Property = *P;
+    for (const auto *Property : PDecl->properties()) {
       if (Property->getPropertyImplementation() == ObjCPropertyDecl::Optional)
         continue;
       HasAtleastOneRequiredProperty = true;
@@ -532,9 +533,7 @@
   if (const ObjCProtocolDecl *PDecl = Protocol->getDefinition()) {
     if (PDecl->meth_begin() == PDecl->meth_end())
       return HasAtleastOneRequiredProperty;
-    for (ObjCContainerDecl::method_iterator M = PDecl->meth_begin(),
-         MEnd = PDecl->meth_end(); M != MEnd; ++M) {
-      ObjCMethodDecl *MD = (*M);
+    for (const auto *MD : PDecl->methods()) {
       if (MD->isImplicit())
         continue;
       if (MD->getImplementationControl() == ObjCMethodDecl::Optional)
@@ -632,7 +631,7 @@
                                                  /*IsDecl*/true);
   if (!EndOfEnumDclLoc.isInvalid()) {
     SourceLocation BeginOfEnumDclLoc = EnumDcl->getLocStart();
-    // FIXME. This assumes that enum decl; is immediately preceeded by eoln.
+    // FIXME. This assumes that enum decl; is immediately preceded by eoln.
     // It is trying to remove the enum decl. lines entirely.
     BeginOfEnumDclLoc = BeginOfEnumDclLoc.getLocWithOffset(-1);
     commit.remove(SourceRange(BeginOfEnumDclLoc, EndOfEnumDclLoc));
@@ -660,9 +659,7 @@
   bool PowerOfTwo = true;
   bool AllHexdecimalEnumerator = true;
   uint64_t MaxPowerOfTwoVal = 0;
-  for (EnumDecl::enumerator_iterator EI = EnumDcl->enumerator_begin(),
-       EE = EnumDcl->enumerator_end(); EI != EE; ++EI) {
-    EnumConstantDecl *Enumerator = (*EI);
+  for (auto Enumerator : EnumDcl->enumerators()) {
     const Expr *InitExpr = Enumerator->getInitExpr();
     if (!InitExpr) {
       PowerOfTwo = false;
@@ -749,6 +746,8 @@
     if (!DropIt)
       MinimalConformingProtocols.push_back(TargetPDecl);
   }
+  if (MinimalConformingProtocols.empty())
+    return;
   edit::Commit commit(*Editor);
   rewriteToObjCInterfaceDecl(IDecl, MinimalConformingProtocols,
                              *NSAPIObj, commit);
@@ -835,7 +834,7 @@
                                     ObjCMethodDecl *OM) {
   SourceRange R;
   std::string ClassString;
-  if (TypeSourceInfo *TSInfo =  OM->getResultTypeSourceInfo()) {
+  if (TypeSourceInfo *TSInfo = OM->getReturnTypeSourceInfo()) {
     TypeLoc TL = TSInfo->getTypeLoc();
     R = SourceRange(TL.getBeginLoc(), TL.getEndLoc());
     ClassString = "instancetype";
@@ -855,7 +854,7 @@
   ObjCInterfaceDecl *IDecl = OM->getClassInterface();
   SourceRange R;
   std::string ClassString;
-  if (TypeSourceInfo *TSInfo =  OM->getResultTypeSourceInfo()) {
+  if (TypeSourceInfo *TSInfo = OM->getReturnTypeSourceInfo()) {
     TypeLoc TL = TSInfo->getTypeLoc();
     R = SourceRange(TL.getBeginLoc(), TL.getEndLoc()); {
       ClassString  = IDecl->getName();
@@ -893,14 +892,14 @@
       migrateFactoryMethod(Ctx, CDecl, OM, OIT_Singleton);
       return;
     case OIT_Init:
-      if (OM->getResultType()->isObjCIdType())
+      if (OM->getReturnType()->isObjCIdType())
         ReplaceWithInstancetype(*this, OM);
       return;
     case OIT_ReturnsSelf:
       migrateFactoryMethod(Ctx, CDecl, OM, OIT_ReturnsSelf);
       return;
   }
-  if (!OM->getResultType()->isObjCIdType())
+  if (!OM->getReturnType()->isObjCIdType())
     return;
   
   ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl);
@@ -1034,7 +1033,7 @@
       Method->param_size() != 0)
     return false;
   // Is this method candidate to be a getter?
-  QualType GRT = Method->getResultType();
+  QualType GRT = Method->getReturnType();
   if (GRT->isVoidType())
     return false;
   
@@ -1087,7 +1086,7 @@
       return false;
     
     // Is this a valid setter, matching the target getter?
-    QualType SRT = SetterMethod->getResultType();
+    QualType SRT = SetterMethod->getReturnType();
     if (!SRT->isVoidType())
       return false;
     const ParmVarDecl *argDecl = *SetterMethod->param_begin();
@@ -1128,8 +1127,8 @@
       !OM->isInstanceMethod() ||
       OM->hasAttr<ObjCReturnsInnerPointerAttr>())
     return;
-  
-  QualType RT = OM->getResultType();
+
+  QualType RT = OM->getReturnType();
   if (!TypeIsInnerPointer(RT) ||
       !Ctx.Idents.get("NS_RETURNS_INNER_POINTER").hasMacroDefinition())
     return;
@@ -1153,14 +1152,11 @@
 
 void ObjCMigrateASTConsumer::migrateAllMethodInstaceType(ASTContext &Ctx,
                                                  ObjCContainerDecl *CDecl) {
-  if (CDecl->isDeprecated())
+  if (CDecl->isDeprecated() || IsCategoryNameWithDeprecatedSuffix(CDecl))
     return;
   
   // migrate methods which can have instancetype as their result type.
-  for (ObjCContainerDecl::method_iterator M = CDecl->meth_begin(),
-       MEnd = CDecl->meth_end();
-       M != MEnd; ++M) {
-    ObjCMethodDecl *Method = (*M);
+  for (auto *Method : CDecl->methods()) {
     if (Method->isDeprecated())
       continue;
     migrateMethodInstanceType(Ctx, CDecl, Method);
@@ -1172,8 +1168,8 @@
                                                   ObjCMethodDecl *OM,
                                                   ObjCInstanceTypeFamily OIT_Family) {
   if (OM->isInstanceMethod() ||
-      OM->getResultType() == Ctx.getObjCInstanceType() ||
-      !OM->getResultType()->isObjCIdType())
+      OM->getReturnType() == Ctx.getObjCInstanceType() ||
+      !OM->getReturnType()->isObjCIdType())
     return;
   
   // Candidate factory methods are + (id) NaMeXXX : ... which belong to a class
@@ -1365,13 +1361,13 @@
        pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) {
     const ParmVarDecl *pd = *pi;
     ArgEffect AE = AEArgs[i];
-    if (AE == DecRef && !pd->getAttr<CFConsumedAttr>() &&
+    if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() &&
         Ctx.Idents.get("CF_CONSUMED").hasMacroDefinition()) {
       edit::Commit commit(*Editor);
       commit.insertBefore(pd->getLocation(), "CF_CONSUMED ");
       Editor->commit(commit);
     }
-    else if (AE == DecRefMsg && !pd->getAttr<NSConsumedAttr>() &&
+    else if (AE == DecRefMsg && !pd->hasAttr<NSConsumedAttr>() &&
              Ctx.Idents.get("NS_CONSUMED").hasMacroDefinition()) {
       edit::Commit commit(*Editor);
       commit.insertBefore(pd->getLocation(), "NS_CONSUMED ");
@@ -1389,11 +1385,11 @@
     return CF_BRIDGING_NONE;
     
   CallEffects CE  = CallEffects::getEffect(FuncDecl);
-  bool FuncIsReturnAnnotated = (FuncDecl->getAttr<CFReturnsRetainedAttr>() ||
-                                FuncDecl->getAttr<CFReturnsNotRetainedAttr>() ||
-                                FuncDecl->getAttr<NSReturnsRetainedAttr>() ||
-                                FuncDecl->getAttr<NSReturnsNotRetainedAttr>() ||
-                                FuncDecl->getAttr<NSReturnsAutoreleasedAttr>());
+  bool FuncIsReturnAnnotated = (FuncDecl->hasAttr<CFReturnsRetainedAttr>() ||
+                                FuncDecl->hasAttr<CFReturnsNotRetainedAttr>() ||
+                                FuncDecl->hasAttr<NSReturnsRetainedAttr>() ||
+                                FuncDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
+                                FuncDecl->hasAttr<NSReturnsAutoreleasedAttr>());
   
   // Trivial case of when funciton is annotated and has no argument.
   if (FuncIsReturnAnnotated && FuncDecl->getNumParams() == 0)
@@ -1405,7 +1401,7 @@
     if (Ret.getObjKind() == RetEffect::CF &&
         (Ret.isOwned() || Ret.notOwned()))
       ReturnCFAudited = true;
-    else if (!AuditedType(FuncDecl->getResultType()))
+    else if (!AuditedType(FuncDecl->getReturnType()))
       return CF_BRIDGING_NONE;
   }
   
@@ -1419,7 +1415,7 @@
     const ParmVarDecl *pd = *pi;
     ArgEffect AE = AEArgs[i];
     if (AE == DecRef /*CFConsumed annotated*/ || AE == IncRef) {
-      if (AE == DecRef && !pd->getAttr<CFConsumedAttr>())
+      if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>())
         ArgCFAudited = true;
       else if (AE == IncRef)
         ArgCFAudited = true;
@@ -1444,12 +1440,8 @@
     return;
   
   // migrate methods which can have instancetype as their result type.
-  for (ObjCContainerDecl::method_iterator M = CDecl->meth_begin(),
-       MEnd = CDecl->meth_end();
-       M != MEnd; ++M) {
-    ObjCMethodDecl *Method = (*M);
+  for (const auto *Method : CDecl->methods())
     migrateCFAnnotation(Ctx, Method);
-  }
 }
 
 void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
@@ -1498,7 +1490,7 @@
        pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) {
     const ParmVarDecl *pd = *pi;
     ArgEffect AE = AEArgs[i];
-    if (AE == DecRef && !pd->getAttr<CFConsumedAttr>() &&
+    if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() &&
         Ctx.Idents.get("CF_CONSUMED").hasMacroDefinition()) {
       edit::Commit commit(*Editor);
       commit.insertBefore(pd->getLocation(), "CF_CONSUMED ");
@@ -1514,14 +1506,14 @@
     return;
   
   CallEffects CE  = CallEffects::getEffect(MethodDecl);
-  bool MethodIsReturnAnnotated = (MethodDecl->getAttr<CFReturnsRetainedAttr>() ||
-                                  MethodDecl->getAttr<CFReturnsNotRetainedAttr>() ||
-                                  MethodDecl->getAttr<NSReturnsRetainedAttr>() ||
-                                  MethodDecl->getAttr<NSReturnsNotRetainedAttr>() ||
-                                  MethodDecl->getAttr<NSReturnsAutoreleasedAttr>());
+  bool MethodIsReturnAnnotated = (MethodDecl->hasAttr<CFReturnsRetainedAttr>() ||
+                                  MethodDecl->hasAttr<CFReturnsNotRetainedAttr>() ||
+                                  MethodDecl->hasAttr<NSReturnsRetainedAttr>() ||
+                                  MethodDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
+                                  MethodDecl->hasAttr<NSReturnsAutoreleasedAttr>());
   
   if (CE.getReceiver() ==  DecRefMsg &&
-      !MethodDecl->getAttr<NSConsumesSelfAttr>() &&
+      !MethodDecl->hasAttr<NSConsumesSelfAttr>() &&
       MethodDecl->getMethodFamily() != OMF_init &&
       MethodDecl->getMethodFamily() != OMF_release &&
       Ctx.Idents.get("NS_CONSUMES_SELF").hasMacroDefinition()) {
@@ -1542,8 +1534,7 @@
         (Ret.isOwned() || Ret.notOwned())) {
       AddCFAnnotations(Ctx, CE, MethodDecl, false);
       return;
-    }
-    else if (!AuditedType(MethodDecl->getResultType()))
+    } else if (!AuditedType(MethodDecl->getReturnType()))
       return;
   }
   
@@ -1555,7 +1546,7 @@
        pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) {
     const ParmVarDecl *pd = *pi;
     ArgEffect AE = AEArgs[i];
-    if ((AE == DecRef && !pd->getAttr<CFConsumedAttr>()) || AE == IncRef ||
+    if ((AE == DecRef && !pd->hasAttr<CFConsumedAttr>()) || AE == IncRef ||
         !AuditedType(pd->getType())) {
       AddCFAnnotations(Ctx, CE, MethodDecl, MethodIsReturnAnnotated);
       return;
@@ -1565,6 +1556,53 @@
 }
 
 namespace {
+class SuperInitChecker : public RecursiveASTVisitor<SuperInitChecker> {
+public:
+  bool shouldVisitTemplateInstantiations() const { return false; }
+  bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+  bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+    if (E->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
+      if (E->getMethodFamily() == OMF_init)
+        return false;
+    }
+    return true;
+  }
+};
+} // anonymous namespace
+
+static bool hasSuperInitCall(const ObjCMethodDecl *MD) {
+  return !SuperInitChecker().TraverseStmt(MD->getBody());
+}
+
+void ObjCMigrateASTConsumer::inferDesignatedInitializers(
+    ASTContext &Ctx,
+    const ObjCImplementationDecl *ImplD) {
+
+  const ObjCInterfaceDecl *IFace = ImplD->getClassInterface();
+  if (!IFace || IFace->hasDesignatedInitializers())
+    return;
+  if (!Ctx.Idents.get("NS_DESIGNATED_INITIALIZER").hasMacroDefinition())
+    return;
+
+  for (const auto *MD : ImplD->instance_methods()) {
+    if (MD->isDeprecated() ||
+        MD->getMethodFamily() != OMF_init ||
+        MD->isDesignatedInitializerForTheInterface())
+      continue;
+    const ObjCMethodDecl *IFaceM = IFace->getMethod(MD->getSelector(),
+                                                    /*isInstance=*/true);
+    if (!IFaceM)
+      continue;
+    if (hasSuperInitCall(MD)) {
+      edit::Commit commit(*Editor);
+      commit.insert(IFaceM->getLocEnd(), " NS_DESIGNATED_INITIALIZER");
+      Editor->commit(commit);
+    }
+  }
+}
+
+namespace {
 
 class RewritesReceiver : public edit::EditsReceiver {
   Rewriter &Rewrite;
@@ -1572,14 +1610,92 @@
 public:
   RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
 
-  virtual void insert(SourceLocation loc, StringRef text) {
+  void insert(SourceLocation loc, StringRef text) override {
     Rewrite.InsertText(loc, text);
   }
-  virtual void replace(CharSourceRange range, StringRef text) {
+  void replace(CharSourceRange range, StringRef text) override {
     Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
   }
 };
 
+class JSONEditWriter : public edit::EditsReceiver {
+  SourceManager &SourceMgr;
+  llvm::raw_ostream &OS;
+
+public:
+  JSONEditWriter(SourceManager &SM, llvm::raw_ostream &OS)
+    : SourceMgr(SM), OS(OS) {
+    OS << "[\n";
+  }
+  ~JSONEditWriter() {
+    OS << "]\n";
+  }
+
+private:
+  struct EntryWriter {
+    SourceManager &SourceMgr;
+    llvm::raw_ostream &OS;
+
+    EntryWriter(SourceManager &SM, llvm::raw_ostream &OS)
+      : SourceMgr(SM), OS(OS) {
+      OS << " {\n";
+    }
+    ~EntryWriter() {
+      OS << " },\n";
+    }
+
+    void writeLoc(SourceLocation Loc) {
+      FileID FID;
+      unsigned Offset;
+      std::tie(FID, Offset) = SourceMgr.getDecomposedLoc(Loc);
+      assert(!FID.isInvalid());
+      SmallString<200> Path =
+          StringRef(SourceMgr.getFileEntryForID(FID)->getName());
+      llvm::sys::fs::make_absolute(Path);
+      OS << "  \"file\": \"";
+      OS.write_escaped(Path.str()) << "\",\n";
+      OS << "  \"offset\": " << Offset << ",\n";
+    }
+
+    void writeRemove(CharSourceRange Range) {
+      assert(Range.isCharRange());
+      std::pair<FileID, unsigned> Begin =
+          SourceMgr.getDecomposedLoc(Range.getBegin());
+      std::pair<FileID, unsigned> End =
+          SourceMgr.getDecomposedLoc(Range.getEnd());
+      assert(Begin.first == End.first);
+      assert(Begin.second <= End.second);
+      unsigned Length = End.second - Begin.second;
+
+      OS << "  \"remove\": " << Length << ",\n";
+    }
+
+    void writeText(StringRef Text) {
+      OS << "  \"text\": \"";
+      OS.write_escaped(Text) << "\",\n";
+    }
+  };
+ 
+  void insert(SourceLocation Loc, StringRef Text) override {
+    EntryWriter Writer(SourceMgr, OS);
+    Writer.writeLoc(Loc);
+    Writer.writeText(Text);
+  }
+
+  void replace(CharSourceRange Range, StringRef Text) override {
+    EntryWriter Writer(SourceMgr, OS);
+    Writer.writeLoc(Range.getBegin());
+    Writer.writeRemove(Range);
+    Writer.writeText(Text);
+  }
+
+  void remove(CharSourceRange Range) override {
+    EntryWriter Writer(SourceMgr, OS);
+    Writer.writeLoc(Range.getBegin());
+    Writer.writeRemove(Range);
+  }
+};
+
 }
 
 static bool
@@ -1593,9 +1709,9 @@
       if (FI.getFileCharacteristic() == SrcMgr::C_ExternCSystem)
         return true;
       if (FI.getFileCharacteristic() == SrcMgr::C_System) {
-        // This file is in a system header directory. Continue with commiting change
-        // only if it is a user specified system directory because user put a
-        // .system_framework file in the framework directory.
+        // This file is in a system header directory. Continue committing
+        // change only if it's a user-specified system directory because user
+        // put a .system_framework file in the framework directory.
         StringRef Directory(file->getDir()->getName());
         size_t Ix = Directory.rfind(".framework");
         if (Ix == StringRef::npos)
@@ -1624,22 +1740,25 @@
         }
       
       if (ObjCInterfaceDecl *CDecl = dyn_cast<ObjCInterfaceDecl>(*D))
-        migrateObjCInterfaceDecl(Ctx, CDecl);
+        if (canModify(CDecl))
+          migrateObjCInterfaceDecl(Ctx, CDecl);
       if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(*D)) {
-        migrateObjCInterfaceDecl(Ctx, CatDecl);
-        if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation)
-          migrateDeprecatedAnnotation(Ctx, CatDecl);
+        if (canModify(CatDecl))
+          migrateObjCInterfaceDecl(Ctx, CatDecl);
       }
       else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(*D))
-        ObjCProtocolDecls.insert(PDecl);
+        ObjCProtocolDecls.insert(PDecl->getCanonicalDecl());
       else if (const ObjCImplementationDecl *ImpDecl =
                dyn_cast<ObjCImplementationDecl>(*D)) {
-        if (ASTMigrateActions & FrontendOptions::ObjCMT_ProtocolConformance)
+        if ((ASTMigrateActions & FrontendOptions::ObjCMT_ProtocolConformance) &&
+            canModify(ImpDecl))
           migrateProtocolConformance(Ctx, ImpDecl);
       }
       else if (const EnumDecl *ED = dyn_cast<EnumDecl>(*D)) {
         if (!(ASTMigrateActions & FrontendOptions::ObjCMT_NsMacros))
           continue;
+        if (!canModify(ED))
+          continue;
         DeclContext::decl_iterator N = D;
         if (++N != DEnd) {
           const TypedefDecl *TD = dyn_cast<TypedefDecl>(*N);
@@ -1652,6 +1771,8 @@
       else if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(*D)) {
         if (!(ASTMigrateActions & FrontendOptions::ObjCMT_NsMacros))
           continue;
+        if (!canModify(TD))
+          continue;
         DeclContext::decl_iterator N = D;
         if (++N == DEnd)
           continue;
@@ -1673,23 +1794,49 @@
         CacheObjCNSIntegerTypedefed(TD);
       }
       else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*D)) {
-        if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation)
+        if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) &&
+            canModify(FD))
           migrateCFAnnotation(Ctx, FD);
       }
       
       if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(*D)) {
+        bool CanModify = canModify(CDecl);
         // migrate methods which can have instancetype as their result type.
-        if (ASTMigrateActions & FrontendOptions::ObjCMT_Instancetype)
+        if ((ASTMigrateActions & FrontendOptions::ObjCMT_Instancetype) &&
+            CanModify)
           migrateAllMethodInstaceType(Ctx, CDecl);
         // annotate methods with CF annotations.
-        if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation)
+        if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) &&
+            CanModify)
           migrateARCSafeAnnotation(Ctx, CDecl);
       }
+
+      if (const ObjCImplementationDecl *
+            ImplD = dyn_cast<ObjCImplementationDecl>(*D)) {
+        if ((ASTMigrateActions & FrontendOptions::ObjCMT_DesignatedInitializer) &&
+            canModify(ImplD))
+          inferDesignatedInitializers(Ctx, ImplD);
+      }
     }
     if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation)
       AnnotateImplicitBridging(Ctx);
   }
   
+ if (IsOutputFile) {
+   std::string Error;
+   llvm::raw_fd_ostream OS(MigrateDir.c_str(), Error, llvm::sys::fs::F_None);
+    if (!Error.empty()) {
+      DiagnosticsEngine &Diags = Ctx.getDiagnostics();
+      Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
+          << Error;
+      return;
+    }
+
+   JSONEditWriter Writer(Ctx.getSourceManager(), OS);
+   Editor->applyRewrites(Writer);
+   return;
+ }
+
   Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts());
   RewritesReceiver Rec(rewriter);
   Editor->applyRewrites(Rec);
@@ -1702,8 +1849,6 @@
     assert(file);
     if (IsReallyASystemHeader(Ctx, file, FID))
       continue;
-    if (!canModifyFile(file->getName()))
-      continue;
     SmallString<512> newText;
     llvm::raw_svector_ostream vecOS(newText);
     buf.write(vecOS);
@@ -1773,3 +1918,245 @@
                                     /*isOutputFile=*/true,
                                     WhiteList);
 }
+
+namespace {
+struct EditEntry {
+  const FileEntry *File;
+  unsigned Offset;
+  unsigned RemoveLen;
+  std::string Text;
+
+  EditEntry() : File(), Offset(), RemoveLen() {}
+};
+}
+
+namespace llvm {
+template<> struct DenseMapInfo<EditEntry> {
+  static inline EditEntry getEmptyKey() {
+    EditEntry Entry;
+    Entry.Offset = unsigned(-1);
+    return Entry;
+  }
+  static inline EditEntry getTombstoneKey() {
+    EditEntry Entry;
+    Entry.Offset = unsigned(-2);
+    return Entry;
+  }
+  static unsigned getHashValue(const EditEntry& Val) {
+    llvm::FoldingSetNodeID ID;
+    ID.AddPointer(Val.File);
+    ID.AddInteger(Val.Offset);
+    ID.AddInteger(Val.RemoveLen);
+    ID.AddString(Val.Text);
+    return ID.ComputeHash();
+  }
+  static bool isEqual(const EditEntry &LHS, const EditEntry &RHS) {
+    return LHS.File == RHS.File &&
+        LHS.Offset == RHS.Offset &&
+        LHS.RemoveLen == RHS.RemoveLen &&
+        LHS.Text == RHS.Text;
+  }
+};
+}
+
+namespace {
+class RemapFileParser {
+  FileManager &FileMgr;
+
+public:
+  RemapFileParser(FileManager &FileMgr) : FileMgr(FileMgr) { }
+
+  bool parse(StringRef File, SmallVectorImpl<EditEntry> &Entries) {
+    using namespace llvm::yaml;
+
+    std::unique_ptr<llvm::MemoryBuffer> FileBuf;
+    if (llvm::MemoryBuffer::getFile(File, FileBuf))
+      return true;
+
+    llvm::SourceMgr SM;
+    Stream YAMLStream(FileBuf.release(), SM);
+    document_iterator I = YAMLStream.begin();
+    if (I == YAMLStream.end())
+      return true;
+    Node *Root = I->getRoot();
+    if (!Root)
+      return true;
+
+    SequenceNode *SeqNode = dyn_cast<SequenceNode>(Root);
+    if (!SeqNode)
+      return true;
+
+    for (SequenceNode::iterator
+           AI = SeqNode->begin(), AE = SeqNode->end(); AI != AE; ++AI) {
+      MappingNode *MapNode = dyn_cast<MappingNode>(&*AI);
+      if (!MapNode)
+        continue;
+      parseEdit(MapNode, Entries);
+    }
+
+    return false;
+  }
+
+private:
+  void parseEdit(llvm::yaml::MappingNode *Node,
+                 SmallVectorImpl<EditEntry> &Entries) {
+    using namespace llvm::yaml;
+    EditEntry Entry;
+    bool Ignore = false;
+
+    for (MappingNode::iterator
+           KVI = Node->begin(), KVE = Node->end(); KVI != KVE; ++KVI) {
+      ScalarNode *KeyString = dyn_cast<ScalarNode>((*KVI).getKey());
+      if (!KeyString)
+        continue;
+      SmallString<10> KeyStorage;
+      StringRef Key = KeyString->getValue(KeyStorage);
+
+      ScalarNode *ValueString = dyn_cast<ScalarNode>((*KVI).getValue());
+      if (!ValueString)
+        continue;
+      SmallString<64> ValueStorage;
+      StringRef Val = ValueString->getValue(ValueStorage);
+
+      if (Key == "file") {
+        const FileEntry *FE = FileMgr.getFile(Val);
+        if (!FE)
+          Ignore = true;
+        Entry.File = FE;
+      } else if (Key == "offset") {
+        if (Val.getAsInteger(10, Entry.Offset))
+          Ignore = true;
+      } else if (Key == "remove") {
+        if (Val.getAsInteger(10, Entry.RemoveLen))
+          Ignore = true;
+      } else if (Key == "text") {
+        Entry.Text = Val;
+      }
+    }
+
+    if (!Ignore)
+      Entries.push_back(Entry);
+  }
+};
+}
+
+static bool reportDiag(const Twine &Err, DiagnosticsEngine &Diag) {
+  Diag.Report(Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
+      << Err.str();
+  return true;
+}
+
+static std::string applyEditsToTemp(const FileEntry *FE,
+                                    ArrayRef<EditEntry> Edits,
+                                    FileManager &FileMgr,
+                                    DiagnosticsEngine &Diag) {
+  using namespace llvm::sys;
+
+  SourceManager SM(Diag, FileMgr);
+  FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
+  LangOptions LangOpts;
+  edit::EditedSource Editor(SM, LangOpts);
+  for (ArrayRef<EditEntry>::iterator
+        I = Edits.begin(), E = Edits.end(); I != E; ++I) {
+    const EditEntry &Entry = *I;
+    assert(Entry.File == FE);
+    SourceLocation Loc =
+        SM.getLocForStartOfFile(FID).getLocWithOffset(Entry.Offset);
+    CharSourceRange Range;
+    if (Entry.RemoveLen != 0) {
+      Range = CharSourceRange::getCharRange(Loc,
+                                         Loc.getLocWithOffset(Entry.RemoveLen));
+    }
+
+    edit::Commit commit(Editor);
+    if (Range.isInvalid()) {
+      commit.insert(Loc, Entry.Text);
+    } else if (Entry.Text.empty()) {
+      commit.remove(Range);
+    } else {
+      commit.replace(Range, Entry.Text);
+    }
+    Editor.commit(commit);
+  }
+
+  Rewriter rewriter(SM, LangOpts);
+  RewritesReceiver Rec(rewriter);
+  Editor.applyRewrites(Rec);
+
+  const RewriteBuffer *Buf = rewriter.getRewriteBufferFor(FID);
+  SmallString<512> NewText;
+  llvm::raw_svector_ostream OS(NewText);
+  Buf->write(OS);
+  OS.flush();
+
+  SmallString<64> TempPath;
+  int FD;
+  if (fs::createTemporaryFile(path::filename(FE->getName()),
+                              path::extension(FE->getName()), FD,
+                              TempPath)) {
+    reportDiag("Could not create file: " + TempPath.str(), Diag);
+    return std::string();
+  }
+
+  llvm::raw_fd_ostream TmpOut(FD, /*shouldClose=*/true);
+  TmpOut.write(NewText.data(), NewText.size());
+  TmpOut.close();
+
+  return TempPath.str();
+}
+
+bool arcmt::getFileRemappingsFromFileList(
+                        std::vector<std::pair<std::string,std::string> > &remap,
+                        ArrayRef<StringRef> remapFiles,
+                        DiagnosticConsumer *DiagClient) {
+  bool hasErrorOccurred = false;
+
+  FileSystemOptions FSOpts;
+  FileManager FileMgr(FSOpts);
+  RemapFileParser Parser(FileMgr);
+
+  IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+  IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+      new DiagnosticsEngine(DiagID, new DiagnosticOptions,
+                            DiagClient, /*ShouldOwnClient=*/false));
+
+  typedef llvm::DenseMap<const FileEntry *, std::vector<EditEntry> >
+      FileEditEntriesTy;
+  FileEditEntriesTy FileEditEntries;
+
+  llvm::DenseSet<EditEntry> EntriesSet;
+
+  for (ArrayRef<StringRef>::iterator
+         I = remapFiles.begin(), E = remapFiles.end(); I != E; ++I) {
+    SmallVector<EditEntry, 16> Entries;
+    if (Parser.parse(*I, Entries))
+      continue;
+
+    for (SmallVectorImpl<EditEntry>::iterator
+           EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI) {
+      EditEntry &Entry = *EI;
+      if (!Entry.File)
+        continue;
+      std::pair<llvm::DenseSet<EditEntry>::iterator, bool>
+        Insert = EntriesSet.insert(Entry);
+      if (!Insert.second)
+        continue;
+
+      FileEditEntries[Entry.File].push_back(Entry);
+    }
+  }
+
+  for (FileEditEntriesTy::iterator
+         I = FileEditEntries.begin(), E = FileEditEntries.end(); I != E; ++I) {
+    std::string TempFile = applyEditsToTemp(I->first, I->second,
+                                            FileMgr, *Diags);
+    if (TempFile.empty()) {
+      hasErrorOccurred = true;
+      continue;
+    }
+
+    remap.push_back(std::make_pair(I->first->getName(), TempFile));
+  }
+
+  return hasErrorOccurred;
+}
diff --git a/lib/ARCMigrate/PlistReporter.cpp b/lib/ARCMigrate/PlistReporter.cpp
index 144ba2e..2632417 100644
--- a/lib/ARCMigrate/PlistReporter.cpp
+++ b/lib/ARCMigrate/PlistReporter.cpp
@@ -9,87 +9,12 @@
 
 #include "Internals.h"
 #include "clang/Basic/FileManager.h"
+#include "clang/Basic/PlistSupport.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Lex/Lexer.h"
 using namespace clang;
 using namespace arcmt;
-
-// FIXME: This duplicates significant functionality from PlistDiagnostics.cpp,
-// it would be jolly good if there was a reusable PlistWriter or something.
-
-typedef llvm::DenseMap<FileID, unsigned> FIDMap;
-
-static void AddFID(FIDMap &FIDs, SmallVectorImpl<FileID> &V,
-                   const SourceManager &SM, SourceLocation L) {
-
-  FileID FID = SM.getFileID(SM.getExpansionLoc(L));
-  FIDMap::iterator I = FIDs.find(FID);
-  if (I != FIDs.end()) return;
-  FIDs[FID] = V.size();
-  V.push_back(FID);
-}
-
-static unsigned GetFID(const FIDMap& FIDs, const SourceManager &SM,
-                       SourceLocation L) {
-  FileID FID = SM.getFileID(SM.getExpansionLoc(L));
-  FIDMap::const_iterator I = FIDs.find(FID);
-  assert(I != FIDs.end());
-  return I->second;
-}
-
-static raw_ostream& Indent(raw_ostream& o, const unsigned indent) {
-  for (unsigned i = 0; i < indent; ++i) o << ' ';
-  return o;
-}
-
-static void EmitLocation(raw_ostream& o, const SourceManager &SM,
-                         const LangOptions &LangOpts,
-                         SourceLocation L, const FIDMap &FM,
-                         unsigned indent, bool extend = false) {
-
-  FullSourceLoc Loc(SM.getExpansionLoc(L), const_cast<SourceManager&>(SM));
-
-  // Add in the length of the token, so that we cover multi-char tokens.
-  unsigned offset =
-    extend ? Lexer::MeasureTokenLength(Loc, SM, LangOpts) - 1 : 0;
-
-  Indent(o, indent) << "<dict>\n";
-  Indent(o, indent) << " <key>line</key><integer>"
-                    << Loc.getExpansionLineNumber() << "</integer>\n";
-  Indent(o, indent) << " <key>col</key><integer>"
-                    << Loc.getExpansionColumnNumber() + offset << "</integer>\n";
-  Indent(o, indent) << " <key>file</key><integer>"
-                    << GetFID(FM, SM, Loc) << "</integer>\n";
-  Indent(o, indent) << "</dict>\n";
-}
-
-static void EmitRange(raw_ostream& o, const SourceManager &SM,
-                      const LangOptions &LangOpts,
-                      CharSourceRange R, const FIDMap &FM,
-                      unsigned indent) {
-  Indent(o, indent) << "<array>\n";
-  EmitLocation(o, SM, LangOpts, R.getBegin(), FM, indent+1);
-  EmitLocation(o, SM, LangOpts, R.getEnd(), FM, indent+1, R.isTokenRange());
-  Indent(o, indent) << "</array>\n";
-}
-
-static raw_ostream& EmitString(raw_ostream& o,
-                                     StringRef s) {
-  o << "<string>";
-  for (StringRef::const_iterator I=s.begin(), E=s.end(); I!=E; ++I) {
-    char c = *I;
-    switch (c) {
-    default:   o << c; break;
-    case '&':  o << "&amp;"; break;
-    case '<':  o << "&lt;"; break;
-    case '>':  o << "&gt;"; break;
-    case '\'': o << "&apos;"; break;
-    case '\"': o << "&quot;"; break;
-    }
-  }
-  o << "</string>";
-  return o;
-}
+using namespace markup;
 
 void arcmt::writeARCDiagsToPlist(const std::string &outPath,
                                  ArrayRef<StoredDiagnostic> diags,
@@ -116,17 +41,14 @@
   }
 
   std::string errMsg;
-  llvm::raw_fd_ostream o(outPath.c_str(), errMsg);
+  llvm::raw_fd_ostream o(outPath.c_str(), errMsg, llvm::sys::fs::F_Text);
   if (!errMsg.empty()) {
     llvm::errs() << "error: could not create file: " << outPath << '\n';
     return;
   }
 
   // Write the plist header.
-  o << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
-  "<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" "
-  "\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
-  "<plist version=\"1.0\">\n";
+  o << PlistHeader;
 
   // Write the root object: a <dict> containing...
   //  - "files", an <array> mapping from FIDs to file names
diff --git a/lib/ARCMigrate/TransAPIUses.cpp b/lib/ARCMigrate/TransAPIUses.cpp
index a0994a6..544cb0a 100644
--- a/lib/ARCMigrate/TransAPIUses.cpp
+++ b/lib/ARCMigrate/TransAPIUses.cpp
@@ -66,8 +66,7 @@
         selName = "getArgument";
       else if (E->getSelector() == setArgumentSel)
         selName = "setArgument";
-
-      if (selName.empty())
+      else
         return true;
 
       Expr *parm = E->getArg(0)->IgnoreParenCasts();
@@ -75,13 +74,12 @@
       if (pointee.isNull())
         return true;
 
-      if (pointee.getObjCLifetime() > Qualifiers::OCL_ExplicitNone) {
-        std::string err = "NSInvocation's ";
-        err += selName;
-        err += " is not safe to be used with an object with ownership other "
-            "than __unsafe_unretained";
-        Pass.TA.reportError(err, parm->getLocStart(), parm->getSourceRange());
-      }
+      if (pointee.getObjCLifetime() > Qualifiers::OCL_ExplicitNone)
+        Pass.TA.report(parm->getLocStart(),
+                       diag::err_arcmt_nsinvocation_ownership,
+                       parm->getSourceRange())
+            << selName;
+
       return true;
     }
 
diff --git a/lib/ARCMigrate/TransBlockObjCVariable.cpp b/lib/ARCMigrate/TransBlockObjCVariable.cpp
index 97c4e34..fac6a84 100644
--- a/lib/ARCMigrate/TransBlockObjCVariable.cpp
+++ b/lib/ARCMigrate/TransBlockObjCVariable.cpp
@@ -78,10 +78,9 @@
   bool VisitBlockDecl(BlockDecl *block) {
     SmallVector<VarDecl *, 4> BlockVars;
     
-    for (BlockDecl::capture_iterator
-           I = block->capture_begin(), E = block->capture_end(); I != E; ++I) {
-      VarDecl *var = I->getVariable();
-      if (I->isByRef() &&
+    for (const auto &I : block->captures()) {
+      VarDecl *var = I.getVariable();
+      if (I.isByRef() &&
           var->getType()->isObjCObjectPointerType() &&
           isImplicitStrong(var->getType())) {
         BlockVars.push_back(var);
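The hunks above and below replace explicit iterator-pair loops (capture_begin()/capture_end(), body_begin()/body_end(), and so on) with range-based for loops over the corresponding range accessors. A minimal standalone sketch of the idiom, using a hypothetical Block/Capture pair rather than the Clang AST classes:

  #include <iostream>
  #include <vector>

  struct Capture { bool ByRef; int Var; };

  struct Block {
    std::vector<Capture> Captures;
    // Range accessor playing the role of BlockDecl::captures().
    const std::vector<Capture> &captures() const { return Captures; }
  };

  int main() {
    Block B{{{true, 1}, {false, 2}, {true, 3}}};

    // Old style: explicit iterator pair.
    for (std::vector<Capture>::const_iterator I = B.captures().begin(),
                                              E = B.captures().end();
         I != E; ++I)
      if (I->ByRef)
        std::cout << I->Var << '\n';

    // New style: range-based for over the same range.
    for (const auto &C : B.captures())
      if (C.ByRef)
        std::cout << C.Var << '\n';
  }

Both loops print 1 and 3; the range form avoids spelling out the iterator type and end bound, which is the whole of the mechanical change in these hunks.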
diff --git a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
index ffb638f..31f19ce 100644
--- a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
+++ b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -89,9 +89,8 @@
   bool VisitCompoundStmt(CompoundStmt *S) {
     if (S->body_empty())
       return false; // was already empty, not because of transformations.
-    for (CompoundStmt::body_iterator
-           I = S->body_begin(), E = S->body_end(); I != E; ++I)
-      if (!Visit(*I))
+    for (auto *I : S->body())
+      if (!Visit(I))
         return false;
     return true;
   }
@@ -167,9 +166,8 @@
   }
 
   bool VisitCompoundStmt(CompoundStmt *S) {
-    for (CompoundStmt::body_iterator
-           I = S->body_begin(), E = S->body_end(); I != E; ++I)
-      check(*I);
+    for (auto *I : S->body())
+      check(I);
     return true;
   }
 
@@ -189,9 +187,8 @@
 
 static bool isBodyEmpty(CompoundStmt *body, ASTContext &Ctx,
                         std::vector<SourceLocation> &MacroLocs) {
-  for (CompoundStmt::body_iterator
-         I = body->body_begin(), E = body->body_end(); I != E; ++I)
-    if (!EmptyChecker(Ctx, MacroLocs).Visit(*I))
+  for (auto *I : body->body())
+    if (!EmptyChecker(Ctx, MacroLocs).Visit(I))
       return false;
 
   return true;
@@ -210,10 +207,7 @@
                      E = impl_iterator(DC->decls_end()); I != E; ++I) {
     ObjCMethodDecl *DeallocM = 0;
     ObjCMethodDecl *FinalizeM = 0;
-    for (ObjCImplementationDecl::instmeth_iterator
-           MI = I->instmeth_begin(),
-           ME = I->instmeth_end(); MI != ME; ++MI) {
-      ObjCMethodDecl *MD = *MI;
+    for (auto *MD : I->instance_methods()) {
       if (!MD->hasBody())
         continue;
   
diff --git a/lib/ARCMigrate/TransGCAttrs.cpp b/lib/ARCMigrate/TransGCAttrs.cpp
index d8be1ae..cbb3d12 100644
--- a/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/lib/ARCMigrate/TransGCAttrs.cpp
@@ -134,8 +134,7 @@
       return hasObjCImpl(ContD);
 
     if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
-      for (CXXRecordDecl::method_iterator
-             MI = RD->method_begin(), ME = RD->method_end(); MI != ME; ++MI) {
+      for (const auto *MI : RD->methods()) {
         if (MI->isOutOfLine())
           return true;
       }
@@ -164,8 +163,7 @@
     if (!D)
       return false;
 
-    for (Decl::redecl_iterator
-           I = D->redecls_begin(), E = D->redecls_end(); I != E; ++I)
+    for (auto I : D->redecls())
       if (!isInMainFile(I->getLocation()))
         return false;
     
diff --git a/lib/ARCMigrate/TransGCCalls.cpp b/lib/ARCMigrate/TransGCCalls.cpp
index 249f20f..401e788 100644
--- a/lib/ARCMigrate/TransGCCalls.cpp
+++ b/lib/ARCMigrate/TransGCCalls.cpp
@@ -38,14 +38,8 @@
     TransformActions &TA = MigrateCtx.Pass.TA;
 
     if (MigrateCtx.isGCOwnedNonObjC(E->getType())) {
-      if (MigrateCtx.Pass.noNSAllocReallocError())
-        TA.reportWarning("call returns pointer to GC managed memory; "
-                       "it will become unmanaged in ARC",
-                       E->getLocStart(), E->getSourceRange());
-      else 
-        TA.reportError("call returns pointer to GC managed memory; "
-                       "it will become unmanaged in ARC",
-                       E->getLocStart(), E->getSourceRange());
+      TA.report(E->getLocStart(), diag::err_arcmt_nsalloc_realloc,
+                E->getSourceRange());
       return true;
     }
 
diff --git a/lib/ARCMigrate/TransProperties.cpp b/lib/ARCMigrate/TransProperties.cpp
index b6ddc43..e18da97 100644
--- a/lib/ARCMigrate/TransProperties.cpp
+++ b/lib/ARCMigrate/TransProperties.cpp
@@ -75,17 +75,15 @@
 
   static void collectProperties(ObjCContainerDecl *D, AtPropDeclsTy &AtProps,
                                 AtPropDeclsTy *PrevAtProps = 0) {
-    for (ObjCInterfaceDecl::prop_iterator
-           propI = D->prop_begin(),
-           propE = D->prop_end(); propI != propE; ++propI) {
-      if (propI->getAtLoc().isInvalid())
+    for (auto *Prop : D->properties()) {
+      if (Prop->getAtLoc().isInvalid())
         continue;
-      unsigned RawLoc = propI->getAtLoc().getRawEncoding();
+      unsigned RawLoc = Prop->getAtLoc().getRawEncoding();
       if (PrevAtProps)
         if (PrevAtProps->find(RawLoc) != PrevAtProps->end())
           continue;
       PropsTy &props = AtProps[RawLoc];
-      props.push_back(*propI);
+      props.push_back(Prop);
     }
   }
 
@@ -141,12 +139,8 @@
 
     AtPropDeclsTy AtExtProps;
     // Look through extensions.
-    for (ObjCInterfaceDecl::visible_extensions_iterator
-           ext = iface->visible_extensions_begin(),
-           extEnd = iface->visible_extensions_end();
-         ext != extEnd; ++ext) {
-      collectProperties(*ext, AtExtProps, &AtProps);
-    }
+    for (auto *Ext : iface->visible_extensions())
+      collectProperties(Ext, AtExtProps, &AtProps);
 
     for (AtPropDeclsTy::iterator
            I = AtExtProps.begin(), E = AtExtProps.end(); I != E; ++I) {
@@ -353,14 +347,6 @@
     return false;    
   }
 
-  bool hasAllIvarsBacked(PropsTy &props) const {
-    for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
-      if (!isUserDeclared(I->IvarD))
-        return false;
-
-    return true;
-  }
-
   // \brief Returns true if all declarations in the @property have GC __weak.
   bool hasGCWeak(PropsTy &props, SourceLocation atLoc) const {
     if (!Pass.isGCMigration())
diff --git a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index 446a284..5db5fa0 100644
--- a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -38,7 +38,7 @@
   MigrationPass &Pass;
 
   ExprSet Removables;
-  OwningPtr<ParentMap> StmtMap;
+  std::unique_ptr<ParentMap> StmtMap;
 
   Selector DelegateSel, FinalizeSel;
 
@@ -212,7 +212,7 @@
       return false;
 
     Stmt *prevStmt, *nextStmt;
-    llvm::tie(prevStmt, nextStmt) = getPreviousAndNextStmt(E);
+    std::tie(prevStmt, nextStmt) = getPreviousAndNextStmt(E);
 
     return isPlusOneAssignToVar(prevStmt, RefD) ||
            isPlusOneAssignToVar(nextStmt, RefD);
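getPreviousAndNextStmt() returns a pair, and the hunk above unpacks it with std::tie from <tuple> instead of llvm::tie. A small standalone example of the same std::tie usage, with a hypothetical helper standing in for the Clang one:

  #include <iostream>
  #include <tuple>
  #include <utility>

  // Hypothetical stand-in for a helper that returns two related values.
  static std::pair<int, int> getNeighbours(int X) {
    return std::make_pair(X - 1, X + 1);
  }

  int main() {
    int Prev, Next;
    std::tie(Prev, Next) = getNeighbours(10); // Prev = 9, Next = 11
    std::cout << Prev << ' ' << Next << '\n';
  }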
diff --git a/lib/ARCMigrate/TransUnbridgedCasts.cpp b/lib/ARCMigrate/TransUnbridgedCasts.cpp
index 7b360c6..0aa0c89 100644
--- a/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -60,10 +60,10 @@
 class UnbridgedCastRewriter : public RecursiveASTVisitor<UnbridgedCastRewriter>{
   MigrationPass &Pass;
   IdentifierInfo *SelfII;
-  OwningPtr<ParentMap> StmtMap;
+  std::unique_ptr<ParentMap> StmtMap;
   Decl *ParentD;
   Stmt *Body;
-  mutable OwningPtr<ExprSet> Removables;
+  mutable std::unique_ptr<ExprSet> Removables;
 
 public:
   UnbridgedCastRewriter(MigrationPass &pass) : Pass(pass), ParentD(0), Body(0) {
@@ -133,11 +133,11 @@
     Expr *inner = E->IgnoreParenCasts();
     if (CallExpr *callE = dyn_cast<CallExpr>(inner)) {
       if (FunctionDecl *FD = callE->getDirectCallee()) {
-        if (FD->getAttr<CFReturnsRetainedAttr>()) {
+        if (FD->hasAttr<CFReturnsRetainedAttr>()) {
           castToObjCObject(E, /*retained=*/true);
           return;
         }
-        if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
+        if (FD->hasAttr<CFReturnsNotRetainedAttr>()) {
           castToObjCObject(E, /*retained=*/false);
           return;
         }
@@ -283,7 +283,7 @@
     SourceLocation Loc = E->getExprLoc();
     assert(Loc.isMacroID());
     SourceLocation MacroBegin, MacroEnd;
-    llvm::tie(MacroBegin, MacroEnd) = SM.getImmediateExpansionRange(Loc);
+    std::tie(MacroBegin, MacroEnd) = SM.getImmediateExpansionRange(Loc);
     SourceRange SubRange = E->getSubExpr()->IgnoreParenImpCasts()->getSourceRange();
     SourceLocation InnerBegin = SM.getImmediateMacroCallerLoc(SubRange.getBegin());
     SourceLocation InnerEnd = SM.getImmediateMacroCallerLoc(SubRange.getEnd());
@@ -439,7 +439,7 @@
         }
         if (i < callE->getNumArgs() && i < FD->getNumParams()) {
           ParmVarDecl *PD = FD->getParamDecl(i);
-          if (PD->getAttr<CFConsumedAttr>()) {
+          if (PD->hasAttr<CFConsumedAttr>()) {
             isConsumed = true;
             return true;
           }
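The StmtMap and Removables members above move from OwningPtr to std::unique_ptr. A minimal sketch of the unique_ptr API these call sites rely on (reset() to take ownership, release() to hand a raw pointer back to a caller that expects to own it), using a hypothetical stub type rather than clang::ParentMap:

  #include <iostream>
  #include <memory>

  struct ParentMapStub { int Nodes; }; // hypothetical stand-in

  // Hypothetical consumer that takes ownership of a raw pointer.
  static void consume(ParentMapStub *P) {
    std::cout << "consumed map with " << P->Nodes << " nodes\n";
    delete P;
  }

  int main() {
    std::unique_ptr<ParentMapStub> StmtMap;
    StmtMap.reset(new ParentMapStub{3}); // take ownership
    consume(StmtMap.release());          // give ownership away; StmtMap is now null
  }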
diff --git a/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
index 4d088e0..3be9fb2 100644
--- a/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
+++ b/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -113,23 +113,21 @@
 
     // For a 'dealloc' method use, find all property implementations in
     // this class implementation.
-    for (ObjCImplDecl::propimpl_iterator
-           I = IMD->propimpl_begin(), EI = IMD->propimpl_end(); I != EI; ++I) {
-        ObjCPropertyImplDecl *PID = *I;
-        if (PID->getPropertyImplementation() ==
-            ObjCPropertyImplDecl::Synthesize) {
-          ObjCPropertyDecl *PD = PID->getPropertyDecl();
-          ObjCMethodDecl *setterM = PD->getSetterMethodDecl();
-          if (!(setterM && setterM->isDefined())) {
-            ObjCPropertyDecl::PropertyAttributeKind AttrKind = 
-              PD->getPropertyAttributes();
-              if (AttrKind & 
-                  (ObjCPropertyDecl::OBJC_PR_retain | 
-                   ObjCPropertyDecl::OBJC_PR_copy   |
-                   ObjCPropertyDecl::OBJC_PR_strong))
-                SynthesizedProperties[PD] = PID;
-          }
+    for (auto *PID : IMD->property_impls()) {
+      if (PID->getPropertyImplementation() ==
+          ObjCPropertyImplDecl::Synthesize) {
+        ObjCPropertyDecl *PD = PID->getPropertyDecl();
+        ObjCMethodDecl *setterM = PD->getSetterMethodDecl();
+        if (!(setterM && setterM->isDefined())) {
+          ObjCPropertyDecl::PropertyAttributeKind AttrKind =
+              PD->getPropertyAttributes();
+          if (AttrKind &
+              (ObjCPropertyDecl::OBJC_PR_retain |
+               ObjCPropertyDecl::OBJC_PR_copy |
+               ObjCPropertyDecl::OBJC_PR_strong))
+            SynthesizedProperties[PD] = PID;
         }
+      }
     }
 
     // Now, remove all zeroing of ivars etc.
diff --git a/lib/ARCMigrate/TransformActions.cpp b/lib/ARCMigrate/TransformActions.cpp
index 2fd0619..e6268a1 100644
--- a/lib/ARCMigrate/TransformActions.cpp
+++ b/lib/ARCMigrate/TransformActions.cpp
@@ -673,60 +673,35 @@
   static_cast<TransformActionsImpl*>(Impl)->applyRewrites(receiver);
 }
 
-void TransformActions::reportError(StringRef error, SourceLocation loc,
-                                   SourceRange range) {
-  assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
+DiagnosticBuilder TransformActions::report(SourceLocation loc, unsigned diagId,
+                                           SourceRange range) {
+  assert(!static_cast<TransformActionsImpl *>(Impl)->isInTransaction() &&
          "Errors should be emitted out of a transaction");
 
-  SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
-                                             getASTContext().getSourceManager();
-  if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
-    return;
-
-  // FIXME: Use a custom category name to distinguish rewriter errors.
-  std::string rewriteErr = "[rewriter] ";
-  rewriteErr += error;
-  unsigned diagID
-     = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Error,
-                                                 rewriteErr);
-  Diags.Report(loc, diagID) << range;
-  ReportedErrors = true;
+  SourceManager &SM = static_cast<TransformActionsImpl *>(Impl)
+                          ->getASTContext()
+                          .getSourceManager();
+  DiagnosticsEngine::Level L = Diags.getDiagnosticLevel(diagId, loc);
+  // TODO: Move this check to the caller to ensure consistent note attachments.
+  if (L == DiagnosticsEngine::Ignored ||
+      SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+    return DiagnosticBuilder::getEmpty();
+  if (L >= DiagnosticsEngine::Error)
+    ReportedErrors = true;
+  return Diags.Report(loc, diagId) << range;
 }
 
-void TransformActions::reportWarning(StringRef warning, SourceLocation loc,
+void TransformActions::reportError(StringRef message, SourceLocation loc,
                                    SourceRange range) {
-  assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
-         "Warning should be emitted out of a transaction");
-  
-  SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
-    getASTContext().getSourceManager();
-  if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
-    return;
-  
-  // FIXME: Use a custom category name to distinguish rewriter errors.
-  std::string rewriterWarn = "[rewriter] ";
-  rewriterWarn += warning;
-  unsigned diagID
-  = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Warning,
-                                              rewriterWarn);
-  Diags.Report(loc, diagID) << range;
+  report(loc, diag::err_mt_message, range) << message;
 }
 
-void TransformActions::reportNote(StringRef note, SourceLocation loc,
+void TransformActions::reportWarning(StringRef message, SourceLocation loc,
+                                     SourceRange range) {
+  report(loc, diag::warn_mt_message, range) << message;
+}
+
+void TransformActions::reportNote(StringRef message, SourceLocation loc,
                                   SourceRange range) {
-  assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
-         "Errors should be emitted out of a transaction");
-
-  SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
-                                             getASTContext().getSourceManager();
-  if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
-    return;
-
-  // FIXME: Use a custom category name to distinguish rewriter errors.
-  std::string rewriteNote = "[rewriter] ";
-  rewriteNote += note;
-  unsigned diagID
-     = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Note,
-                                                 rewriteNote);
-  Diags.Report(loc, diagID) << range;
+  report(loc, diag::note_mt_message, range) << message;
 }
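With this change TransformActions::report() returns a DiagnosticBuilder, so callers stream arguments into the diagnostic (as TransAPIUses.cpp now does with << selName) and the old reportError/reportWarning/reportNote entry points become thin wrappers that forward a message. A rough standalone analogue of the builder idiom, not the actual clang::DiagnosticBuilder:

  #include <iostream>
  #include <sstream>
  #include <string>

  class ReportBuilder {
    std::ostringstream Msg;
  public:
    explicit ReportBuilder(const std::string &Level) { Msg << Level << ": "; }
    // The finished message is emitted when the temporary dies at the end of
    // the full expression, mirroring how DiagnosticBuilder issues its
    // diagnostic on destruction.
    ~ReportBuilder() { std::cout << Msg.str() << '\n'; }
    template <typename T> ReportBuilder &operator<<(const T &V) {
      Msg << V;
      return *this;
    }
  };

  int main() {
    ReportBuilder("error") << "NSInvocation's " << "setArgument"
                           << " is not safe here";
  }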
diff --git a/lib/ARCMigrate/Transforms.cpp b/lib/ARCMigrate/Transforms.cpp
index 679b924..c349cb5 100644
--- a/lib/ARCMigrate/Transforms.cpp
+++ b/lib/ARCMigrate/Transforms.cpp
@@ -88,7 +88,7 @@
   if (const CallExpr *
         callE = dyn_cast<CallExpr>(E->IgnoreParenCasts())) {
     if (const FunctionDecl *FD = callE->getDirectCallee()) {
-      if (FD->getAttr<CFReturnsRetainedAttr>())
+      if (FD->hasAttr<CFReturnsRetainedAttr>())
         return true;
 
       if (FD->isGlobal() &&
@@ -264,9 +264,8 @@
   }
   
   bool VisitCompoundStmt(CompoundStmt *S) {
-    for (CompoundStmt::body_iterator
-        I = S->body_begin(), E = S->body_end(); I != E; ++I)
-      mark(*I);
+    for (auto *I : S->body())
+      mark(I);
     return true;
   }
   
@@ -538,15 +537,12 @@
   impl_iterator;
   for (impl_iterator I = impl_iterator(DC->decls_begin()),
        E = impl_iterator(DC->decls_end()); I != E; ++I) {
-    for (ObjCImplementationDecl::instmeth_iterator
-         MI = I->instmeth_begin(),
-         ME = I->instmeth_end(); MI != ME; ++MI) {
-      ObjCMethodDecl *MD = *MI;
+    for (const auto *MD : I->instance_methods()) {
       if (!MD->hasBody())
         continue;
       
       if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) {
-        ObjCMethodDecl *FinalizeM = MD;
+        const ObjCMethodDecl *FinalizeM = MD;
         Transaction Trans(TA);
         TA.insert(FinalizeM->getSourceRange().getBegin(), 
                   "#if !__has_feature(objc_arc)\n");
diff --git a/lib/ARCMigrate/Transforms.h b/lib/ARCMigrate/Transforms.h
index eab5e85..54be5b5 100644
--- a/lib/ARCMigrate/Transforms.h
+++ b/lib/ARCMigrate/Transforms.h
@@ -127,29 +127,29 @@
 
 class PropertyRewriteTraverser : public ASTTraverser {
 public:
-  virtual void traverseObjCImplementation(ObjCImplementationContext &ImplCtx);
+  void traverseObjCImplementation(ObjCImplementationContext &ImplCtx) override;
 };
 
 class BlockObjCVariableTraverser : public ASTTraverser {
 public:
-  virtual void traverseBody(BodyContext &BodyCtx);
+  void traverseBody(BodyContext &BodyCtx) override;
 };
 
 class ProtectedScopeTraverser : public ASTTraverser {
 public:
-  virtual void traverseBody(BodyContext &BodyCtx);
+  void traverseBody(BodyContext &BodyCtx) override;
 };
 
 // GC transformations
 
 class GCAttrsTraverser : public ASTTraverser {
 public:
-  virtual void traverseTU(MigrationContext &MigrateCtx);
+  void traverseTU(MigrationContext &MigrateCtx) override;
 };
 
 class GCCollectableCallsTraverser : public ASTTraverser {
 public:
-  virtual void traverseBody(BodyContext &BodyCtx);
+  void traverseBody(BodyContext &BodyCtx) override;
 };
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp
index 541836b..049518e 100644
--- a/lib/AST/APValue.cpp
+++ b/lib/AST/APValue.cpp
@@ -34,7 +34,7 @@
 
 struct APValue::LV : LVBase {
   static const unsigned InlinePathSpace =
-      (MaxSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
+      (DataSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
 
   /// Path - The sequence of base classes, fields and array indices to follow to
   /// walk from Base to the subobject. When performing GCC-style folding, there
@@ -75,7 +75,7 @@
 
 struct APValue::MemberPointerData : MemberPointerBase {
   static const unsigned InlinePathSpace =
-      (MaxSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
+      (DataSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
   typedef const CXXRecordDecl *PathElem;
   union {
     PathElem Path[InlinePathSpace];
@@ -136,7 +136,7 @@
     break;
   case Vector:
     MakeVector();
-    setVector(((const Vec *)(const char *)RHS.Data)->Elts,
+    setVector(((const Vec *)(const char *)RHS.Data.buffer)->Elts,
               RHS.getVectorLength());
     break;
   case ComplexInt:
@@ -188,27 +188,27 @@
 
 void APValue::DestroyDataAndMakeUninit() {
   if (Kind == Int)
-    ((APSInt*)(char*)Data)->~APSInt();
+    ((APSInt*)(char*)Data.buffer)->~APSInt();
   else if (Kind == Float)
-    ((APFloat*)(char*)Data)->~APFloat();
+    ((APFloat*)(char*)Data.buffer)->~APFloat();
   else if (Kind == Vector)
-    ((Vec*)(char*)Data)->~Vec();
+    ((Vec*)(char*)Data.buffer)->~Vec();
   else if (Kind == ComplexInt)
-    ((ComplexAPSInt*)(char*)Data)->~ComplexAPSInt();
+    ((ComplexAPSInt*)(char*)Data.buffer)->~ComplexAPSInt();
   else if (Kind == ComplexFloat)
-    ((ComplexAPFloat*)(char*)Data)->~ComplexAPFloat();
+    ((ComplexAPFloat*)(char*)Data.buffer)->~ComplexAPFloat();
   else if (Kind == LValue)
-    ((LV*)(char*)Data)->~LV();
+    ((LV*)(char*)Data.buffer)->~LV();
   else if (Kind == Array)
-    ((Arr*)(char*)Data)->~Arr();
+    ((Arr*)(char*)Data.buffer)->~Arr();
   else if (Kind == Struct)
-    ((StructData*)(char*)Data)->~StructData();
+    ((StructData*)(char*)Data.buffer)->~StructData();
   else if (Kind == Union)
-    ((UnionData*)(char*)Data)->~UnionData();
+    ((UnionData*)(char*)Data.buffer)->~UnionData();
   else if (Kind == MemberPointer)
-    ((MemberPointerData*)(char*)Data)->~MemberPointerData();
+    ((MemberPointerData*)(char*)Data.buffer)->~MemberPointerData();
   else if (Kind == AddrLabelDiff)
-    ((AddrLabelDiffData*)(char*)Data)->~AddrLabelDiffData();
+    ((AddrLabelDiffData*)(char*)Data.buffer)->~AddrLabelDiffData();
   Kind = Uninitialized;
 }
 
@@ -239,19 +239,20 @@
            "same size.");
     return getComplexIntReal().needsCleanup();
   case LValue:
-    return reinterpret_cast<const LV *>(Data)->hasPathPtr();
+    return reinterpret_cast<const LV *>(Data.buffer)->hasPathPtr();
   case MemberPointer:
-    return reinterpret_cast<const MemberPointerData *>(Data)->hasPathPtr();
+    return reinterpret_cast<const MemberPointerData *>(Data.buffer)
+        ->hasPathPtr();
   }
   llvm_unreachable("Unknown APValue kind!");
 }
 
 void APValue::swap(APValue &RHS) {
   std::swap(Kind, RHS.Kind);
-  char TmpData[MaxSize];
-  memcpy(TmpData, Data, MaxSize);
-  memcpy(Data, RHS.Data, MaxSize);
-  memcpy(RHS.Data, TmpData, MaxSize);
+  char TmpData[DataSize];
+  memcpy(TmpData, Data.buffer, DataSize);
+  memcpy(Data.buffer, RHS.Data.buffer, DataSize);
+  memcpy(RHS.Data.buffer, TmpData, DataSize);
 }
 
 void APValue::dump() const {
@@ -498,8 +499,7 @@
         First = false;
       }
     }
-    for (RecordDecl::field_iterator FI = RD->field_begin();
-         FI != RD->field_end(); ++FI) {
+    for (const auto *FI : RD->fields()) {
       if (!First)
         Out << ", ";
       if (FI->isUnnamedBitfield()) continue;
@@ -546,39 +546,39 @@
 
 const APValue::LValueBase APValue::getLValueBase() const {
   assert(isLValue() && "Invalid accessor");
-  return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getPointer();
+  return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getPointer();
 }
 
 bool APValue::isLValueOnePastTheEnd() const {
   assert(isLValue() && "Invalid accessor");
-  return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getInt();
+  return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getInt();
 }
 
 CharUnits &APValue::getLValueOffset() {
   assert(isLValue() && "Invalid accessor");
-  return ((LV*)(void*)Data)->Offset;
+  return ((LV*)(void*)Data.buffer)->Offset;
 }
 
 bool APValue::hasLValuePath() const {
   assert(isLValue() && "Invalid accessor");
-  return ((const LV*)(const char*)Data)->hasPath();
+  return ((const LV*)(const char*)Data.buffer)->hasPath();
 }
 
 ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
   assert(isLValue() && hasLValuePath() && "Invalid accessor");
-  const LV &LVal = *((const LV*)(const char*)Data);
+  const LV &LVal = *((const LV*)(const char*)Data.buffer);
   return ArrayRef<LValuePathEntry>(LVal.getPath(), LVal.PathLength);
 }
 
 unsigned APValue::getLValueCallIndex() const {
   assert(isLValue() && "Invalid accessor");
-  return ((const LV*)(const char*)Data)->CallIndex;
+  return ((const LV*)(const char*)Data.buffer)->CallIndex;
 }
 
 void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
                         unsigned CallIndex) {
   assert(isLValue() && "Invalid accessor");
-  LV &LVal = *((LV*)(char*)Data);
+  LV &LVal = *((LV*)(char*)Data.buffer);
   LVal.BaseAndIsOnePastTheEnd.setPointer(B);
   LVal.BaseAndIsOnePastTheEnd.setInt(false);
   LVal.Offset = O;
@@ -590,7 +590,7 @@
                         ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
                         unsigned CallIndex) {
   assert(isLValue() && "Invalid accessor");
-  LV &LVal = *((LV*)(char*)Data);
+  LV &LVal = *((LV*)(char*)Data.buffer);
   LVal.BaseAndIsOnePastTheEnd.setPointer(B);
   LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd);
   LVal.Offset = O;
@@ -601,39 +601,42 @@
 
 const ValueDecl *APValue::getMemberPointerDecl() const {
   assert(isMemberPointer() && "Invalid accessor");
-  const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+  const MemberPointerData &MPD =
+      *((const MemberPointerData *)(const char *)Data.buffer);
   return MPD.MemberAndIsDerivedMember.getPointer();
 }
 
 bool APValue::isMemberPointerToDerivedMember() const {
   assert(isMemberPointer() && "Invalid accessor");
-  const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+  const MemberPointerData &MPD =
+      *((const MemberPointerData *)(const char *)Data.buffer);
   return MPD.MemberAndIsDerivedMember.getInt();
 }
 
 ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
   assert(isMemberPointer() && "Invalid accessor");
-  const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+  const MemberPointerData &MPD =
+      *((const MemberPointerData *)(const char *)Data.buffer);
   return ArrayRef<const CXXRecordDecl*>(MPD.getPath(), MPD.PathLength);
 }
 
 void APValue::MakeLValue() {
   assert(isUninit() && "Bad state change");
-  assert(sizeof(LV) <= MaxSize && "LV too big");
-  new ((void*)(char*)Data) LV();
+  static_assert(sizeof(LV) <= DataSize, "LV too big");
+  new ((void*)(char*)Data.buffer) LV();
   Kind = LValue;
 }
 
 void APValue::MakeArray(unsigned InitElts, unsigned Size) {
   assert(isUninit() && "Bad state change");
-  new ((void*)(char*)Data) Arr(InitElts, Size);
+  new ((void*)(char*)Data.buffer) Arr(InitElts, Size);
   Kind = Array;
 }
 
 void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
                                 ArrayRef<const CXXRecordDecl*> Path) {
   assert(isUninit() && "Bad state change");
-  MemberPointerData *MPD = new ((void*)(char*)Data) MemberPointerData;
+  MemberPointerData *MPD = new ((void*)(char*)Data.buffer) MemberPointerData;
   Kind = MemberPointer;
   MPD->MemberAndIsDerivedMember.setPointer(Member);
   MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember);
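APValue constructs its payloads with placement new into Data.buffer, tears them down with explicit destructor calls in DestroyDataAndMakeUninit(), and now checks the buffer size at compile time with static_assert. A standalone sketch of that storage idiom, using an ordinary std::string payload and a locally defined DataSize purely for illustration:

  #include <cstddef>
  #include <iostream>
  #include <new>
  #include <string>
  #include <type_traits>

  int main() {
    typedef std::string Payload;
    static const std::size_t DataSize = sizeof(Payload);
    // Raw, suitably aligned storage playing the role of Data.buffer.
    std::aligned_storage<DataSize, alignof(Payload)>::type Data;

    static_assert(sizeof(Payload) <= DataSize, "Payload too big");

    // Construct in place...
    Payload *P = new (static_cast<void *>(&Data)) Payload("hello");
    std::cout << *P << '\n';
    // ...and destroy explicitly, as DestroyDataAndMakeUninit() does.
    P->~Payload();
  }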
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index a03cf9e..3ed2a9f 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -29,6 +29,7 @@
 #include "clang/AST/RecordLayout.h"
 #include "clang/AST/RecursiveASTVisitor.h"
 #include "clang/AST/TypeLoc.h"
+#include "clang/AST/VTableBuilder.h"
 #include "clang/Basic/Builtins.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Basic/TargetInfo.h"
@@ -62,6 +63,13 @@
 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
   if (!CommentsLoaded && ExternalSource) {
     ExternalSource->ReadComments();
+
+#ifndef NDEBUG
+    ArrayRef<RawComment *> RawComments = Comments.getComments();
+    assert(std::is_sorted(RawComments.begin(), RawComments.end(),
+                          BeforeThanCompare<RawComment>(SourceMgr)));
+#endif
+
     CommentsLoaded = true;
   }
 
@@ -137,11 +145,23 @@
     DeclLoc = D->getLocStart();
   else {
     DeclLoc = D->getLocation();
-    // If location of the typedef name is in a macro, it is because being
-    // declared via a macro. Try using declaration's starting location
-    // as the "declaration location".
-    if (DeclLoc.isMacroID() && isa<TypedefDecl>(D))
-      DeclLoc = D->getLocStart();
+    if (DeclLoc.isMacroID()) {
+      if (isa<TypedefDecl>(D)) {
+        // If the location of the typedef name is in a macro, it is because it
+        // is being declared via a macro. Try using the declaration's starting
+        // location as the "declaration location".
+        DeclLoc = D->getLocStart();
+      } else if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+        // If location of the tag decl is inside a macro, but the spelling of
+        // the tag name comes from a macro argument, it looks like a special
+        // macro like NS_ENUM is being used to define the tag decl.  In that
+        // case, adjust the source location to the expansion loc so that we can
+        // attach the comment to the tag decl.
+        if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
+            TD->isCompleteDefinition())
+          DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
+      }
+    }
   }
 
   // If the declaration doesn't map directly to a location in a file, we
@@ -331,11 +351,9 @@
   // Search for comments attached to declarations in the redeclaration chain.
   const RawComment *RC = NULL;
   const Decl *OriginalDeclForRC = NULL;
-  for (Decl::redecl_iterator I = D->redecls_begin(),
-                             E = D->redecls_end();
-       I != E; ++I) {
+  for (auto I : D->redecls()) {
     llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
-        RedeclComments.find(*I);
+        RedeclComments.find(I);
     if (Pos != RedeclComments.end()) {
       const RawCommentAndCacheFlags &Raw = Pos->second;
       if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
@@ -344,16 +362,16 @@
         break;
       }
     } else {
-      RC = getRawCommentForDeclNoCache(*I);
-      OriginalDeclForRC = *I;
+      RC = getRawCommentForDeclNoCache(I);
+      OriginalDeclForRC = I;
       RawCommentAndCacheFlags Raw;
       if (RC) {
         Raw.setRaw(RC);
         Raw.setKind(RawCommentAndCacheFlags::FromDecl);
       } else
         Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
-      Raw.setOriginalDecl(*I);
-      RedeclComments[*I] = Raw;
+      Raw.setOriginalDecl(I);
+      RedeclComments[I] = Raw;
       if (RC)
         break;
     }
@@ -371,10 +389,8 @@
   Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
   Raw.setOriginalDecl(OriginalDeclForRC);
 
-  for (Decl::redecl_iterator I = D->redecls_begin(),
-                             E = D->redecls_end();
-       I != E; ++I) {
-    RawCommentAndCacheFlags &R = RedeclComments[*I];
+  for (auto I : D->redecls()) {
+    RawCommentAndCacheFlags &R = RedeclComments[I];
     if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
       R = Raw;
   }
@@ -390,10 +406,7 @@
     if (!ID)
       return;
     // Add redeclared method here.
-    for (ObjCInterfaceDecl::known_extensions_iterator
-           Ext = ID->known_extensions_begin(),
-           ExtEnd = ID->known_extensions_end();
-         Ext != ExtEnd; ++Ext) {
+    for (const auto *Ext : ID->known_extensions()) {
       if (ObjCMethodDecl *RedeclaredMethod =
             Ext->getMethod(ObjCMethod->getSelector(),
                                   ObjCMethod->isInstanceMethod()))
@@ -484,11 +497,10 @@
       if (!(RD = RD->getDefinition()))
         return NULL;
       // Check non-virtual bases.
-      for (CXXRecordDecl::base_class_const_iterator I =
-           RD->bases_begin(), E = RD->bases_end(); I != E; ++I) {
-        if (I->isVirtual() || (I->getAccessSpecifier() != AS_public))
+      for (const auto &I : RD->bases()) {
+        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
           continue;
-        QualType Ty = I->getType();
+        QualType Ty = I.getType();
         if (Ty.isNull())
           continue;
         if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
@@ -500,11 +512,10 @@
         }
       }
       // Check virtual bases.
-      for (CXXRecordDecl::base_class_const_iterator I =
-           RD->vbases_begin(), E = RD->vbases_end(); I != E; ++I) {
-        if (I->getAccessSpecifier() != AS_public)
+      for (const auto &I : RD->vbases()) {
+        if (I.getAccessSpecifier() != AS_public)
           continue;
-        QualType Ty = I->getType();
+        QualType Ty = I.getType();
         if (Ty.isNull())
           continue;
         if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
@@ -667,6 +678,7 @@
   switch (T.getCXXABI().getKind()) {
   case TargetCXXABI::GenericARM:
   case TargetCXXABI::iOS:
+  case TargetCXXABI::iOS64:
     return CreateARMCXXABI(*this);
   case TargetCXXABI::GenericAArch64: // Same as Itanium at this level
   case TargetCXXABI::GenericItanium:
@@ -782,11 +794,7 @@
        A != AEnd; ++A)
     A->second->~AttrVec();
 
-  for (llvm::DenseMap<const DeclContext *, MangleNumberingContext *>::iterator
-           I = MangleNumberingContexts.begin(),
-           E = MangleNumberingContexts.end();
-       I != E; ++I)
-    delete I->second;
+  llvm::DeleteContainerSeconds(MangleNumberingContexts);
 }
 
 void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
@@ -794,8 +802,8 @@
 }
 
 void
-ASTContext::setExternalSource(OwningPtr<ExternalASTSource> &Source) {
-  ExternalSource.reset(Source.take());
+ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
+  ExternalSource = Source;
 }
 
 void ASTContext::PrintStats() const {
@@ -849,7 +857,7 @@
                << NumImplicitDestructors
                << " implicit destructors created\n";
 
-  if (ExternalSource.get()) {
+  if (ExternalSource) {
     llvm::errs() << "\n";
     ExternalSource->PrintStats();
   }
@@ -857,45 +865,47 @@
   BumpAlloc.PrintStats();
 }
 
+RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
+                                            RecordDecl::TagKind TK) const {
+  SourceLocation Loc;
+  RecordDecl *NewDecl;
+  if (getLangOpts().CPlusPlus)
+    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
+                                    Loc, &Idents.get(Name));
+  else
+    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
+                                 &Idents.get(Name));
+  NewDecl->setImplicit();
+  return NewDecl;
+}
+
+TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
+                                              StringRef Name) const {
+  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
+  TypedefDecl *NewDecl = TypedefDecl::Create(
+      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
+      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
+  NewDecl->setImplicit();
+  return NewDecl;
+}
+
 TypedefDecl *ASTContext::getInt128Decl() const {
-  if (!Int128Decl) {
-    TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(Int128Ty);
-    Int128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this), 
-                                     getTranslationUnitDecl(),
-                                     SourceLocation(),
-                                     SourceLocation(),
-                                     &Idents.get("__int128_t"),
-                                     TInfo);
-  }
-  
+  if (!Int128Decl)
+    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
   return Int128Decl;
 }
 
 TypedefDecl *ASTContext::getUInt128Decl() const {
-  if (!UInt128Decl) {
-    TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(UnsignedInt128Ty);
-    UInt128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this), 
-                                     getTranslationUnitDecl(),
-                                     SourceLocation(),
-                                     SourceLocation(),
-                                     &Idents.get("__uint128_t"),
-                                     TInfo);
-  }
-  
+  if (!UInt128Decl)
+    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
   return UInt128Decl;
 }
 
 TypeDecl *ASTContext::getFloat128StubType() const {
   assert(LangOpts.CPlusPlus && "should only be called for c++");
-  if (!Float128StubDecl) {
-    Float128StubDecl = CXXRecordDecl::Create(const_cast<ASTContext &>(*this), 
-                                             TTK_Struct,
-                                             getTranslationUnitDecl(),
-                                             SourceLocation(),
-                                             SourceLocation(),
-                                             &Idents.get("__float128"));
-  }
-  
+  if (!Float128StubDecl)
+    Float128StubDecl = buildImplicitRecord("__float128");
+
   return Float128StubDecl;
 }
 
@@ -1618,7 +1628,7 @@
   }
   case Type::MemberPointer: {
     const MemberPointerType *MPT = cast<MemberPointerType>(T);
-    llvm::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT);
+    std::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT);
     break;
   }
   case Type::Complex: {
@@ -1632,8 +1642,9 @@
   }
   case Type::ObjCObject:
     return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
+  case Type::Adjusted:
   case Type::Decayed:
-    return getTypeInfo(cast<DecayedType>(T)->getDecayedType().getTypePtr());
+    return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
   case Type::ObjCInterface: {
     const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
     const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
@@ -1761,13 +1772,18 @@
   if (Target->getTriple().getArch() == llvm::Triple::xcore)
     return ABIAlign;  // Never overalign on XCore.
 
+  const TypedefType *TT = T->getAs<TypedefType>();
+
   // Double and long long should be naturally aligned if possible.
-  if (const ComplexType* CT = T->getAs<ComplexType>())
+  if (const ComplexType *CT = T->getAs<ComplexType>())
     T = CT->getElementType().getTypePtr();
   if (T->isSpecificBuiltinType(BuiltinType::Double) ||
       T->isSpecificBuiltinType(BuiltinType::LongLong) ||
       T->isSpecificBuiltinType(BuiltinType::ULongLong))
-    return std::max(ABIAlign, (unsigned)getTypeSize(T));
+    // Don't increase the alignment if an alignment attribute was specified
+    // on a typedef declaration.
+    if (!TT || !TT->getDecl()->getMaxAlignment())
+      return std::max(ABIAlign, (unsigned)getTypeSize(T));
 
   return ABIAlign;
 }
@@ -1796,9 +1812,8 @@
   if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
     DeepCollectObjCIvars(SuperClass, false, Ivars);
   if (!leafClass) {
-    for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
-         E = OI->ivar_end(); I != E; ++I)
-      Ivars.push_back(*I);
+    for (const auto *I : OI->ivars())
+      Ivars.push_back(I);
   } else {
     ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
     for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 
@@ -1814,24 +1829,17 @@
   if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
     // We can use protocol_iterator here instead of
     // all_referenced_protocol_iterator since we are walking all categories.    
-    for (ObjCInterfaceDecl::all_protocol_iterator P = OI->all_referenced_protocol_begin(),
-         PE = OI->all_referenced_protocol_end(); P != PE; ++P) {
-      ObjCProtocolDecl *Proto = (*P);
+    for (auto *Proto : OI->all_referenced_protocols()) {
       Protocols.insert(Proto->getCanonicalDecl());
-      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
-           PE = Proto->protocol_end(); P != PE; ++P) {
-        Protocols.insert((*P)->getCanonicalDecl());
-        CollectInheritedProtocols(*P, Protocols);
+      for (auto *P : Proto->protocols()) {
+        Protocols.insert(P->getCanonicalDecl());
+        CollectInheritedProtocols(P, Protocols);
       }
     }
     
     // Categories of this Interface.
-    for (ObjCInterfaceDecl::visible_categories_iterator
-           Cat = OI->visible_categories_begin(),
-           CatEnd = OI->visible_categories_end();
-         Cat != CatEnd; ++Cat) {
-      CollectInheritedProtocols(*Cat, Protocols);
-    }
+    for (const auto *Cat : OI->visible_categories())
+      CollectInheritedProtocols(Cat, Protocols);
 
     if (ObjCInterfaceDecl *SD = OI->getSuperClass())
       while (SD) {
@@ -1839,22 +1847,16 @@
         SD = SD->getSuperClass();
       }
   } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
-    for (ObjCCategoryDecl::protocol_iterator P = OC->protocol_begin(),
-         PE = OC->protocol_end(); P != PE; ++P) {
-      ObjCProtocolDecl *Proto = (*P);
+    for (auto *Proto : OC->protocols()) {
       Protocols.insert(Proto->getCanonicalDecl());
-      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
-           PE = Proto->protocol_end(); P != PE; ++P)
-        CollectInheritedProtocols(*P, Protocols);
+      for (const auto *P : Proto->protocols())
+        CollectInheritedProtocols(P, Protocols);
     }
   } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
-    for (ObjCProtocolDecl::protocol_iterator P = OP->protocol_begin(),
-         PE = OP->protocol_end(); P != PE; ++P) {
-      ObjCProtocolDecl *Proto = (*P);
+    for (auto *Proto : OP->protocols()) {
       Protocols.insert(Proto->getCanonicalDecl());
-      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
-           PE = Proto->protocol_end(); P != PE; ++P)
-        CollectInheritedProtocols(*P, Protocols);
+      for (const auto *P : Proto->protocols())
+        CollectInheritedProtocols(P, Protocols);
     }
   }
 }
@@ -1862,12 +1864,8 @@
 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
   unsigned count = 0;  
   // Count ivars declared in class extension.
-  for (ObjCInterfaceDecl::known_extensions_iterator
-         Ext = OI->known_extensions_begin(),
-         ExtEnd = OI->known_extensions_end();
-       Ext != ExtEnd; ++Ext) {
+  for (const auto *Ext : OI->known_extensions())
     count += Ext->ivar_size();
-  }
   
   // Count ivar defined in this class's implementation.  This
   // includes synthesized ivars.
@@ -2080,12 +2078,12 @@
 
   QualType Result;
   if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
-    Result = getFunctionNoProtoType(FNPT->getResultType(), Info);
+    Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
   } else {
     const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
     FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
     EPI.ExtInfo = Info;
-    Result = getFunctionType(FPT->getResultType(), FPT->getArgTypes(), EPI);
+    Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
   }
 
   return cast<FunctionType>(Result.getTypePtr());
@@ -2097,7 +2095,7 @@
   while (true) {
     const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
     FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
-    FD->setType(getFunctionType(ResultType, FPT->getArgTypes(), EPI));
+    FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
     if (FunctionDecl *Next = FD->getPreviousDecl())
       FD = Next;
     else
@@ -2163,15 +2161,30 @@
   return QualType(New, 0);
 }
 
+QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
+  llvm::FoldingSetNodeID ID;
+  AdjustedType::Profile(ID, Orig, New);
+  void *InsertPos = 0;
+  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+  if (AT)
+    return QualType(AT, 0);
+
+  QualType Canonical = getCanonicalType(New);
+
+  // Get the new insert position for the node we care about.
+  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+  assert(AT == 0 && "Shouldn't be in the map!");
+
+  AT = new (*this, TypeAlignment)
+      AdjustedType(Type::Adjusted, Orig, New, Canonical);
+  Types.push_back(AT);
+  AdjustedTypes.InsertNode(AT, InsertPos);
+  return QualType(AT, 0);
+}
+
 QualType ASTContext::getDecayedType(QualType T) const {
   assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
 
-  llvm::FoldingSetNodeID ID;
-  DecayedType::Profile(ID, T);
-  void *InsertPos = 0;
-  if (DecayedType *DT = DecayedTypes.FindNodeOrInsertPos(ID, InsertPos))
-    return QualType(DT, 0);
-
   QualType Decayed;
 
   // C99 6.7.5.3p7:
@@ -2189,17 +2202,23 @@
   if (T->isFunctionType())
     Decayed = getPointerType(T);
 
+  llvm::FoldingSetNodeID ID;
+  AdjustedType::Profile(ID, T, Decayed);
+  void *InsertPos = 0;
+  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+  if (AT)
+    return QualType(AT, 0);
+
   QualType Canonical = getCanonicalType(Decayed);
 
   // Get the new insert position for the node we care about.
-  DecayedType *NewIP = DecayedTypes.FindNodeOrInsertPos(ID, InsertPos);
-  assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+  assert(AT == 0 && "Shouldn't be in the map!");
 
-  DecayedType *New =
-      new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
-  Types.push_back(New);
-  DecayedTypes.InsertNode(New, InsertPos);
-  return QualType(New, 0);
+  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
+  Types.push_back(AT);
+  AdjustedTypes.InsertNode(AT, InsertPos);
+  return QualType(AT, 0);
 }
 
 /// getBlockPointerType - Return the uniqued reference to the type for
@@ -2798,8 +2817,6 @@
           T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
 }
 
-/// getFunctionType - Return a normal function type with a typed argument
-/// list.  isVariadic indicates whether the argument list includes '...'.
 QualType
 ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
                             const FunctionProtoType::ExtProtoInfo &EPI) const {
@@ -2873,7 +2890,7 @@
   } else if (EPI.ExceptionSpecType == EST_Unevaluated) {
     Size += sizeof(FunctionDecl*);
   }
-  if (EPI.ConsumedArguments)
+  if (EPI.ConsumedParameters)
     Size += NumArgs * sizeof(bool);
 
   FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
@@ -3507,6 +3524,56 @@
   return QualType(T, 0);
 }
 
+/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
+/// protocol list adopt all protocols in QT's qualified-id protocol
+/// list.
+bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
+                                                ObjCInterfaceDecl *IC) {
+  if (!QT->isObjCQualifiedIdType())
+    return false;
+  
+  if (const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>()) {
+    // If both the right and left sides have qualifiers.
+    for (auto *Proto : OPT->quals()) {
+      if (!IC->ClassImplementsProtocol(Proto, false))
+        return false;
+    }
+    return true;
+  }
+  return false;
+}
+
+/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
+/// QT's qualified-id protocol list adopt all protocols in IDecl's list
+/// of protocols.
+bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
+                                                ObjCInterfaceDecl *IDecl) {
+  if (!QT->isObjCQualifiedIdType())
+    return false;
+  const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>();
+  if (!OPT)
+    return false;
+  if (!IDecl->hasDefinition())
+    return false;
+  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
+  CollectInheritedProtocols(IDecl, InheritedProtocols);
+  if (InheritedProtocols.empty())
+    return false;
+      
+  for (auto *PI : InheritedProtocols) {
+    // If both the right and left sides have qualifiers.
+    bool Adopts = false;
+    for (auto *Proto : OPT->quals()) {
+      // Returns 'true' if PI is in the inheritance hierarchy of Proto.
+      if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
+        break;
+    }
+    if (!Adopts)
+      return false;
+  }
+  return true;
+}
+
 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
 /// the given object type.
 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
@@ -4479,22 +4546,10 @@
   return 1;
 }
 
-static RecordDecl *
-CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
-                 DeclContext *DC, IdentifierInfo *Id) {
-  SourceLocation Loc;
-  if (Ctx.getLangOpts().CPlusPlus)
-    return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
-  else
-    return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
-}
-
 // getCFConstantStringType - Return the type used for constant CFStrings.
 QualType ASTContext::getCFConstantStringType() const {
   if (!CFConstantStringTypeDecl) {
-    CFConstantStringTypeDecl =
-      CreateRecordDecl(*this, TTK_Struct, TUDecl,
-                       &Idents.get("NSConstantString"));
+    CFConstantStringTypeDecl = buildImplicitRecord("NSConstantString");
     CFConstantStringTypeDecl->startDefinition();
 
     QualType FieldTypes[4];
@@ -4529,8 +4584,7 @@
 
 QualType ASTContext::getObjCSuperType() const {
   if (ObjCSuperType.isNull()) {
-    RecordDecl *ObjCSuperTypeDecl  =
-      CreateRecordDecl(*this, TTK_Struct, TUDecl, &Idents.get("objc_super"));
+    RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
     TUDecl->addDecl(ObjCSuperTypeDecl);
     ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
   }
@@ -4547,12 +4601,11 @@
   if (BlockDescriptorType)
     return getTagDeclType(BlockDescriptorType);
 
-  RecordDecl *T;
+  RecordDecl *RD;
   // FIXME: Needs the FlagAppleBlock bit.
-  T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
-                       &Idents.get("__block_descriptor"));
-  T->startDefinition();
-  
+  RD = buildImplicitRecord("__block_descriptor");
+  RD->startDefinition();
+
   QualType FieldTypes[] = {
     UnsignedLongTy,
     UnsignedLongTy,
@@ -4564,20 +4617,17 @@
   };
 
   for (size_t i = 0; i < 2; ++i) {
-    FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
-                                         SourceLocation(),
-                                         &Idents.get(FieldNames[i]),
-                                         FieldTypes[i], /*TInfo=*/0,
-                                         /*BitWidth=*/0,
-                                         /*Mutable=*/false,
-                                         ICIS_NoInit);
+    FieldDecl *Field = FieldDecl::Create(
+        *this, RD, SourceLocation(), SourceLocation(),
+        &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/0,
+        /*BitWidth=*/0, /*Mutable=*/false, ICIS_NoInit);
     Field->setAccess(AS_public);
-    T->addDecl(Field);
+    RD->addDecl(Field);
   }
 
-  T->completeDefinition();
+  RD->completeDefinition();
 
-  BlockDescriptorType = T;
+  BlockDescriptorType = RD;
 
   return getTagDeclType(BlockDescriptorType);
 }
@@ -4586,12 +4636,11 @@
   if (BlockDescriptorExtendedType)
     return getTagDeclType(BlockDescriptorExtendedType);
 
-  RecordDecl *T;
+  RecordDecl *RD;
   // FIXME: Needs the FlagAppleBlock bit.
-  T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
-                       &Idents.get("__block_descriptor_withcopydispose"));
-  T->startDefinition();
-  
+  RD = buildImplicitRecord("__block_descriptor_withcopydispose");
+  RD->startDefinition();
+
   QualType FieldTypes[] = {
     UnsignedLongTy,
     UnsignedLongTy,
@@ -4607,21 +4656,18 @@
   };
 
   for (size_t i = 0; i < 4; ++i) {
-    FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
-                                         SourceLocation(),
-                                         &Idents.get(FieldNames[i]),
-                                         FieldTypes[i], /*TInfo=*/0,
-                                         /*BitWidth=*/0,
-                                         /*Mutable=*/false,
-                                         ICIS_NoInit);
+    FieldDecl *Field = FieldDecl::Create(
+        *this, RD, SourceLocation(), SourceLocation(),
+        &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/0,
+        /*BitWidth=*/0,
+        /*Mutable=*/false, ICIS_NoInit);
     Field->setAccess(AS_public);
-    T->addDecl(Field);
+    RD->addDecl(Field);
   }
 
-  T->completeDefinition();
+  RD->completeDefinition();
 
-  BlockDescriptorExtendedType = T;
-
+  BlockDescriptorExtendedType = RD;
   return getTagDeclType(BlockDescriptorExtendedType);
 }
 
@@ -4691,12 +4737,8 @@
 
 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
   if (!ObjCInstanceTypeDecl)
-    ObjCInstanceTypeDecl = TypedefDecl::Create(*this, 
-                                               getTranslationUnitDecl(),
-                                               SourceLocation(), 
-                                               SourceLocation(),
-                                               &Idents.get("instancetype"), 
-                                     getTrivialTypeSourceInfo(getObjCIdType()));
+    ObjCInstanceTypeDecl =
+        buildImplicitTypedef(getObjCIdType(), "instancetype");
   return ObjCInstanceTypeDecl;
 }
 
@@ -4742,21 +4784,19 @@
       Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
   // Encode result type.
   if (getLangOpts().EncodeExtendedBlockSig)
-    getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None,
-                            BlockTy->getAs<FunctionType>()->getResultType(),
-                            S, true /*Extended*/);
+    getObjCEncodingForMethodParameter(
+        Decl::OBJC_TQ_None, BlockTy->getAs<FunctionType>()->getReturnType(), S,
+        true /*Extended*/);
   else
-    getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(),
-                           S);
+    getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getReturnType(), S);
   // Compute size of all parameters.
   // Start with computing size of a pointer in number of bytes.
   // FIXME: There might(should) be a better way of doing this computation!
   SourceLocation Loc;
   CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
   CharUnits ParmOffset = PtrSize;
-  for (BlockDecl::param_const_iterator PI = Decl->param_begin(),
-       E = Decl->param_end(); PI != E; ++PI) {
-    QualType PType = (*PI)->getType();
+  for (auto PI : Decl->params()) {
+    QualType PType = PI->getType();
     CharUnits sz = getObjCEncodingTypeSize(PType);
     if (sz.isZero())
       continue;
@@ -4770,9 +4810,7 @@
   
   // Argument types.
   ParmOffset = PtrSize;
-  for (BlockDecl::param_const_iterator PI = Decl->param_begin(), E =
-       Decl->param_end(); PI != E; ++PI) {
-    ParmVarDecl *PVDecl = *PI;
+  for (auto PVDecl : Decl->params()) {
     QualType PType = PVDecl->getOriginalType(); 
     if (const ArrayType *AT =
           dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
@@ -4797,12 +4835,11 @@
 bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
                                                 std::string& S) {
   // Encode result type.
-  getObjCEncodingForType(Decl->getResultType(), S);
+  getObjCEncodingForType(Decl->getReturnType(), S);
   CharUnits ParmOffset;
   // Compute size of all parameters.
-  for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
-       E = Decl->param_end(); PI != E; ++PI) {
-    QualType PType = (*PI)->getType();
+  for (auto PI : Decl->params()) {
+    QualType PType = PI->getType();
     CharUnits sz = getObjCEncodingTypeSize(PType);
     if (sz.isZero())
       continue;
@@ -4815,9 +4852,7 @@
   ParmOffset = CharUnits::Zero();
 
   // Argument types.
-  for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
-       E = Decl->param_end(); PI != E; ++PI) {
-    ParmVarDecl *PVDecl = *PI;
+  for (auto PVDecl : Decl->params()) {
     QualType PType = PVDecl->getOriginalType();
     if (const ArrayType *AT =
           dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
@@ -4859,8 +4894,8 @@
                                               bool Extended) const {
   // FIXME: This is not very efficient.
   // Encode return type.
-  getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 
-                                    Decl->getResultType(), S, Extended);
+  getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
+                                    Decl->getReturnType(), S, Extended);
   // Compute size of all parameters.
   // Start with computing size of a pointer in number of bytes.
   // FIXME: There might(should) be a better way of doing this computation!
@@ -4907,6 +4942,26 @@
   return false;
 }
 
+ObjCPropertyImplDecl *
+ASTContext::getObjCPropertyImplDeclForPropertyDecl(
+                                      const ObjCPropertyDecl *PD,
+                                      const Decl *Container) const {
+  if (!Container)
+    return 0;
+  if (const ObjCCategoryImplDecl *CID =
+      dyn_cast<ObjCCategoryImplDecl>(Container)) {
+    for (auto *PID : CID->property_impls())
+      if (PID->getPropertyDecl() == PD)
+        return PID;
+  } else {
+    const ObjCImplementationDecl *OID = cast<ObjCImplementationDecl>(Container);
+    for (auto *PID : OID->property_impls())
+      if (PID->getPropertyDecl() == PD)
+        return PID;
+  }
+  return 0;
+}
+
 /// getObjCEncodingForPropertyDecl - Return the encoded type for this
 /// property declaration. If non-NULL, Container must be either an
 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
@@ -4939,37 +4994,12 @@
   bool Dynamic = false;
   ObjCPropertyImplDecl *SynthesizePID = 0;
 
-  // FIXME: Duplicated code due to poor abstraction.
-  if (Container) {
-    if (const ObjCCategoryImplDecl *CID =
-        dyn_cast<ObjCCategoryImplDecl>(Container)) {
-      for (ObjCCategoryImplDecl::propimpl_iterator
-             i = CID->propimpl_begin(), e = CID->propimpl_end();
-           i != e; ++i) {
-        ObjCPropertyImplDecl *PID = *i;
-        if (PID->getPropertyDecl() == PD) {
-          if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
-            Dynamic = true;
-          } else {
-            SynthesizePID = PID;
-          }
-        }
-      }
-    } else {
-      const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
-      for (ObjCCategoryImplDecl::propimpl_iterator
-             i = OID->propimpl_begin(), e = OID->propimpl_end();
-           i != e; ++i) {
-        ObjCPropertyImplDecl *PID = *i;
-        if (PID->getPropertyDecl() == PD) {
-          if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
-            Dynamic = true;
-          } else {
-            SynthesizePID = PID;
-          }
-        }
-      }
-    }
+  if (ObjCPropertyImplDecl *PropertyImpDecl =
+      getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
+    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+      Dynamic = true;
+    else
+      SynthesizePID = PropertyImpDecl;
   }
 
   // FIXME: This is not very efficient.
@@ -4988,6 +5018,8 @@
       S += ",C";
     if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain)
       S += ",&";
+    if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+      S += ",W";
   } else {
     switch (PD->getSetterKind()) {
     case ObjCPropertyDecl::Assign: break;
@@ -5322,9 +5354,7 @@
       if (!RDecl->isUnion()) {
         getObjCEncodingForStructureImpl(RDecl, S, FD);
       } else {
-        for (RecordDecl::field_iterator Field = RDecl->field_begin(),
-                                     FieldEnd = RDecl->field_end();
-             Field != FieldEnd; ++Field) {
+        for (const auto *Field : RDecl->fields()) {
           if (FD) {
             S += '"';
             S += Field->getNameAsString();
@@ -5334,7 +5364,7 @@
           // Special case bit-fields.
           if (Field->isBitField()) {
             getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
-                                       *Field);
+                                       Field);
           } else {
             QualType qt = Field->getType();
             getLegacyIntegralTypeEncoding(qt);
@@ -5358,37 +5388,38 @@
       
       S += '<';
       // Block return type
-      getObjCEncodingForTypeImpl(FT->getResultType(), S, 
-                                 ExpandPointedToStructures, ExpandStructures, 
-                                 FD, 
-                                 false /* OutermostType */, 
-                                 EncodingProperty, 
-                                 false /* StructField */, 
-                                 EncodeBlockParameters, 
-                                 EncodeClassNames);
+      getObjCEncodingForTypeImpl(
+          FT->getReturnType(), S, ExpandPointedToStructures, ExpandStructures,
+          FD, false /* OutermostType */, EncodingProperty,
+          false /* StructField */, EncodeBlockParameters, EncodeClassNames);
       // Block self
       S += "@?";
       // Block parameters
       if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
-        for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(),
-               E = FPT->arg_type_end(); I && (I != E); ++I) {
-          getObjCEncodingForTypeImpl(*I, S, 
-                                     ExpandPointedToStructures, 
-                                     ExpandStructures, 
-                                     FD, 
-                                     false /* OutermostType */, 
-                                     EncodingProperty, 
-                                     false /* StructField */, 
-                                     EncodeBlockParameters, 
-                                     EncodeClassNames);
-        }
+        for (const auto &I : FPT->param_types())
+          getObjCEncodingForTypeImpl(
+              I, S, ExpandPointedToStructures, ExpandStructures, FD,
+              false /* OutermostType */, EncodingProperty,
+              false /* StructField */, EncodeBlockParameters, EncodeClassNames);
       }
       S += '>';
     }
     return;
   }
 
-  case Type::ObjCObject:
+  case Type::ObjCObject: {
+    // Hack to match the legacy encoding of *id and *Class.
+    QualType Ty = getObjCObjectPointerType(CT);
+    if (Ty->isObjCIdType()) {
+      S += "{objc_object=}";
+      return;
+    }
+    if (Ty->isObjCClassType()) {
+      S += "{objc_class=}";
+      return;
+    }
+  }
+  // Fall through; other ObjCObject types are encoded like ObjCInterface.
   case Type::ObjCInterface: {
     // Ignore protocol qualifiers when mangling at this level.
     T = T->castAs<ObjCObjectType>()->getBaseType();
@@ -5440,10 +5471,9 @@
         // Note that we do extended encoding of protocol qualifer list
         // Only when doing ivar or property encoding.
         S += '"';
-        for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
-             E = OPT->qual_end(); I != E; ++I) {
+        for (const auto *I : OPT->quals()) {
           S += '<';
-          S += (*I)->getNameAsString();
+          S += I->getNameAsString();
           S += '>';
         }
         S += '"';
@@ -5486,10 +5516,9 @@
         (FD || EncodingProperty || EncodeClassNames)) {
       S += '"';
       S += OPT->getInterfaceDecl()->getIdentifier()->getName();
-      for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
-           E = OPT->qual_end(); I != E; ++I) {
+      for (const auto *I : OPT->quals()) {
         S += '<';
-        S += (*I)->getNameAsString();
+        S += I->getNameAsString();
         S += '>';
       }
       S += '"';
@@ -5542,11 +5571,9 @@
   const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
 
   if (CXXRec) {
-    for (CXXRecordDecl::base_class_iterator
-           BI = CXXRec->bases_begin(),
-           BE = CXXRec->bases_end(); BI != BE; ++BI) {
-      if (!BI->isVirtual()) {
-        CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+    for (const auto &BI : CXXRec->bases()) {
+      if (!BI.isVirtual()) {
+        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
         if (base->isEmpty())
           continue;
         uint64_t offs = toBits(layout.getBaseClassOffset(base));
@@ -5566,10 +5593,8 @@
   }
 
   if (CXXRec && includeVBases) {
-    for (CXXRecordDecl::base_class_iterator
-           BI = CXXRec->vbases_begin(),
-           BE = CXXRec->vbases_end(); BI != BE; ++BI) {
-      CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+    for (const auto &BI : CXXRec->vbases()) {
+      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
       if (base->isEmpty())
         continue;
       uint64_t offs = toBits(layout.getVBaseClassOffset(base));
@@ -5587,7 +5612,9 @@
     size = layout.getSize();
   }
 
+#ifndef NDEBUG
   uint64_t CurOffs = 0;
+#endif
   std::multimap<uint64_t, NamedDecl *>::iterator
     CurLayObj = FieldOrBaseOffsets.begin();
 
@@ -5601,7 +5628,9 @@
       S += '"';
     }
     S += "^^?";
+#ifndef NDEBUG
     CurOffs += getTypeSize(VoidPtrTy);
+#endif
   }
 
   if (!RDecl->hasFlexibleArrayMember()) {
@@ -5612,8 +5641,8 @@
   }
 
   for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
+#ifndef NDEBUG
     assert(CurOffs <= CurLayObj->first);
-
     if (CurOffs < CurLayObj->first) {
       uint64_t padding = CurLayObj->first - CurOffs; 
       // FIXME: There doesn't seem to be a way to indicate in the encoding that
@@ -5625,6 +5654,7 @@
       // longer then though.
       CurOffs += padding;
     }
+#endif
 
     NamedDecl *dcl = CurLayObj->second;
     if (dcl == 0)
@@ -5637,7 +5667,9 @@
       // making the encoding type bigger than it really is.
       getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false);
       assert(!base->isEmpty());
+#ifndef NDEBUG
       CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
+#endif
     } else {
       FieldDecl *field = cast<FieldDecl>(dcl);
       if (FD) {
@@ -5648,7 +5680,9 @@
 
       if (field->isBitField()) {
         EncodeBitField(this, S, field->getType(), field);
+#ifndef NDEBUG
         CurOffs += field->getBitWidthValue(*this);
+#endif
       } else {
         QualType qt = field->getType();
         getLegacyIntegralTypeEncoding(qt);
@@ -5656,7 +5690,9 @@
                                    /*OutermostType*/false,
                                    /*EncodingProperty*/false,
                                    /*StructField*/true);
+#ifndef NDEBUG
         CurOffs += getTypeSize(field->getType());
+#endif
       }
     }
   }
@@ -5682,24 +5718,15 @@
   if (!ObjCIdDecl) {
     QualType T = getObjCObjectType(ObjCBuiltinIdTy, 0, 0);
     T = getObjCObjectPointerType(T);
-    TypeSourceInfo *IdInfo = getTrivialTypeSourceInfo(T);
-    ObjCIdDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
-                                     getTranslationUnitDecl(),
-                                     SourceLocation(), SourceLocation(),
-                                     &Idents.get("id"), IdInfo);
+    ObjCIdDecl = buildImplicitTypedef(T, "id");
   }
-  
   return ObjCIdDecl;
 }
 
 TypedefDecl *ASTContext::getObjCSelDecl() const {
   if (!ObjCSelDecl) {
-    QualType SelT = getPointerType(ObjCBuiltinSelTy);
-    TypeSourceInfo *SelInfo = getTrivialTypeSourceInfo(SelT);
-    ObjCSelDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
-                                      getTranslationUnitDecl(),
-                                      SourceLocation(), SourceLocation(),
-                                      &Idents.get("SEL"), SelInfo);
+    QualType T = getPointerType(ObjCBuiltinSelTy);
+    ObjCSelDecl = buildImplicitTypedef(T, "SEL");
   }
   return ObjCSelDecl;
 }
@@ -5708,13 +5735,8 @@
   if (!ObjCClassDecl) {
     QualType T = getObjCObjectType(ObjCBuiltinClassTy, 0, 0);
     T = getObjCObjectPointerType(T);
-    TypeSourceInfo *ClassInfo = getTrivialTypeSourceInfo(T);
-    ObjCClassDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
-                                        getTranslationUnitDecl(),
-                                        SourceLocation(), SourceLocation(),
-                                        &Idents.get("Class"), ClassInfo);
+    ObjCClassDecl = buildImplicitTypedef(T, "Class");
   }
-  
   return ObjCClassDecl;
 }
 
@@ -5737,37 +5759,20 @@
 
 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
   // typedef char* __builtin_va_list;
-  QualType CharPtrType = Context->getPointerType(Context->CharTy);
-  TypeSourceInfo *TInfo
-    = Context->getTrivialTypeSourceInfo(CharPtrType);
-
-  TypedefDecl *VaListTypeDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__builtin_va_list"),
-                          TInfo);
-  return VaListTypeDecl;
+  QualType T = Context->getPointerType(Context->CharTy);
+  return Context->buildImplicitTypedef(T, "__builtin_va_list");
 }
 
 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
   // typedef void* __builtin_va_list;
-  QualType VoidPtrType = Context->getPointerType(Context->VoidTy);
-  TypeSourceInfo *TInfo
-    = Context->getTrivialTypeSourceInfo(VoidPtrType);
-
-  TypedefDecl *VaListTypeDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__builtin_va_list"),
-                          TInfo);
-  return VaListTypeDecl;
+  QualType T = Context->getPointerType(Context->VoidTy);
+  return Context->buildImplicitTypedef(T, "__builtin_va_list");
 }
 
 static TypedefDecl *
 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
-  RecordDecl *VaListTagDecl;
+  // struct __va_list
+  RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
   if (Context->getLangOpts().CPlusPlus) {
     // namespace std { struct __va_list {
     NamespaceDecl *NS;
@@ -5776,17 +5781,8 @@
                                /*Inline*/false, SourceLocation(),
                                SourceLocation(), &Context->Idents.get("std"),
                                /*PrevDecl*/0);
-
-    VaListTagDecl = CXXRecordDecl::Create(*Context, TTK_Struct,
-                                          Context->getTranslationUnitDecl(),
-                                          SourceLocation(), SourceLocation(),
-                                          &Context->Idents.get("__va_list"));
+    NS->setImplicit();
     VaListTagDecl->setDeclContext(NS);
-  } else {
-    // struct __va_list
-    VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
-                                   Context->getTranslationUnitDecl(),
-                                   &Context->Idents.get("__va_list"));
   }
 
   VaListTagDecl->startDefinition();
@@ -5834,23 +5830,14 @@
   Context->VaListTagTy = VaListTagType;
 
   // } __builtin_va_list;
-  TypedefDecl *VaListTypedefDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__builtin_va_list"),
-                          Context->getTrivialTypeSourceInfo(VaListTagType));
-
-  return VaListTypedefDecl;
+  return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
 }
 
 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
   // typedef struct __va_list_tag {
   RecordDecl *VaListTagDecl;
 
-  VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
-                                   Context->getTranslationUnitDecl(),
-                                   &Context->Idents.get("__va_list_tag"));
+  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
   VaListTagDecl->startDefinition();
 
   const size_t NumFields = 5;
@@ -5895,12 +5882,9 @@
   Context->VaListTagTy = VaListTagType;
 
   // } __va_list_tag;
-  TypedefDecl *VaListTagTypedefDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__va_list_tag"),
-                          Context->getTrivialTypeSourceInfo(VaListTagType));
+  TypedefDecl *VaListTagTypedefDecl =
+      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
   QualType VaListTagTypedefType =
     Context->getTypedefType(VaListTagTypedefDecl);
 
@@ -5909,25 +5893,14 @@
   QualType VaListTagArrayType
     = Context->getConstantArrayType(VaListTagTypedefType,
                                     Size, ArrayType::Normal, 0);
-  TypeSourceInfo *TInfo
-    = Context->getTrivialTypeSourceInfo(VaListTagArrayType);
-  TypedefDecl *VaListTypedefDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__builtin_va_list"),
-                          TInfo);
-
-  return VaListTypedefDecl;
+  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
 }
 
 static TypedefDecl *
 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
   // typedef struct __va_list_tag {
   RecordDecl *VaListTagDecl;
-  VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
-                                   Context->getTranslationUnitDecl(),
-                                   &Context->Idents.get("__va_list_tag"));
+  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
   VaListTagDecl->startDefinition();
 
   const size_t NumFields = 4;
@@ -5969,12 +5942,9 @@
   Context->VaListTagTy = VaListTagType;
 
   // } __va_list_tag;
-  TypedefDecl *VaListTagTypedefDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__va_list_tag"),
-                          Context->getTrivialTypeSourceInfo(VaListTagType));
+  TypedefDecl *VaListTagTypedefDecl =
+      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
   QualType VaListTagTypedefType =
     Context->getTypedefType(VaListTagTypedefDecl);
 
@@ -5983,16 +5953,7 @@
   QualType VaListTagArrayType
     = Context->getConstantArrayType(VaListTagTypedefType,
                                       Size, ArrayType::Normal,0);
-  TypeSourceInfo *TInfo
-    = Context->getTrivialTypeSourceInfo(VaListTagArrayType);
-  TypedefDecl *VaListTypedefDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__builtin_va_list"),
-                          TInfo);
-
-  return VaListTypedefDecl;
+  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
 }
 
 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
@@ -6001,19 +5962,13 @@
   QualType IntArrayType
     = Context->getConstantArrayType(Context->IntTy,
 				    Size, ArrayType::Normal, 0);
-  TypedefDecl *VaListTypedefDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__builtin_va_list"),
-                          Context->getTrivialTypeSourceInfo(IntArrayType));
-
-  return VaListTypedefDecl;
+  return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list");
 }
 
 static TypedefDecl *
 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
-  RecordDecl *VaListDecl;
+  // struct __va_list
+  RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list");
   if (Context->getLangOpts().CPlusPlus) {
     // namespace std { struct __va_list {
     NamespaceDecl *NS;
@@ -6022,19 +5977,8 @@
                                /*Inline*/false, SourceLocation(),
                                SourceLocation(), &Context->Idents.get("std"),
                                /*PrevDecl*/0);
-
-    VaListDecl = CXXRecordDecl::Create(*Context, TTK_Struct,
-                                       Context->getTranslationUnitDecl(),
-                                       SourceLocation(), SourceLocation(),
-                                       &Context->Idents.get("__va_list"));
-
+    NS->setImplicit();
     VaListDecl->setDeclContext(NS);
-
-  } else {
-    // struct __va_list {
-    VaListDecl = CreateRecordDecl(*Context, TTK_Struct,
-                                  Context->getTranslationUnitDecl(),
-                                  &Context->Idents.get("__va_list"));
   }
 
   VaListDecl->startDefinition();
@@ -6057,26 +6001,15 @@
   VaListDecl->completeDefinition();
 
   // typedef struct __va_list __builtin_va_list;
-  TypeSourceInfo *TInfo
-    = Context->getTrivialTypeSourceInfo(Context->getRecordType(VaListDecl));
-
-  TypedefDecl *VaListTypeDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__builtin_va_list"),
-                          TInfo);
-
-  return VaListTypeDecl;
+  QualType T = Context->getRecordType(VaListDecl);
+  return Context->buildImplicitTypedef(T, "__builtin_va_list");
 }
 
 static TypedefDecl *
 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
   // typedef struct __va_list_tag {
   RecordDecl *VaListTagDecl;
-  VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
-                                   Context->getTranslationUnitDecl(),
-                                   &Context->Idents.get("__va_list_tag"));
+  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
   VaListTagDecl->startDefinition();
 
   const size_t NumFields = 4;
@@ -6118,12 +6051,8 @@
   Context->VaListTagTy = VaListTagType;
 
   // } __va_list_tag;
-  TypedefDecl *VaListTagTypedefDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__va_list_tag"),
-                          Context->getTrivialTypeSourceInfo(VaListTagType));
+  TypedefDecl *VaListTagTypedefDecl =
+      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
   QualType VaListTagTypedefType =
     Context->getTypedefType(VaListTagTypedefDecl);
 
@@ -6132,16 +6061,8 @@
   QualType VaListTagArrayType
     = Context->getConstantArrayType(VaListTagTypedefType,
                                       Size, ArrayType::Normal,0);
-  TypeSourceInfo *TInfo
-    = Context->getTrivialTypeSourceInfo(VaListTagArrayType);
-  TypedefDecl *VaListTypedefDecl
-    = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
-                          Context->getTranslationUnitDecl(),
-                          SourceLocation(), SourceLocation(),
-                          &Context->Idents.get("__builtin_va_list"),
-                          TInfo);
 
-  return VaListTypedefDecl;
+  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
 }
 
 static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
@@ -6169,8 +6090,10 @@
 }
 
 TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
-  if (!BuiltinVaListDecl)
+  if (!BuiltinVaListDecl) {
     BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
+    assert(BuiltinVaListDecl->isImplicit());
+  }
 
   return BuiltinVaListDecl;
 }
@@ -6454,9 +6377,8 @@
                                            ObjCProtocolDecl *rProto) const {
   if (declaresSameEntity(lProto, rProto))
     return true;
-  for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(),
-       E = rProto->protocol_end(); PI != E; ++PI)
-    if (ProtocolCompatibleWithProtocol(lProto, *PI))
+  for (auto *PI : rProto->protocols())
+    if (ProtocolCompatibleWithProtocol(lProto, PI))
       return true;
   return false;
 }
@@ -6469,13 +6391,9 @@
   const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
   assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible");
   
-  for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
-       E = lhsQID->qual_end(); I != E; ++I) {
+  for (auto *lhsProto : lhsQID->quals()) {
     bool match = false;
-    ObjCProtocolDecl *lhsProto = *I;
-    for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(),
-         E = rhsOPT->qual_end(); J != E; ++J) {
-      ObjCProtocolDecl *rhsProto = *J;
+    for (auto *rhsProto : rhsOPT->quals()) {
       if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
         match = true;
         break;
@@ -6508,12 +6426,11 @@
       // If the RHS is a unqualified interface pointer "NSString*",
       // make sure we check the class hierarchy.
       if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
-        for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
-             E = lhsQID->qual_end(); I != E; ++I) {
+        for (auto *I : lhsQID->quals()) {
           // when comparing an id<P> on lhs with a static type on rhs,
           // see if static class implements all of id's protocols, directly or
           // through its super class and categories.
-          if (!rhsID->ClassImplementsProtocol(*I, true))
+          if (!rhsID->ClassImplementsProtocol(I, true))
             return false;
         }
       }
@@ -6521,17 +6438,13 @@
       return true;
     }
     // Both the right and left sides have qualifiers.
-    for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
-         E = lhsQID->qual_end(); I != E; ++I) {
-      ObjCProtocolDecl *lhsProto = *I;
+    for (auto *lhsProto : lhsQID->quals()) {
       bool match = false;
 
       // when comparing an id<P> on lhs with a static type on rhs,
       // see if static class implements all of id's protocols, directly or
       // through its super class and categories.
-      for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(),
-           E = rhsOPT->qual_end(); J != E; ++J) {
-        ObjCProtocolDecl *rhsProto = *J;
+      for (auto *rhsProto : rhsOPT->quals()) {
         if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
             (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
           match = true;
@@ -6541,12 +6454,11 @@
       // If the RHS is a qualified interface pointer "NSString<P>*",
       // make sure we check the class hierarchy.
       if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
-        for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
-             E = lhsQID->qual_end(); I != E; ++I) {
+        for (auto *I : lhsQID->quals()) {
           // when comparing an id<P> on lhs with a static type on rhs,
           // see if static class implements all of id's protocols, directly or
           // through its super class and categories.
-          if (rhsID->ClassImplementsProtocol(*I, true)) {
+          if (rhsID->ClassImplementsProtocol(I, true)) {
             match = true;
             break;
           }
@@ -6565,9 +6477,7 @@
   if (const ObjCObjectPointerType *lhsOPT =
         lhs->getAsObjCInterfacePointerType()) {
     // If both the right and left sides have qualifiers.
-    for (ObjCObjectPointerType::qual_iterator I = lhsOPT->qual_begin(),
-         E = lhsOPT->qual_end(); I != E; ++I) {
-      ObjCProtocolDecl *lhsProto = *I;
+    for (auto *lhsProto : lhsOPT->quals()) {
       bool match = false;
 
       // when comparing an id<P> on rhs with a static type on lhs,
@@ -6575,9 +6485,7 @@
       // through its super class and categories.
       // First, lhs protocols in the qualifier list must be found, direct
       // or indirect in rhs's qualifier list or it is a mismatch.
-      for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
-           E = rhsQID->qual_end(); J != E; ++J) {
-        ObjCProtocolDecl *rhsProto = *J;
+      for (auto *rhsProto : rhsQID->quals()) {
         if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
             (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
           match = true;
@@ -6598,14 +6506,9 @@
       // assume that it is mismatch.
       if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty())
         return false;
-      for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
-           LHSInheritedProtocols.begin(),
-           E = LHSInheritedProtocols.end(); I != E; ++I) {
+      for (auto *lhsProto : LHSInheritedProtocols) {
         bool match = false;
-        ObjCProtocolDecl *lhsProto = (*I);
-        for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
-             E = rhsQID->qual_end(); J != E; ++J) {
-          ObjCProtocolDecl *rhsProto = *J;
+        for (auto *rhsProto : rhsQID->quals()) {
           if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
               (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
             match = true;
@@ -6798,16 +6701,9 @@
       if (SuperClassInheritedProtocols.empty())
         return false;
       
-      for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
-           LHSPE = LHS->qual_end();
-           LHSPI != LHSPE; LHSPI++) {
-        bool SuperImplementsProtocol = false;
-        ObjCProtocolDecl *LHSProto = (*LHSPI);
-        
-        for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
-             SuperClassInheritedProtocols.begin(),
-             E = SuperClassInheritedProtocols.end(); I != E; ++I) {
-          ObjCProtocolDecl *SuperClassProto = (*I);
+      for (const auto *LHSProto : LHS->quals()) {
+        bool SuperImplementsProtocol = false;
+        for (auto *SuperClassProto : SuperClassInheritedProtocols) {
           if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
             SuperImplementsProtocol = true;
             break;
@@ -6821,17 +6717,13 @@
     return false;
   }
 
-  for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
-                                     LHSPE = LHS->qual_end();
-       LHSPI != LHSPE; LHSPI++) {
+  for (const auto *LHSPI : LHS->quals()) {
     bool RHSImplementsProtocol = false;
 
     // If the RHS doesn't implement the protocol on the left, the types
     // are incompatible.
-    for (ObjCObjectType::qual_iterator RHSPI = RHS->qual_begin(),
-                                       RHSPE = RHS->qual_end();
-         RHSPI != RHSPE; RHSPI++) {
-      if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier())) {
+    for (auto *RHSPI : RHS->quals()) {
+      if (RHSPI->lookupProtocolNamed(LHSPI->getIdentifier())) {
         RHSImplementsProtocol = true;
         break;
       }
@@ -6891,9 +6783,8 @@
   if (const RecordType *UT = T->getAsUnionType()) {
     RecordDecl *UD = UT->getDecl();
     if (UD->hasAttr<TransparentUnionAttr>()) {
-      for (RecordDecl::field_iterator it = UD->field_begin(),
-           itend = UD->field_end(); it != itend; ++it) {
-        QualType ET = it->getType().getUnqualifiedType();
+      for (const auto *I : UD->fields()) {
+        QualType ET = I->getType().getUnqualifiedType();
         QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
         if (!MT.isNull())
           return MT;
@@ -6904,11 +6795,11 @@
   return QualType();
 }
 
-/// mergeFunctionArgumentTypes - merge two types which appear as function
-/// argument types
-QualType ASTContext::mergeFunctionArgumentTypes(QualType lhs, QualType rhs, 
-                                                bool OfBlockPointer,
-                                                bool Unqualified) {
+/// mergeFunctionParameterTypes - merge two types which appear as function
+/// parameter types
+QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
+                                                 bool OfBlockPointer,
+                                                 bool Unqualified) {
   // GNU extension: two types are compatible if they appear as a function
   // argument, one of the types is a transparent union type and the other
   // type is compatible with a union member
@@ -6938,23 +6829,23 @@
   // Check return type
   QualType retType;
   if (OfBlockPointer) {
-    QualType RHS = rbase->getResultType();
-    QualType LHS = lbase->getResultType();
+    QualType RHS = rbase->getReturnType();
+    QualType LHS = lbase->getReturnType();
     bool UnqualifiedResult = Unqualified;
     if (!UnqualifiedResult)
       UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
     retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
   }
   else
-    retType = mergeTypes(lbase->getResultType(), rbase->getResultType(), false,
+    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
                          Unqualified);
   if (retType.isNull()) return QualType();
   
   if (Unqualified)
     retType = retType.getUnqualifiedType();
 
-  CanQualType LRetType = getCanonicalType(lbase->getResultType());
-  CanQualType RRetType = getCanonicalType(rbase->getResultType());
+  CanQualType LRetType = getCanonicalType(lbase->getReturnType());
+  CanQualType RRetType = getCanonicalType(rbase->getReturnType());
   if (Unqualified) {
     LRetType = LRetType.getUnqualifiedType();
     RRetType = RRetType.getUnqualifiedType();
@@ -6998,11 +6889,8 @@
   if (lproto && rproto) { // two C99 style function prototypes
     assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
            "C++ shouldn't be here");
-    unsigned lproto_nargs = lproto->getNumArgs();
-    unsigned rproto_nargs = rproto->getNumArgs();
-
-    // Compatible functions must have the same number of arguments
-    if (lproto_nargs != rproto_nargs)
+    // Compatible functions must have the same number of parameters
+    if (lproto->getNumParams() != rproto->getNumParams())
       return QualType();
 
     // Variadic and non-variadic functions aren't compatible
@@ -7015,29 +6903,29 @@
     if (LangOpts.ObjCAutoRefCount &&
         !FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto))
       return QualType();
-      
-    // Check argument compatibility
+
+    // Check parameter type compatibility
     SmallVector<QualType, 10> types;
-    for (unsigned i = 0; i < lproto_nargs; i++) {
-      QualType largtype = lproto->getArgType(i).getUnqualifiedType();
-      QualType rargtype = rproto->getArgType(i).getUnqualifiedType();
-      QualType argtype = mergeFunctionArgumentTypes(largtype, rargtype,
-                                                    OfBlockPointer,
-                                                    Unqualified);
-      if (argtype.isNull()) return QualType();
-      
+    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
+      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
+      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
+      QualType paramType = mergeFunctionParameterTypes(
+          lParamType, rParamType, OfBlockPointer, Unqualified);
+      if (paramType.isNull())
+        return QualType();
+
       if (Unqualified)
-        argtype = argtype.getUnqualifiedType();
-      
-      types.push_back(argtype);
+        paramType = paramType.getUnqualifiedType();
+
+      types.push_back(paramType);
       if (Unqualified) {
-        largtype = largtype.getUnqualifiedType();
-        rargtype = rargtype.getUnqualifiedType();
+        lParamType = lParamType.getUnqualifiedType();
+        rParamType = rParamType.getUnqualifiedType();
       }
-      
-      if (getCanonicalType(argtype) != getCanonicalType(largtype))
+
+      if (getCanonicalType(paramType) != getCanonicalType(lParamType))
         allLTypes = false;
-      if (getCanonicalType(argtype) != getCanonicalType(rargtype))
+      if (getCanonicalType(paramType) != getCanonicalType(rParamType))
         allRTypes = false;
     }
       
@@ -7061,20 +6949,19 @@
     // The only types actually affected are promotable integer
     // types and floats, which would be passed as a different
     // type depending on whether the prototype is visible.
-    unsigned proto_nargs = proto->getNumArgs();
-    for (unsigned i = 0; i < proto_nargs; ++i) {
-      QualType argTy = proto->getArgType(i);
-      
+    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
+      QualType paramTy = proto->getParamType(i);
+
       // Look at the converted type of enum types, since that is the type used
       // to pass enum values.
-      if (const EnumType *Enum = argTy->getAs<EnumType>()) {
-        argTy = Enum->getDecl()->getIntegerType();
-        if (argTy.isNull())
+      if (const EnumType *Enum = paramTy->getAs<EnumType>()) {
+        paramTy = Enum->getDecl()->getIntegerType();
+        if (paramTy.isNull())
           return QualType();
       }
-      
-      if (argTy->isPromotableIntegerType() ||
-          getCanonicalType(argTy).getUnqualifiedType() == FloatTy)
+
+      if (paramTy->isPromotableIntegerType() ||
+          getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
         return QualType();
     }
 
@@ -7083,7 +6970,7 @@
 
     FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
     EPI.ExtInfo = einfo;
-    return getFunctionType(retType, proto->getArgTypes(), EPI);
+    return getFunctionType(retType, proto->getParamTypes(), EPI);
   }
 
   if (allLTypes) return lhs;
@@ -7387,18 +7274,16 @@
 bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs(
                    const FunctionProtoType *FromFunctionType,
                    const FunctionProtoType *ToFunctionType) {
-  if (FromFunctionType->hasAnyConsumedArgs() != 
-      ToFunctionType->hasAnyConsumedArgs())
+  if (FromFunctionType->hasAnyConsumedParams() !=
+      ToFunctionType->hasAnyConsumedParams())
     return false;
   FunctionProtoType::ExtProtoInfo FromEPI = 
     FromFunctionType->getExtProtoInfo();
   FunctionProtoType::ExtProtoInfo ToEPI = 
     ToFunctionType->getExtProtoInfo();
-  if (FromEPI.ConsumedArguments && ToEPI.ConsumedArguments)
-    for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
-         ArgIdx != NumArgs; ++ArgIdx)  {
-      if (FromEPI.ConsumedArguments[ArgIdx] != 
-          ToEPI.ConsumedArguments[ArgIdx])
+  if (FromEPI.ConsumedParameters && ToEPI.ConsumedParameters)
+    for (unsigned i = 0, n = FromFunctionType->getNumParams(); i != n; ++i) {
+      if (FromEPI.ConsumedParameters[i] != ToEPI.ConsumedParameters[i])
         return false;
     }
   return true;
@@ -7416,10 +7301,10 @@
   if (RHSCan->isFunctionType()) {
     if (!LHSCan->isFunctionType())
       return QualType();
-    QualType OldReturnType = 
-      cast<FunctionType>(RHSCan.getTypePtr())->getResultType();
+    QualType OldReturnType =
+        cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
     QualType NewReturnType =
-      cast<FunctionType>(LHSCan.getTypePtr())->getResultType();
+        cast<FunctionType>(LHSCan.getTypePtr())->getReturnType();
     QualType ResReturnType = 
       mergeObjCGCQualifiers(NewReturnType, OldReturnType);
     if (ResReturnType.isNull())
@@ -7432,7 +7317,7 @@
         FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
         EPI.ExtInfo = getFunctionExtInfo(LHS);
         QualType ResultType =
-            getFunctionType(OldReturnType, FPT->getArgTypes(), EPI);
+            getFunctionType(OldReturnType, FPT->getParamTypes(), EPI);
         return ResultType;
       }
     }
@@ -7573,6 +7458,19 @@
       assert(HowLong <= 2 && "Can't have LLLL modifier");
       ++HowLong;
       break;
+    case 'W':
+      // This modifier represents the int64 type.
+      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
+      switch (Context.getTargetInfo().getInt64Type()) {
+      default:
+        llvm_unreachable("Unexpected integer type");
+      case TargetInfo::SignedLong:
+        HowLong = 1;
+        break;
+      case TargetInfo::SignedLongLong:
+        HowLong = 2;
+        break;
+      }
     }
   }
 
@@ -7834,7 +7732,7 @@
   return getFunctionType(ResType, ArgTypes, EPI);
 }
 
-GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) {
+GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
   if (!FD->isExternallyVisible())
     return GVA_Internal;
 
@@ -7846,7 +7744,7 @@
     break;
 
   case TSK_ExplicitInstantiationDefinition:
-    return GVA_ExplicitTemplateInstantiation;
+    return GVA_StrongODR;
 
   case TSK_ExplicitInstantiationDeclaration:
   case TSK_ImplicitInstantiation:
@@ -7857,7 +7755,7 @@
   if (!FD->isInlined())
     return External;
 
-  if ((!getLangOpts().CPlusPlus && !getLangOpts().MicrosoftMode) ||
+  if ((!getLangOpts().CPlusPlus && !getLangOpts().MSVCCompat) ||
       FD->hasAttr<GNUInlineAttr>()) {
     // GNU or C99 inline semantics. Determine whether this symbol should be
     // externally visible.
@@ -7878,6 +7776,12 @@
                                        == TSK_ExplicitInstantiationDeclaration)
     return GVA_C99Inline;
 
+  // Functions specified with extern and inline in -fms-compatibility mode
+  // forcibly get emitted: even though the body of the function cannot later
+  // be replaced, its definition cannot be discarded.
+  if (FD->getMostRecentDecl()->isMSExternInline())
+    return GVA_StrongODR;
+
   return GVA_CXXInline;
 }
 
@@ -7895,7 +7799,7 @@
   // Fall through to treat this like any other instantiation.
 
   case TSK_ExplicitInstantiationDefinition:
-    return GVA_ExplicitTemplateInstantiation;
+    return GVA_StrongODR;
 
   case TSK_ImplicitInstantiation:
     return GVA_TemplateInstantiation;
@@ -7996,12 +7900,23 @@
   return ABI->isNearlyEmpty(RD);
 }
 
+VTableContextBase *ASTContext::getVTableContext() {
+  if (!VTContext.get()) {
+    if (Target->getCXXABI().isMicrosoft())
+      VTContext.reset(new MicrosoftVTableContext(*this));
+    else
+      VTContext.reset(new ItaniumVTableContext(*this));
+  }
+  return VTContext.get();
+}
+
 MangleContext *ASTContext::createMangleContext() {
   switch (Target->getCXXABI().getKind()) {
   case TargetCXXABI::GenericAArch64:
   case TargetCXXABI::GenericItanium:
   case TargetCXXABI::GenericARM:
   case TargetCXXABI::iOS:
+  case TargetCXXABI::iOS64:
     return ItaniumMangleContext::create(*this, getDiagnostics());
   case TargetCXXABI::Microsoft:
     return MicrosoftMangleContext::create(*this, getDiagnostics());
@@ -8071,6 +7986,17 @@
   return I != MangleNumbers.end() ? I->second : 1;
 }
 
+void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
+  if (Number > 1)
+    StaticLocalNumbers[VD] = Number;
+}
+
+unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
+  llvm::DenseMap<const VarDecl *, unsigned>::const_iterator I =
+      StaticLocalNumbers.find(VD);
+  return I != StaticLocalNumbers.end() ? I->second : 1;
+}
+
 MangleNumberingContext &
 ASTContext::getManglingNumberContext(const DeclContext *DC) {
   assert(LangOpts.CPlusPlus);  // We don't need mangling numbers for plain C.
@@ -8230,8 +8156,7 @@
   if (MethodDecl->getObjCDeclQualifier() !=
       MethodImpl->getObjCDeclQualifier())
     return false;
-  if (!hasSameType(MethodDecl->getResultType(),
-                   MethodImpl->getResultType()))
+  if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
     return false;
   
   if (MethodDecl->param_size() != MethodImpl->param_size())
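
The va_list and Objective-C typedef hunks above all funnel their boilerplate
through ASTContext::buildImplicitTypedef and buildImplicitRecord, whose
definitions are not part of the hunks shown here. Judging from the
TypedefDecl::Create code being deleted and the new isImplicit() assertion in
getBuiltinVaListDecl(), the typedef helper presumably amounts to the sketch
below; treat it as an illustration of the pattern, not the patched
implementation.

    TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                                  StringRef Name) const {
      // Same boilerplate each CreateXXXBuiltinVaListDecl used to repeat inline.
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      TypedefDecl *NewDecl = TypedefDecl::Create(
          const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
          SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
      // Marked implicit so that getBuiltinVaListDecl()'s new assert holds.
      NewDecl->setImplicit();
      return NewDecl;
    }
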
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index fce8f64..7c6bec4 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 #include "clang/AST/ASTDiagnostic.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/AST/ExprCXX.h"
@@ -51,6 +52,11 @@
       QT = AT->desugar();
       continue;
     }
+    // ...or an adjusted type...
+    if (const AdjustedType *AT = dyn_cast<AdjustedType>(Ty)) {
+      QT = AT->desugar();
+      continue;
+    }
     // ... or an auto type.
     if (const AutoType *AT = dyn_cast<AutoType>(Ty)) {
       if (!AT->isSugared())
@@ -354,6 +360,14 @@
       NeedQuotes = false;
       break;
     }
+    case DiagnosticsEngine::ak_attr: {
+      const Attr *At = reinterpret_cast<Attr *>(Val);
+      assert(At && "Received null Attr object!");
+      OS << '\'' << At->getSpelling() << '\'';
+      NeedQuotes = false;
+      break;
+    }
+
   }
 
   OS.flush();
@@ -1349,8 +1363,7 @@
         FromType.getLocalUnqualifiedType() ==
         ToType.getLocalUnqualifiedType()) {
       Qualifiers FromQual = FromType.getLocalQualifiers(),
-                 ToQual = ToType.getLocalQualifiers(),
-                 CommonQual;
+                 ToQual = ToType.getLocalQualifiers();
       PrintQualifiers(FromQual, ToQual);
       FromType.getLocalUnqualifiedType().print(OS, Policy);
       return;
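
For context on the new AdjustedType case in the desugaring loop above: an
AdjustedType is sugar recording a type the compiler silently rewrote, most
commonly array-to-pointer decay of a parameter (DecayedType, a subclass of
AdjustedType). A minimal illustration of the kind of declaration that
produces one:

    // Written as int[4], but the parameter's type is adjusted to int *.
    // Desugaring the AdjustedType lets diagnostics describe it as 'int *',
    // the type callers actually see, instead of stopping at the sugar.
    void f(int a[4]);
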
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index 2f40255..dcfba3e 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -32,12 +32,23 @@
 
 namespace  {
   // Colors used for various parts of the AST dump
+  // Do not use bold yellow for any text.  It is hard to read on white screens.
 
   struct TerminalColor {
     raw_ostream::Colors Color;
     bool Bold;
   };
 
+  // Red           - CastColor
+  // Green         - TypeColor
+  // Bold Green    - DeclKindNameColor, UndeserializedColor
+  // Yellow        - AddressColor, LocationColor
+  // Blue          - CommentColor, NullColor, IndentColor
+  // Bold Blue     - AttrColor
+  // Bold Magenta  - StmtColor
+  // Cyan          - ValueKindColor, ObjectKindColor
+  // Bold Cyan     - ValueColor, DeclNameColor
+
   // Decl kind names (VarDecl, FunctionDecl, etc)
   static const TerminalColor DeclKindNameColor = { raw_ostream::GREEN, true };
   // Attr names (CleanupAttr, GuardedByAttr, etc)
@@ -45,7 +56,7 @@
   // Statement names (DeclStmt, ImplicitCastExpr, etc)
   static const TerminalColor StmtColor = { raw_ostream::MAGENTA, true };
   // Comment names (FullComment, ParagraphComment, TextComment, etc)
-  static const TerminalColor CommentColor = { raw_ostream::YELLOW, true };
+  static const TerminalColor CommentColor = { raw_ostream::BLUE, false };
 
   // Type names (int, float, etc, plus user defined types)
   static const TerminalColor TypeColor = { raw_ostream::GREEN, false };
@@ -138,6 +149,38 @@
       }
     };
 
+    class ChildDumper {
+      ASTDumper &Dumper;
+
+      const Decl *Prev;
+      bool PrevRef;
+    public:
+      ChildDumper(ASTDumper &Dumper) : Dumper(Dumper), Prev(0) {}
+      ~ChildDumper() {
+        if (Prev) {
+          Dumper.lastChild();
+          dump(0);
+        }
+      }
+
+      // FIXME: This should take an arbitrary callable as the dumping action.
+      void dump(const Decl *D, bool Ref = false) {
+        if (Prev) {
+          if (PrevRef)
+            Dumper.dumpDeclRef(Prev);
+          else
+            Dumper.dumpDecl(Prev);
+        }
+        Prev = D;
+        PrevRef = Ref;
+      }
+      void dumpRef(const Decl *D) { dump(D, true); }
+
+      // Give up ownership of the children of the node. By calling this,
+      // the caller takes back responsibility for calling lastChild().
+      void release() { dump(0); }
+    };
+
   public:
     ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
               const SourceManager *SM)
@@ -211,6 +254,13 @@
     void VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D);
     void VisitCXXRecordDecl(const CXXRecordDecl *D);
     void VisitStaticAssertDecl(const StaticAssertDecl *D);
+    template<typename SpecializationDecl>
+    void VisitTemplateDeclSpecialization(ChildDumper &Children,
+                                         const SpecializationDecl *D,
+                                         bool DumpExplicitInst,
+                                         bool DumpRefOnly);
+    template<typename TemplateDecl>
+    void VisitTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
     void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
     void VisitClassTemplateDecl(const ClassTemplateDecl *D);
     void VisitClassTemplateSpecializationDecl(
@@ -500,17 +550,14 @@
 void ASTDumper::dumpDeclContext(const DeclContext *DC) {
   if (!DC)
     return;
-  bool HasUndeserializedDecls = DC->hasExternalLexicalStorage();
-  for (DeclContext::decl_iterator I = DC->noload_decls_begin(),
-                                  E = DC->noload_decls_end();
-       I != E; ++I) {
-    DeclContext::decl_iterator Next = I;
-    ++Next;
-    if (Next == E && !HasUndeserializedDecls)
-      lastChild();
-    dumpDecl(*I);
-  }
-  if (HasUndeserializedDecls) {
+
+  ChildDumper Children(*this);
+  for (auto *D : DC->noload_decls())
+    Children.dump(D);
+
+  if (DC->hasExternalLexicalStorage()) {
+    Children.release();
+
     lastChild();
     IndentScope Indent(*this);
     ColorScope Color(*this, UndeserializedColor);
@@ -569,6 +616,7 @@
   IndentScope Indent(*this);
   {
     ColorScope Color(*this, AttrColor);
+
     switch (A->getKind()) {
 #define ATTR(X) case attr::X: OS << #X; break;
 #include "clang/Basic/AttrList.inc"
@@ -579,6 +627,8 @@
   dumpPointer(A);
   dumpSourceRange(A->getRange());
 #include "clang/AST/AttrDump.inc"
+  if (A->isImplicit())
+    OS << " Implicit";
 }
 
 static void dumpPreviousDeclImpl(raw_ostream &OS, ...) {}
@@ -742,13 +792,15 @@
     OS << " parent " << cast<Decl>(D->getDeclContext());
   dumpPreviousDecl(OS, D);
   dumpSourceRange(D->getSourceRange());
+  OS << ' ';
+  dumpLocation(D->getLocation());
   if (Module *M = D->getOwningModule())
     OS << " in " << M->getFullModuleName();
   if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
     if (ND->isHidden())
       OS << " hidden";
 
-  bool HasAttrs = D->attr_begin() != D->attr_end();
+  bool HasAttrs = D->hasAttrs();
   const FullComment *Comment =
       D->getASTContext().getLocalCommentForDeclUncached(D);
   // Decls within functions are visited by the body
@@ -824,13 +876,10 @@
 void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
   dumpName(D);
   dumpType(D->getType());
-  for (IndirectFieldDecl::chain_iterator I = D->chain_begin(),
-                                         E = D->chain_end();
-       I != E; ++I) {
-    if (I + 1 == E)
-      lastChild();
-    dumpDeclRef(*I);
-  }
+
+  ChildDumper Children(*this);
+  for (auto *Child : D->chain())
+    Children.dumpRef(Child);
 }
 
 void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
@@ -1016,15 +1065,13 @@
   if (!D->isCompleteDefinition())
     return;
 
-  for (CXXRecordDecl::base_class_const_iterator I = D->bases_begin(),
-                                                E = D->bases_end();
-       I != E; ++I) {
+  for (const auto &I : D->bases()) {
     IndentScope Indent(*this);
-    if (I->isVirtual())
+    if (I.isVirtual())
       OS << "virtual ";
-    dumpAccessSpecifier(I->getAccessSpecifier());
-    dumpType(I->getType());
-    if (I->isPackExpansion())
+    dumpAccessSpecifier(I.getAccessSpecifier());
+    dumpType(I.getType());
+    if (I.isPackExpansion())
       OS << "...";
   }
 }
@@ -1035,63 +1082,69 @@
   dumpStmt(D->getMessage());
 }
 
-void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
-  dumpName(D);
-  dumpTemplateParameters(D->getTemplateParameters());
-  dumpDecl(D->getTemplatedDecl());
-  for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(),
-                                           E = D->spec_end();
-       I != E; ++I) {
-    FunctionTemplateDecl::spec_iterator Next = I;
-    ++Next;
-    if (Next == E)
-      lastChild();
-    switch (I->getTemplateSpecializationKind()) {
-    case TSK_Undeclared:
-    case TSK_ImplicitInstantiation:
+template<typename SpecializationDecl>
+void ASTDumper::VisitTemplateDeclSpecialization(ChildDumper &Children,
+                                                const SpecializationDecl *D,
+                                                bool DumpExplicitInst,
+                                                bool DumpRefOnly) {
+  bool DumpedAny = false;
+  for (auto *RedeclWithBadType : D->redecls()) {
+    // FIXME: The redecls() range sometimes has elements of a less-specific
+    // type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
+    // us TagDecls, and should give CXXRecordDecls).
+    auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType);
+    if (!Redecl) {
+      // Found the injected-class-name for a class template. This will be dumped
+      // as part of its surrounding class so we don't need to dump it here.
+      assert(isa<CXXRecordDecl>(RedeclWithBadType) &&
+             "expected an injected-class-name");
+      continue;
+    }
+
+    switch (Redecl->getTemplateSpecializationKind()) {
     case TSK_ExplicitInstantiationDeclaration:
     case TSK_ExplicitInstantiationDefinition:
-      if (D == D->getCanonicalDecl())
-        dumpDecl(*I);
-      else
-        dumpDeclRef(*I);
+      if (!DumpExplicitInst)
+        break;
+      // Fall through.
+    case TSK_Undeclared:
+    case TSK_ImplicitInstantiation:
+      Children.dump(Redecl, DumpRefOnly);
+      DumpedAny = true;
       break;
     case TSK_ExplicitSpecialization:
-      dumpDeclRef(*I);
       break;
     }
   }
+
+  // Ensure we dump at least one decl for each specialization.
+  if (!DumpedAny)
+    Children.dumpRef(D);
+}
+
+template<typename TemplateDecl>
+void ASTDumper::VisitTemplateDecl(const TemplateDecl *D,
+                                  bool DumpExplicitInst) {
+  dumpName(D);
+  dumpTemplateParameters(D->getTemplateParameters());
+
+  ChildDumper Children(*this);
+  Children.dump(D->getTemplatedDecl());
+
+  for (auto *Child : D->specializations())
+    VisitTemplateDeclSpecialization(Children, Child, DumpExplicitInst,
+                                    !D->isCanonicalDecl());
+}
+
+void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
+  // FIXME: We don't add a declaration of a function template specialization
+  // to its context when it's explicitly instantiated, so dump explicit
+  // instantiations when we dump the template itself.
+  VisitTemplateDecl(D, true);
 }
 
 void ASTDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
-  dumpName(D);
-  dumpTemplateParameters(D->getTemplateParameters());
-
-  ClassTemplateDecl::spec_iterator I = D->spec_begin();
-  ClassTemplateDecl::spec_iterator E = D->spec_end();
-  if (I == E)
-    lastChild();
-  dumpDecl(D->getTemplatedDecl());
-  for (; I != E; ++I) {
-    ClassTemplateDecl::spec_iterator Next = I;
-    ++Next;
-    if (Next == E)
-      lastChild();
-    switch (I->getTemplateSpecializationKind()) {
-    case TSK_Undeclared:
-    case TSK_ImplicitInstantiation:
-      if (D == D->getCanonicalDecl())
-        dumpDecl(*I);
-      else
-        dumpDeclRef(*I);
-      break;
-    case TSK_ExplicitSpecialization:
-    case TSK_ExplicitInstantiationDeclaration:
-    case TSK_ExplicitInstantiationDefinition:
-      dumpDeclRef(*I);
-      break;
-    }
-  }
+  VisitTemplateDecl(D, false);
 }
 
 void ASTDumper::VisitClassTemplateSpecializationDecl(
@@ -1114,34 +1167,7 @@
 }
 
 void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
-  dumpName(D);
-  dumpTemplateParameters(D->getTemplateParameters());
-
-  VarTemplateDecl::spec_iterator I = D->spec_begin();
-  VarTemplateDecl::spec_iterator E = D->spec_end();
-  if (I == E)
-    lastChild();
-  dumpDecl(D->getTemplatedDecl());
-  for (; I != E; ++I) {
-    VarTemplateDecl::spec_iterator Next = I;
-    ++Next;
-    if (Next == E)
-      lastChild();
-    switch (I->getTemplateSpecializationKind()) {
-    case TSK_Undeclared:
-    case TSK_ImplicitInstantiation:
-      if (D == D->getCanonicalDecl())
-        dumpDecl(*I);
-      else
-        dumpDeclRef(*I);
-      break;
-    case TSK_ExplicitSpecialization:
-    case TSK_ExplicitInstantiationDeclaration:
-    case TSK_ExplicitInstantiationDefinition:
-      dumpDeclRef(*I);
-      break;
-    }
-  }
+  VisitTemplateDecl(D, false);
 }
 
 void ASTDumper::VisitVarTemplateSpecializationDecl(
@@ -1164,8 +1190,10 @@
   if (D->isParameterPack())
     OS << " ...";
   dumpName(D);
-  if (D->hasDefaultArgument())
-    dumpType(D->getDefaultArgument());
+  if (D->hasDefaultArgument()) {
+    lastChild();
+    dumpTemplateArgument(D->getDefaultArgument());
+  }
 }
 
 void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
@@ -1173,8 +1201,10 @@
   if (D->isParameterPack())
     OS << " ...";
   dumpName(D);
-  if (D->hasDefaultArgument())
-    dumpStmt(D->getDefaultArgument());
+  if (D->hasDefaultArgument()) {
+    lastChild();
+    dumpTemplateArgument(D->getDefaultArgument());
+  }
 }
 
 void ASTDumper::VisitTemplateTemplateParmDecl(
@@ -1183,8 +1213,10 @@
     OS << " ...";
   dumpName(D);
   dumpTemplateParameters(D->getTemplateParameters());
-  if (D->hasDefaultArgument())
+  if (D->hasDefaultArgument()) {
+    lastChild();
     dumpTemplateArgumentLoc(D->getDefaultArgument());
+  }
 }
 
 void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
@@ -1241,8 +1273,6 @@
   dumpType(D->getType());
   if (D->getSynthesize())
     OS << " synthesize";
-  if (D->getBackingIvarReferencedInAccessor())
-    OS << " BackingIvarReferencedInAccessor";
 
   switch (D->getAccessControl()) {
   case ObjCIvarDecl::None:
@@ -1269,7 +1299,7 @@
   else
     OS << " +";
   dumpName(D);
-  dumpType(D->getResultType());
+  dumpType(D->getReturnType());
 
   bool OldMoreChildren = hasMoreChildren();
   bool IsVariadic = D->isVariadic();
@@ -1327,28 +1357,20 @@
 
 void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
   dumpName(D);
-  for (ObjCProtocolDecl::protocol_iterator I = D->protocol_begin(),
-                                           E = D->protocol_end();
-       I != E; ++I) {
-    if (I + 1 == E)
-      lastChild();
-    dumpDeclRef(*I);
-  }
+
+  ChildDumper Children(*this);
+  for (auto *Child : D->protocols())
+    Children.dumpRef(Child);
 }
 
 void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
   dumpName(D);
   dumpDeclRef(D->getSuperClass(), "super");
-  if (D->protocol_begin() == D->protocol_end())
-    lastChild();
-  dumpDeclRef(D->getImplementation());
-  for (ObjCInterfaceDecl::protocol_iterator I = D->protocol_begin(),
-                                            E = D->protocol_end();
-       I != E; ++I) {
-    if (I + 1 == E)
-      lastChild();
-    dumpDeclRef(*I);
-  }
+
+  ChildDumper Children(*this);
+  Children.dumpRef(D->getImplementation());
+  for (auto *Child : D->protocols())
+    Children.dumpRef(Child);
 }
 
 void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
@@ -1427,9 +1449,8 @@
 }
 
 void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
-  for (BlockDecl::param_const_iterator I = D->param_begin(), E = D->param_end();
-       I != E; ++I)
-    dumpDecl(*I);
+  for (auto I : D->params())
+    dumpDecl(I);
 
   if (D->isVariadic()) {
     IndentScope Indent(*this);
@@ -1440,20 +1461,19 @@
     IndentScope Indent(*this);
     OS << "capture this";
   }
-  for (BlockDecl::capture_iterator I = D->capture_begin(), E = D->capture_end();
-       I != E; ++I) {
+  for (const auto &I : D->captures()) {
     IndentScope Indent(*this);
     OS << "capture";
-    if (I->isByRef())
+    if (I.isByRef())
       OS << " byref";
-    if (I->isNested())
+    if (I.isNested())
       OS << " nested";
-    if (I->getVariable()) {
+    if (I.getVariable()) {
       OS << ' ';
-      dumpBareDeclRef(I->getVariable());
+      dumpBareDeclRef(I.getVariable());
     }
-    if (I->hasCopyExpr())
-      dumpStmt(I->getCopyExpr());
+    if (I.hasCopyExpr())
+      dumpStmt(I.getCopyExpr());
   }
   lastChild();
   dumpStmt(D->getBody());
@@ -1838,7 +1858,8 @@
 
 void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
   VisitExpr(Node);
-  OS << " selector=" << Node->getSelector().getAsString();
+  OS << " selector=";
+  Node->getSelector().print(OS);
   switch (Node->getReceiverKind()) {
   case ObjCMessageExpr::Instance:
     break;
@@ -1860,7 +1881,8 @@
 
 void ASTDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) {
   VisitExpr(Node);
-  OS << " selector=" << Node->getBoxingMethod()->getSelector().getAsString();
+  OS << " selector=";
+  Node->getBoxingMethod()->getSelector().print(OS);
 }
 
 void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
@@ -1879,7 +1901,8 @@
 void ASTDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *Node) {
   VisitExpr(Node);
 
-  OS << " " << Node->getSelector().getAsString();
+  OS << " ";
+  Node->getSelector().print(OS);
 }
 
 void ASTDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *Node) {
@@ -1893,13 +1916,13 @@
   if (Node->isImplicitProperty()) {
     OS << " Kind=MethodRef Getter=\"";
     if (Node->getImplicitPropertyGetter())
-      OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+      Node->getImplicitPropertyGetter()->getSelector().print(OS);
     else
       OS << "(null)";
 
     OS << "\" Setter=\"";
     if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
-      OS << Setter->getSelector().getAsString();
+      Setter->getSelector().print(OS);
     else
       OS << "(null)";
     OS << "\"";
@@ -1926,7 +1949,7 @@
   else
     OS << " Kind=DictionarySubscript GetterForDictionary=\"";
   if (Node->getAtIndexMethodDecl())
-    OS << Node->getAtIndexMethodDecl()->getSelector().getAsString();
+    Node->getAtIndexMethodDecl()->getSelector().print(OS);
   else
     OS << "(null)";
 
@@ -1935,7 +1958,7 @@
   else
     OS << "\" SetterForDictionary=\"";
   if (Node->setAtIndexMethodDecl())
-    OS << Node->setAtIndexMethodDecl()->getSelector().getAsString();
+    Node->setAtIndexMethodDecl()->getSelector().print(OS);
   else
     OS << "(null)";
 }
@@ -2054,7 +2077,7 @@
       OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
   }
 
-  if (C->isParamIndexValid())
+  if (C->isParamIndexValid() && !C->isVarArgParam())
     OS << " ParamIndex=" << C->getParamIndex();
 }
 
@@ -2095,27 +2118,25 @@
 // Decl method implementations
 //===----------------------------------------------------------------------===//
 
-void Decl::dump() const {
-  dump(llvm::errs());
-}
+LLVM_DUMP_METHOD void Decl::dump() const { dump(llvm::errs()); }
 
-void Decl::dump(raw_ostream &OS) const {
+LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS) const {
   ASTDumper P(OS, &getASTContext().getCommentCommandTraits(),
               &getASTContext().getSourceManager());
   P.dumpDecl(this);
 }
 
-void Decl::dumpColor() const {
+LLVM_DUMP_METHOD void Decl::dumpColor() const {
   ASTDumper P(llvm::errs(), &getASTContext().getCommentCommandTraits(),
               &getASTContext().getSourceManager(), /*ShowColors*/true);
   P.dumpDecl(this);
 }
 
-void DeclContext::dumpLookups() const {
+LLVM_DUMP_METHOD void DeclContext::dumpLookups() const {
   dumpLookups(llvm::errs());
 }
 
-void DeclContext::dumpLookups(raw_ostream &OS) const {
+LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS) const {
   const DeclContext *DC = this;
   while (!DC->isTranslationUnit())
     DC = DC->getParent();
@@ -2128,21 +2149,21 @@
 // Stmt method implementations
 //===----------------------------------------------------------------------===//
 
-void Stmt::dump(SourceManager &SM) const {
+LLVM_DUMP_METHOD void Stmt::dump(SourceManager &SM) const {
   dump(llvm::errs(), SM);
 }
 
-void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
+LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
   ASTDumper P(OS, 0, &SM);
   P.dumpStmt(this);
 }
 
-void Stmt::dump() const {
+LLVM_DUMP_METHOD void Stmt::dump() const {
   ASTDumper P(llvm::errs(), 0, 0);
   P.dumpStmt(this);
 }
 
-void Stmt::dumpColor() const {
+LLVM_DUMP_METHOD void Stmt::dumpColor() const {
   ASTDumper P(llvm::errs(), 0, 0, /*ShowColors*/true);
   P.dumpStmt(this);
 }
@@ -2151,11 +2172,9 @@
 // Comment method implementations
 //===----------------------------------------------------------------------===//
 
-void Comment::dump() const {
-  dump(llvm::errs(), 0, 0);
-}
+LLVM_DUMP_METHOD void Comment::dump() const { dump(llvm::errs(), 0, 0); }
 
-void Comment::dump(const ASTContext &Context) const {
+LLVM_DUMP_METHOD void Comment::dump(const ASTContext &Context) const {
   dump(llvm::errs(), &Context.getCommentCommandTraits(),
        &Context.getSourceManager());
 }
@@ -2167,7 +2186,7 @@
   D.dumpFullComment(FC);
 }
 
-void Comment::dumpColor() const {
+LLVM_DUMP_METHOD void Comment::dumpColor() const {
   const FullComment *FC = dyn_cast<FullComment>(this);
   ASTDumper D(llvm::errs(), 0, 0, /*ShowColors*/true);
   D.dumpFullComment(FC);
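
A note on the selector-printing changes in the ASTDumper hunks above: instead of building a temporary std::string with getAsString() and streaming that, the dumper now calls Selector::print(OS) to write straight into the output stream. A minimal standalone sketch of the same pattern, using a hypothetical Selector stand-in and std::ostream rather than clang's real classes:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for clang's Selector; not part of the patch.
    struct Selector {
      std::vector<std::string> Pieces;

      // New-style accessor: stream each piece straight into the caller's
      // ostream, with no intermediate heap-allocated string.
      void print(std::ostream &OS) const {
        for (const std::string &P : Pieces)
          OS << P;
      }

      // Old-style accessor, kept for comparison: builds a temporary string.
      std::string getAsString() const {
        std::ostringstream OS;
        print(OS);
        return OS.str();
      }
    };

    int main() {
      Selector Sel = {{"initWithFrame:", "display:"}};
      std::cout << " selector=";
      Sel.print(std::cout); // preferred: writes in place
      std::cout << "\n" << Sel.getAsString() << "\n"; // old pattern still works
    }

The printed output is identical either way; the print form simply skips the intermediate string.
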
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index e16015b..ace526e 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -407,10 +407,11 @@
       return false;
     break;
   
+  case Type::Adjusted:
   case Type::Decayed:
     if (!IsStructurallyEquivalent(Context,
-                                  cast<DecayedType>(T1)->getPointeeType(),
-                                  cast<DecayedType>(T2)->getPointeeType()))
+                                  cast<AdjustedType>(T1)->getOriginalType(),
+                                  cast<AdjustedType>(T2)->getOriginalType()))
       return false;
     break;
 
@@ -534,12 +535,11 @@
   case Type::FunctionProto: {
     const FunctionProtoType *Proto1 = cast<FunctionProtoType>(T1);
     const FunctionProtoType *Proto2 = cast<FunctionProtoType>(T2);
-    if (Proto1->getNumArgs() != Proto2->getNumArgs())
+    if (Proto1->getNumParams() != Proto2->getNumParams())
       return false;
-    for (unsigned I = 0, N = Proto1->getNumArgs(); I != N; ++I) {
-      if (!IsStructurallyEquivalent(Context, 
-                                    Proto1->getArgType(I),
-                                    Proto2->getArgType(I)))
+    for (unsigned I = 0, N = Proto1->getNumParams(); I != N; ++I) {
+      if (!IsStructurallyEquivalent(Context, Proto1->getParamType(I),
+                                    Proto2->getParamType(I)))
         return false;
     }
     if (Proto1->isVariadic() != Proto2->isVariadic())
@@ -570,9 +570,8 @@
   case Type::FunctionNoProto: {
     const FunctionType *Function1 = cast<FunctionType>(T1);
     const FunctionType *Function2 = cast<FunctionType>(T2);
-    if (!IsStructurallyEquivalent(Context, 
-                                  Function1->getResultType(),
-                                  Function2->getResultType()))
+    if (!IsStructurallyEquivalent(Context, Function1->getReturnType(),
+                                  Function2->getReturnType()))
       return false;
       if (Function1->getExtInfo() != Function2->getExtInfo())
         return false;
@@ -931,10 +930,8 @@
     return None;
 
   unsigned Index = 0;
-  for (DeclContext::decl_iterator D = Owner->noload_decls_begin(),
-                               DEnd = Owner->noload_decls_end();
-       D != DEnd; ++D) {
-    FieldDecl *F = dyn_cast<FieldDecl>(*D);
+  for (const auto *D : Owner->noload_decls()) {
+    const auto *F = dyn_cast<FieldDecl>(D);
     if (!F || !F->isAnonymousStructOrUnion())
       continue;
 
@@ -1586,7 +1583,7 @@
 ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
   // FIXME: What happens if we're importing a function without a prototype 
   // into C++? Should we make it variadic?
-  QualType ToResultType = Importer.Import(T->getResultType());
+  QualType ToResultType = Importer.Import(T->getReturnType());
   if (ToResultType.isNull())
     return QualType();
 
@@ -1595,16 +1592,14 @@
 }
 
 QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
-  QualType ToResultType = Importer.Import(T->getResultType());
+  QualType ToResultType = Importer.Import(T->getReturnType());
   if (ToResultType.isNull())
     return QualType();
   
   // Import argument types
   SmallVector<QualType, 4> ArgTypes;
-  for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
-                                         AEnd = T->arg_type_end();
-       A != AEnd; ++A) {
-    QualType ArgType = Importer.Import(*A);
+  for (const auto &A : T->param_types()) {
+    QualType ArgType = Importer.Import(A);
     if (ArgType.isNull())
       return QualType();
     ArgTypes.push_back(ArgType);
@@ -1612,10 +1607,8 @@
   
   // Import exception types
   SmallVector<QualType, 4> ExceptionTypes;
-  for (FunctionProtoType::exception_iterator E = T->exception_begin(),
-                                          EEnd = T->exception_end();
-       E != EEnd; ++E) {
-    QualType ExceptionType = Importer.Import(*E);
+  for (const auto &E : T->exceptions()) {
+    QualType ExceptionType = Importer.Import(E);
     if (ExceptionType.isNull())
       return QualType();
     ExceptionTypes.push_back(ExceptionType);
@@ -1631,7 +1624,7 @@
   ToEPI.RefQualifier = FromEPI.RefQualifier;
   ToEPI.NumExceptions = ExceptionTypes.size();
   ToEPI.Exceptions = ExceptionTypes.data();
-  ToEPI.ConsumedArguments = FromEPI.ConsumedArguments;
+  ToEPI.ConsumedParameters = FromEPI.ConsumedParameters;
   ToEPI.ExceptionSpecType = FromEPI.ExceptionSpecType;
   ToEPI.NoexceptExpr = Importer.Import(FromEPI.NoexceptExpr);
   ToEPI.ExceptionSpecDecl = cast_or_null<FunctionDecl>(
@@ -1787,11 +1780,9 @@
     return QualType();
 
   SmallVector<ObjCProtocolDecl *, 4> Protocols;
-  for (ObjCObjectType::qual_iterator P = T->qual_begin(), 
-                                     PEnd = T->qual_end();
-       P != PEnd; ++P) {
+  for (auto *P : T->quals()) {
     ObjCProtocolDecl *Protocol
-      = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(*P));
+      = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(P));
     if (!Protocol)
       return QualType();
     Protocols.push_back(Protocol);
@@ -1909,11 +1900,8 @@
     return;
   }
   
-  for (DeclContext::decl_iterator From = FromDC->decls_begin(),
-                               FromEnd = FromDC->decls_end();
-       From != FromEnd;
-       ++From)
-    Importer.Import(*From);
+  for (auto *From : FromDC->decls())
+    Importer.Import(From);
 }
 
 bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To, 
@@ -1946,6 +1934,7 @@
     ToData.HasProtectedFields = FromData.HasProtectedFields;
     ToData.HasPublicFields = FromData.HasPublicFields;
     ToData.HasMutableFields = FromData.HasMutableFields;
+    ToData.HasVariantMembers = FromData.HasVariantMembers;
     ToData.HasOnlyCMembers = FromData.HasOnlyCMembers;
     ToData.HasInClassInitializer = FromData.HasInClassInitializer;
     ToData.HasUninitializedReferenceMember
@@ -1986,29 +1975,25 @@
     ToData.IsLambda = FromData.IsLambda;
 
     SmallVector<CXXBaseSpecifier *, 4> Bases;
-    for (CXXRecordDecl::base_class_iterator 
-                  Base1 = FromCXX->bases_begin(),
-            FromBaseEnd = FromCXX->bases_end();
-         Base1 != FromBaseEnd;
-         ++Base1) {
-      QualType T = Importer.Import(Base1->getType());
+    for (const auto &Base1 : FromCXX->bases()) {
+      QualType T = Importer.Import(Base1.getType());
       if (T.isNull())
         return true;
 
       SourceLocation EllipsisLoc;
-      if (Base1->isPackExpansion())
-        EllipsisLoc = Importer.Import(Base1->getEllipsisLoc());
+      if (Base1.isPackExpansion())
+        EllipsisLoc = Importer.Import(Base1.getEllipsisLoc());
 
       // Ensure that we have a definition for the base.
-      ImportDefinitionIfNeeded(Base1->getType()->getAsCXXRecordDecl());
+      ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl());
         
       Bases.push_back(
                     new (Importer.getToContext()) 
-                      CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
-                                       Base1->isVirtual(),
-                                       Base1->isBaseOfClass(),
-                                       Base1->getAccessSpecifierAsWritten(),
-                                   Importer.Import(Base1->getTypeSourceInfo()),
+                      CXXBaseSpecifier(Importer.Import(Base1.getSourceRange()),
+                                       Base1.isVirtual(),
+                                       Base1.isBaseOfClass(),
+                                       Base1.getAccessSpecifierAsWritten(),
+                                   Importer.Import(Base1.getTypeSourceInfo()),
                                        EllipsisLoc));
     }
     if (!Bases.empty())
@@ -2538,6 +2523,21 @@
         } else if (!D->isCompleteDefinition()) {
           // We have a forward declaration of this type, so adopt that forward
           // declaration rather than building a new one.
+            
+          // If one or both can be completed from external storage then try one
+          // last time to complete and compare them before doing this.
+            
+          if (FoundRecord->hasExternalLexicalStorage() &&
+              !FoundRecord->isCompleteDefinition())
+            FoundRecord->getASTContext().getExternalSource()->CompleteType(FoundRecord);
+          if (D->hasExternalLexicalStorage())
+            D->getASTContext().getExternalSource()->CompleteType(D);
+            
+          if (FoundRecord->isCompleteDefinition() &&
+              D->isCompleteDefinition() &&
+              !IsStructuralMatch(D, FoundRecord))
+            continue;
+              
           AdoptDecl = FoundRecord;
           continue;
         } else if (!SearchName) {
@@ -2716,7 +2716,7 @@
         FromEPI.NoexceptExpr) {
       FunctionProtoType::ExtProtoInfo DefaultEPI;
       FromTy = Importer.getFromContext().getFunctionType(
-          FromFPT->getResultType(), FromFPT->getArgTypes(), DefaultEPI);
+          FromFPT->getReturnType(), FromFPT->getParamTypes(), DefaultEPI);
       usedDifferentExceptionSpec = true;
     }
   }
@@ -2728,9 +2728,8 @@
   
   // Import the function parameters.
   SmallVector<ParmVarDecl *, 8> Parameters;
-  for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
-       P != PEnd; ++P) {
-    ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*P));
+  for (auto P : D->params()) {
+    ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(P));
     if (!ToP)
       return 0;
     
@@ -2838,10 +2837,8 @@
     return 0;
 
   unsigned Index = 1;
-  for (DeclContext::decl_iterator D = Owner->noload_decls_begin(),
-                               DEnd = Owner->noload_decls_end();
-       D != DEnd; ++D) {
-    if (*D == F)
+  for (const auto *D : Owner->noload_decls()) {
+    if (D == F)
       return Index;
 
     if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
@@ -2953,9 +2950,8 @@
     new (Importer.getToContext())NamedDecl*[D->getChainingSize()];
 
   unsigned i = 0;
-  for (IndirectFieldDecl::chain_iterator PI = D->chain_begin(),
-       PE = D->chain_end(); PI != PE; ++PI) {
-    Decl* D = Importer.Import(*PI);
+  for (auto *PI : D->chain()) {
+    Decl *D = Importer.Import(PI);
     if (!D)
       return 0;
     NamedChain[i++] = cast<NamedDecl>(D);
@@ -3014,8 +3010,7 @@
                                        Importer.Import(D->getInnerLocStart()),
                                               Loc, Name.getAsIdentifierInfo(),
                                               T, TInfo, D->getAccessControl(),
-                                              BitWidth, D->getSynthesize(),
-                                              D->getBackingIvarReferencedInAccessor());
+                                              BitWidth, D->getSynthesize());
   ToIvar->setLexicalDeclContext(LexicalDC);
   Importer.Imported(D, ToIvar);
   LexicalDC->addDeclInternal(ToIvar);
@@ -3215,11 +3210,11 @@
         continue;
 
       // Check return types.
-      if (!Importer.IsStructurallyEquivalent(D->getResultType(),
-                                             FoundMethod->getResultType())) {
+      if (!Importer.IsStructurallyEquivalent(D->getReturnType(),
+                                             FoundMethod->getReturnType())) {
         Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
-          << D->isInstanceMethod() << Name
-          << D->getResultType() << FoundMethod->getResultType();
+            << D->isInstanceMethod() << Name << D->getReturnType()
+            << FoundMethod->getReturnType();
         Importer.ToDiag(FoundMethod->getLocation(), 
                         diag::note_odr_objc_method_here)
           << D->isInstanceMethod() << Name;
@@ -3270,36 +3265,25 @@
   }
 
   // Import the result type.
-  QualType ResultTy = Importer.Import(D->getResultType());
+  QualType ResultTy = Importer.Import(D->getReturnType());
   if (ResultTy.isNull())
     return 0;
 
-  TypeSourceInfo *ResultTInfo = Importer.Import(D->getResultTypeSourceInfo());
+  TypeSourceInfo *ReturnTInfo = Importer.Import(D->getReturnTypeSourceInfo());
 
-  ObjCMethodDecl *ToMethod
-    = ObjCMethodDecl::Create(Importer.getToContext(),
-                             Loc,
-                             Importer.Import(D->getLocEnd()),
-                             Name.getObjCSelector(),
-                             ResultTy, ResultTInfo, DC,
-                             D->isInstanceMethod(),
-                             D->isVariadic(),
-                             D->isPropertyAccessor(),
-                             D->isImplicit(),
-                             D->isDefined(),
-                             D->getImplementationControl(),
-                             D->hasRelatedResultType());
+  ObjCMethodDecl *ToMethod = ObjCMethodDecl::Create(
+      Importer.getToContext(), Loc, Importer.Import(D->getLocEnd()),
+      Name.getObjCSelector(), ResultTy, ReturnTInfo, DC, D->isInstanceMethod(),
+      D->isVariadic(), D->isPropertyAccessor(), D->isImplicit(), D->isDefined(),
+      D->getImplementationControl(), D->hasRelatedResultType());
 
   // FIXME: When we decide to merge method definitions, we'll need to
   // deal with implicit parameters.
 
   // Import the parameters
   SmallVector<ParmVarDecl *, 5> ToParams;
-  for (ObjCMethodDecl::param_iterator FromP = D->param_begin(),
-                                   FromPEnd = D->param_end();
-       FromP != FromPEnd; 
-       ++FromP) {
-    ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*FromP));
+  for (auto *FromP : D->params()) {
+    ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(FromP));
     if (!ToP)
       return 0;
     
@@ -3558,12 +3542,8 @@
   
   // Import categories. When the categories themselves are imported, they'll
   // hook themselves into this interface.
-  for (ObjCInterfaceDecl::known_categories_iterator
-         Cat = From->known_categories_begin(),
-         CatEnd = From->known_categories_end();
-       Cat != CatEnd; ++Cat) {
-    Importer.Import(*Cat);
-  }
+  for (auto *Cat : From->known_categories())
+    Importer.Import(Cat);
   
   // If we have an @implementation, import it as well.
   if (From->getImplementation()) {
@@ -4240,8 +4220,7 @@
     return 0;
 
   VarTemplateDecl *D2 = VarTemplateDecl::Create(
-      Importer.getToContext(), DC, Loc, Name, TemplateParams, D2Templated,
-      /*PrevDecl=*/0);
+      Importer.getToContext(), DC, Loc, Name, TemplateParams, D2Templated);
   D2Templated->setDescribedVarTemplate(D2);
 
   D2->setAccess(D->getAccess());
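
Most of the ASTImporter hunks above are mechanical renames (getNumArgs()/getArgType() to getNumParams()/getParamType(), getResultType() to getReturnType()) with the structural-equivalence logic left intact. A rough standalone sketch of what that parameter-wise comparison amounts to (ProtoType here is a simplified hypothetical stand-in, not clang's FunctionProtoType):

    #include <iostream>
    #include <string>
    #include <vector>

    // Simplified hypothetical stand-in for a function prototype: parameter
    // type names plus a variadic flag.
    struct ProtoType {
      std::vector<std::string> ParamTypes;
      bool Variadic;

      unsigned getNumParams() const { return (unsigned)ParamTypes.size(); }
      const std::string &getParamType(unsigned I) const { return ParamTypes[I]; }
      bool isVariadic() const { return Variadic; }
    };

    // Mirrors the shape of the prototype check above: same arity, pairwise
    // equivalent parameter types, same variadic-ness.
    static bool isStructurallyEquivalent(const ProtoType &P1, const ProtoType &P2) {
      if (P1.getNumParams() != P2.getNumParams())
        return false;
      for (unsigned I = 0, N = P1.getNumParams(); I != N; ++I)
        if (P1.getParamType(I) != P2.getParamType(I))
          return false;
      return P1.isVariadic() == P2.isVariadic();
    }

    int main() {
      ProtoType A = {{"int", "char *"}, false};
      ProtoType B = {{"int", "char *"}, false};
      std::cout << std::boolalpha << isStructurallyEquivalent(A, B) << "\n"; // true
    }
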
diff --git a/lib/AST/ASTTypeTraits.cpp b/lib/AST/ASTTypeTraits.cpp
index ae47ea9..02d82d8 100644
--- a/lib/AST/ASTTypeTraits.cpp
+++ b/lib/AST/ASTTypeTraits.cpp
@@ -39,18 +39,24 @@
 #include "clang/AST/TypeNodes.def"
 };
 
-bool ASTNodeKind::isBaseOf(ASTNodeKind Other) const {
-  return isBaseOf(KindId, Other.KindId);
+bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
+  return isBaseOf(KindId, Other.KindId, Distance);
 }
 
 bool ASTNodeKind::isSame(ASTNodeKind Other) const {
   return KindId != NKI_None && KindId == Other.KindId;
 }
 
-bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived) {
+bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived,
+                           unsigned *Distance) {
   if (Base == NKI_None || Derived == NKI_None) return false;
-  while (Derived != Base && Derived != NKI_None)
+  unsigned Dist = 0;
+  while (Derived != Base && Derived != NKI_None) {
     Derived = AllKindInfo[Derived].ParentId;
+    ++Dist;
+  }
+  if (Distance)
+    *Distance = Dist;
   return Derived == Base;
 }
 
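The ASTTypeTraits hunk above extends isBaseOf() with an optional Distance out-parameter: the existing walk up the ParentId links now also counts the steps it takes. A compilable miniature of that walk over a toy kind table (the enum and table below are illustrative, not the generated clang ones):

    #include <iostream>

    // Illustrative miniature of the node-kind table: each kind records the
    // index of its parent kind; NKI_None doubles as the "no parent" marker.
    enum NodeKindId { NKI_None, NKI_Stmt, NKI_Expr, NKI_CallExpr, NKI_Count };

    constexpr NodeKindId ParentId[NKI_Count] = {
        NKI_None, // NKI_None
        NKI_None, // NKI_Stmt is a root
        NKI_Stmt, // NKI_Expr derives from Stmt
        NKI_Expr, // NKI_CallExpr derives from Expr
    };

    // Same walk as the patched isBaseOf(): climb parent links from Derived
    // toward Base, counting steps, and report the count if requested.
    bool isBaseOf(NodeKindId Base, NodeKindId Derived, unsigned *Distance) {
      if (Base == NKI_None || Derived == NKI_None)
        return false;
      unsigned Dist = 0;
      while (Derived != Base && Derived != NKI_None) {
        Derived = ParentId[Derived];
        ++Dist;
      }
      if (Distance)
        *Distance = Dist;
      return Derived == Base;
    }

    int main() {
      unsigned D = 0;
      if (isBaseOf(NKI_Stmt, NKI_CallExpr, &D))
        std::cout << "Stmt is a base of CallExpr, distance " << D << "\n"; // 2
    }
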
diff --git a/lib/AST/Android.mk b/lib/AST/Android.mk
index 25eb48e..9d0c005 100644
--- a/lib/AST/Android.mk
+++ b/lib/AST/Android.mk
@@ -10,6 +10,7 @@
 	AttrImpl.inc	\
 	AttrList.inc	\
 	Attrs.inc	\
+	AttrVisitor.inc	\
 	CommentCommandInfo.inc \
 	CommentCommandList.inc \
 	CommentHTMLNamedCharacterReferences.inc \
diff --git a/lib/AST/AttrImpl.cpp b/lib/AST/AttrImpl.cpp
index 7af3c8b..0bf6bcd 100644
--- a/lib/AST/AttrImpl.cpp
+++ b/lib/AST/AttrImpl.cpp
@@ -24,6 +24,4 @@
 
 void InheritableParamAttr::anchor() { }
 
-void MSInheritanceAttr::anchor() { }
-
 #include "clang/AST/AttrImpl.inc"
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index 461e8b3..9006be6 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -57,29 +57,8 @@
   TypePrinter.cpp
   VTableBuilder.cpp
   VTTBuilder.cpp
-  )
 
-add_dependencies(clangAST
-  ClangARMNeon
-  ClangAttrClasses
-  ClangAttrList
-  ClangAttrImpl
-  ClangAttrDump
-  ClangCommentCommandInfo
-  ClangCommentCommandList
-  ClangCommentNodes
-  ClangCommentHTMLTags
-  ClangCommentHTMLTagsProperties
-  ClangCommentHTMLNamedCharacterReferences
-  ClangDeclNodes
-  ClangDiagnosticAST
-  ClangDiagnosticComment
-  ClangDiagnosticCommon
-  ClangDiagnosticSema
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangAST
+  LINK_LIBS
   clangBasic
   clangLex
   )
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index b51014b..eef043c 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -35,16 +35,12 @@
   std::copy(Decls.begin(), Decls.end(), DeclsFound);
 }
 
-CXXBasePaths::decl_iterator CXXBasePaths::found_decls_begin() {
+CXXBasePaths::decl_range CXXBasePaths::found_decls() {
   if (NumDeclsFound == 0)
     ComputeDeclsFound();
-  return DeclsFound;
-}
 
-CXXBasePaths::decl_iterator CXXBasePaths::found_decls_end() {
-  if (NumDeclsFound == 0)
-    ComputeDeclsFound();
-  return DeclsFound + NumDeclsFound;
+  return decl_range(decl_iterator(DeclsFound),
+                    decl_iterator(DeclsFound + NumDeclsFound));
 }
 
 /// isAmbiguous - Determines whether the set of paths provided is
@@ -141,9 +137,8 @@
   const CXXRecordDecl *Record = this;
   bool AllMatches = true;
   while (true) {
-    for (CXXRecordDecl::base_class_const_iterator
-           I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
-      const RecordType *Ty = I->getType()->getAs<RecordType>();
+    for (const auto &I : Record->bases()) {
+      const RecordType *Ty = I.getType()->getAs<RecordType>();
       if (!Ty) {
         if (AllowShortCircuit) return false;
         AllMatches = false;
@@ -186,14 +181,11 @@
   AccessSpecifier AccessToHere = ScratchPath.Access;
   bool IsFirstStep = ScratchPath.empty();
 
-  for (CXXRecordDecl::base_class_const_iterator BaseSpec = Record->bases_begin(),
-         BaseSpecEnd = Record->bases_end(); 
-       BaseSpec != BaseSpecEnd; 
-       ++BaseSpec) {
+  for (const auto &BaseSpec : Record->bases()) {
     // Find the record of the base class subobjects for this type.
-    QualType BaseType = Context.getCanonicalType(BaseSpec->getType())
-                                                          .getUnqualifiedType();
-    
+    QualType BaseType =
+        Context.getCanonicalType(BaseSpec.getType()).getUnqualifiedType();
+
     // C++ [temp.dep]p3:
     //   In the definition of a class template or a member of a class template,
     //   if a base class of the class template depends on a template-parameter,
@@ -208,7 +200,7 @@
     std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
     bool VisitBase = true;
     bool SetVirtual = false;
-    if (BaseSpec->isVirtual()) {
+    if (BaseSpec.isVirtual()) {
       VisitBase = !Subobjects.first;
       Subobjects.first = true;
       if (isDetectingVirtual() && DetectedVirtual == 0) {
@@ -223,9 +215,9 @@
     if (isRecordingPaths()) {
       // Add this base specifier to the current path.
       CXXBasePathElement Element;
-      Element.Base = &*BaseSpec;
+      Element.Base = &BaseSpec;
       Element.Class = Record;
-      if (BaseSpec->isVirtual())
+      if (BaseSpec.isVirtual())
         Element.SubobjectNumber = 0;
       else
         Element.SubobjectNumber = Subobjects.second;
@@ -247,16 +239,16 @@
       // 3. Otherwise, overall access is determined by the most restrictive
       //    access in the sequence.
       if (IsFirstStep)
-        ScratchPath.Access = BaseSpec->getAccessSpecifier();
+        ScratchPath.Access = BaseSpec.getAccessSpecifier();
       else
         ScratchPath.Access = CXXRecordDecl::MergeAccess(AccessToHere, 
-                                                 BaseSpec->getAccessSpecifier());
+                                                 BaseSpec.getAccessSpecifier());
     }
     
     // Track whether there's a path involving this specific base.
     bool FoundPathThroughBase = false;
     
-    if (BaseMatches(BaseSpec, ScratchPath, UserData)) {
+    if (BaseMatches(&BaseSpec, ScratchPath, UserData)) {
       // We've found a path that terminates at this base.
       FoundPath = FoundPathThroughBase = true;
       if (isRecordingPaths()) {
@@ -269,7 +261,7 @@
       }
     } else if (VisitBase) {
       CXXRecordDecl *BaseRecord
-        = cast<CXXRecordDecl>(BaseSpec->getType()->castAs<RecordType>()
+        = cast<CXXRecordDecl>(BaseSpec.getType()->castAs<RecordType>()
                                 ->getDecl());
       if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) {
         // C++ [class.member.lookup]p2:
@@ -501,14 +493,13 @@
     SubobjectNumber
       = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];
 
-  for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
-         BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) {
-    if (const RecordType *RT = Base->getType()->getAs<RecordType>()) {
+  for (const auto &Base : RD->bases()) {
+    if (const RecordType *RT = Base.getType()->getAs<RecordType>()) {
       const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
       if (!BaseDecl->isPolymorphic())
         continue;
 
-      if (Overriders.empty() && !Base->isVirtual()) {
+      if (Overriders.empty() && !Base.isVirtual()) {
         // There are no other overriders of virtual member functions,
         // so let the base class fill in our overriders for us.
         Collect(BaseDecl, false, InVirtualSubobject, Overriders);
@@ -522,7 +513,7 @@
       // its base classes) more than once.
       CXXFinalOverriderMap ComputedBaseOverriders;
       CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
-      if (Base->isVirtual()) {
+      if (Base.isVirtual()) {
         CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
         BaseOverriders = MyVirtualOverriders;
         if (!MyVirtualOverriders) {
@@ -551,10 +542,7 @@
     }
   }
 
-  for (CXXRecordDecl::method_iterator M = RD->method_begin(), 
-                                   MEnd = RD->method_end();
-       M != MEnd;
-       ++M) {
+  for (auto *M : RD->methods()) {
     // We only care about virtual methods.
     if (!M->isVirtual())
       continue;
@@ -702,13 +690,12 @@
   if (Layout.isPrimaryBaseVirtual())
     Bases.insert(Layout.getPrimaryBase());
 
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    assert(!I->getType()->isDependentType() &&
+  for (const auto &I : RD->bases()) {
+    assert(!I.getType()->isDependentType() &&
            "Cannot get indirect primary bases for class with dependent bases.");
 
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
 
     // Only bases with virtual bases participate in computing the
     // indirect primary virtual base classes.
@@ -725,13 +712,12 @@
   if (!getNumVBases())
     return;
 
-  for (CXXRecordDecl::base_class_const_iterator I = bases_begin(),
-       E = bases_end(); I != E; ++I) {
-    assert(!I->getType()->isDependentType() &&
+  for (const auto &I : bases()) {
+    assert(!I.getType()->isDependentType() &&
            "Cannot get indirect primary bases for class with dependent bases.");
 
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
 
     // Only bases with virtual bases participate in computing the
     // indirect primary virtual base classes.
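
Besides the range-based for migrations, the CXXInheritance hunk above folds found_decls_begin()/found_decls_end() into a single found_decls() range so callers can iterate with range-for directly. A small self-contained sketch of that accessor pattern, with IteratorRange as a hand-rolled stand-in for llvm::iterator_range:

    #include <iostream>
    #include <vector>

    // Hand-rolled stand-in for llvm::iterator_range: a begin/end pair that
    // range-based for loops can consume.
    template <typename IterT> struct IteratorRange {
      IterT BeginIt, EndIt;
      IterT begin() const { return BeginIt; }
      IterT end() const { return EndIt; }
    };

    struct Decl { const char *Name; };

    // Analogous to CXXBasePaths after the change: expose one found_decls()
    // range instead of a found_decls_begin()/found_decls_end() pair.
    class BasePaths {
      std::vector<Decl *> DeclsFound;

    public:
      typedef std::vector<Decl *>::const_iterator decl_iterator;
      typedef IteratorRange<decl_iterator> decl_range;

      void add(Decl *D) { DeclsFound.push_back(D); }
      decl_range found_decls() const {
        return {DeclsFound.begin(), DeclsFound.end()};
      }
    };

    int main() {
      Decl A = {"foo"}, B = {"bar"};
      BasePaths Paths;
      Paths.add(&A);
      Paths.add(&B);
      for (const Decl *D : Paths.found_decls()) // range-for over the accessor
        std::cout << D->Name << "\n";
    }
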
diff --git a/lib/AST/Comment.cpp b/lib/AST/Comment.cpp
index f24a23d..b0b2351 100644
--- a/lib/AST/Comment.cpp
+++ b/lib/AST/Comment.cpp
@@ -159,7 +159,7 @@
     Kind = FunctionKind;
     ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
                                               FD->getNumParams());
-    ResultType = FD->getResultType();
+    ReturnType = FD->getReturnType();
     unsigned NumLists = FD->getNumTemplateParameterLists();
     if (NumLists != 0) {
       TemplateKind = TemplateSpecialization;
@@ -180,7 +180,7 @@
     Kind = FunctionKind;
     ParamVars = ArrayRef<const ParmVarDecl *>(MD->param_begin(),
                                               MD->param_size());
-    ResultType = MD->getResultType();
+    ReturnType = MD->getReturnType();
     IsObjCMethod = true;
     IsInstanceMethod = MD->isInstanceMethod();
     IsClassMethod = !IsInstanceMethod;
@@ -193,7 +193,7 @@
     const FunctionDecl *FD = FTD->getTemplatedDecl();
     ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
                                               FD->getNumParams());
-    ResultType = FD->getResultType();
+    ReturnType = FD->getReturnType();
     TemplateParameters = FTD->getTemplateParameters();
     break;
   }
@@ -251,6 +251,16 @@
         TL = PointerTL.getPointeeLoc().getUnqualifiedLoc();
         continue;
       }
+      // Look through reference types.
+      if (ReferenceTypeLoc ReferenceTL = TL.getAs<ReferenceTypeLoc>()) {
+        TL = ReferenceTL.getPointeeLoc().getUnqualifiedLoc();
+        continue;
+      }
+      // Look through adjusted types.
+      if (AdjustedTypeLoc ATL = TL.getAs<AdjustedTypeLoc>()) {
+        TL = ATL.getOriginalLoc();
+        continue;
+      }
       if (BlockPointerTypeLoc BlockPointerTL =
               TL.getAs<BlockPointerTypeLoc>()) {
         TL = BlockPointerTL.getPointeeLoc().getUnqualifiedLoc();
@@ -261,13 +271,39 @@
         TL = MemberPointerTL.getPointeeLoc().getUnqualifiedLoc();
         continue;
       }
+      if (ElaboratedTypeLoc ETL = TL.getAs<ElaboratedTypeLoc>()) {
+        TL = ETL.getNamedTypeLoc();
+        continue;
+      }
       // Is this a typedef for a function type?
       if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
         Kind = FunctionKind;
         ArrayRef<ParmVarDecl *> Params = FTL.getParams();
         ParamVars = ArrayRef<const ParmVarDecl *>(Params.data(),
                                                   Params.size());
-        ResultType = FTL.getResultLoc().getType();
+        ReturnType = FTL.getReturnLoc().getType();
+        break;
+      }
+      if (TemplateSpecializationTypeLoc STL =
+              TL.getAs<TemplateSpecializationTypeLoc>()) {
+        // If we have a typedef to a template specialization with exactly one
+        // template argument of a function type, this looks like std::function,
+        // boost::function, or other function wrapper.  Treat these typedefs as
+        // functions.
+        if (STL.getNumArgs() != 1)
+          break;
+        TemplateArgumentLoc MaybeFunction = STL.getArgLoc(0);
+        if (MaybeFunction.getArgument().getKind() != TemplateArgument::Type)
+          break;
+        TypeSourceInfo *MaybeFunctionTSI = MaybeFunction.getTypeSourceInfo();
+        TypeLoc TL = MaybeFunctionTSI->getTypeLoc().getUnqualifiedLoc();
+        if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
+          Kind = FunctionKind;
+          ArrayRef<ParmVarDecl *> Params = FTL.getParams();
+          ParamVars = ArrayRef<const ParmVarDecl *>(Params.data(),
+                                                    Params.size());
+          ReturnType = FTL.getReturnLoc().getType();
+        }
         break;
       }
       break;
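
The Comment.cpp hunk above makes DeclInfo::fill() look through more type sugar (references, adjusted and elaborated types, and std::function-style wrappers) before deciding whether a typedef names a function. A toy sketch of that peel-until-function loop; the Type struct below is a hypothetical miniature, not clang's TypeLoc machinery:

    #include <iostream>

    // Hypothetical miniature of a type node: K says what it is, Inner is the
    // pointee/referenced/wrapped type (null for leaf types).
    struct Type {
      enum Kind { Function, Pointer, Reference, Paren, Builtin };
      Kind K;
      const Type *Inner;
    };

    // Peel sugar until we either hit a function type or something we do not
    // look through, mirroring the shape of the typedef handling above.
    const Type *getFunctionTypeOrNull(const Type *T) {
      while (T) {
        switch (T->K) {
        case Type::Function:
          return T;             // found a function type
        case Type::Pointer:     // look through pointers
        case Type::Reference:   // look through references
        case Type::Paren:       // look through paren-like sugar
          T = T->Inner;
          continue;
        case Type::Builtin:
          return nullptr;       // a plain type, not a function wrapper
        }
      }
      return nullptr;
    }

    int main() {
      Type Fn = {Type::Function, nullptr};
      Type Ptr = {Type::Pointer, &Fn};
      Type Ref = {Type::Reference, &Ptr};
      std::cout << (getFunctionTypeOrNull(&Ref) ? "function-like\n"
                                                : "not a function\n");
    }
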
diff --git a/lib/AST/CommentCommandTraits.cpp b/lib/AST/CommentCommandTraits.cpp
index 01bd12e..8879aac 100644
--- a/lib/AST/CommentCommandTraits.cpp
+++ b/lib/AST/CommentCommandTraits.cpp
@@ -43,47 +43,42 @@
   return getRegisteredCommandInfo(CommandID);
 }
 
-static void
-HelperTypoCorrectCommandInfo(SmallVectorImpl<const CommandInfo *> &BestCommand,
-                             StringRef Typo, const CommandInfo *Command) {
-  const unsigned MaxEditDistance = 1;
-  unsigned BestEditDistance = MaxEditDistance + 1;
-  StringRef Name = Command->Name;
-  
-  unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
-  if (MinPossibleEditDistance > 0 &&
-      Typo.size() / MinPossibleEditDistance < 1)
-    return;
-  unsigned EditDistance = Typo.edit_distance(Name, true, MaxEditDistance);
-  if (EditDistance > MaxEditDistance)
-    return;
-  if (EditDistance == BestEditDistance)
-    BestCommand.push_back(Command);
-  else if (EditDistance < BestEditDistance) {
-    BestCommand.clear();
-    BestCommand.push_back(Command);
-    BestEditDistance = EditDistance;
-  }
-}
-
 const CommandInfo *
 CommandTraits::getTypoCorrectCommandInfo(StringRef Typo) const {
-  // single character command impostures, such as \t or \n must not go
+  // Single-character command impostures, such as \t or \n, should not go
   // through the fixit logic.
   if (Typo.size() <= 1)
-    return NULL;
-  
+    return nullptr;
+
+  // The maximum edit distance we're prepared to accept.
+  const unsigned MaxEditDistance = 1;
+
+  unsigned BestEditDistance = MaxEditDistance;
   SmallVector<const CommandInfo *, 2> BestCommand;
-  
-  const int NumOfCommands = llvm::array_lengthof(Commands);
-  for (int i = 0; i < NumOfCommands; i++)
-    HelperTypoCorrectCommandInfo(BestCommand, Typo, &Commands[i]);
-  
-  for (unsigned i = 0, e = RegisteredCommands.size(); i != e; ++i)
-    if (!RegisteredCommands[i]->IsUnknownCommand)
-      HelperTypoCorrectCommandInfo(BestCommand, Typo, RegisteredCommands[i]);
-  
-  return (BestCommand.size() != 1) ? NULL : BestCommand[0];
+
+  auto ConsiderCorrection = [&](const CommandInfo *Command) {
+    StringRef Name = Command->Name;
+
+    unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
+    if (MinPossibleEditDistance <= BestEditDistance) {
+      unsigned EditDistance = Typo.edit_distance(Name, true, BestEditDistance);
+      if (EditDistance < BestEditDistance) {
+        BestEditDistance = EditDistance;
+        BestCommand.clear();
+      }
+      if (EditDistance == BestEditDistance)
+        BestCommand.push_back(Command);
+    }
+  };
+
+  for (const auto &Command : Commands)
+    ConsiderCorrection(&Command);
+
+  for (const auto *Command : RegisteredCommands)
+    if (!Command->IsUnknownCommand)
+      ConsiderCorrection(Command);
+
+  return BestCommand.size() == 1 ? BestCommand[0] : nullptr;
 }
 
 CommandInfo *CommandTraits::createCommandInfoWithName(StringRef CommandName) {
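
The CommentCommandTraits rewrite above replaces the standalone helper with a local lambda and tightens the selection: a correction is accepted only when exactly one command name achieves the best edit distance within the limit of 1. A self-contained sketch of that policy; editDistance() below is a plain Levenshtein stand-in for StringRef::edit_distance and omits its early-termination bound:

    #include <algorithm>
    #include <cstdlib>
    #include <iostream>
    #include <string>
    #include <vector>

    // Plain dynamic-programming Levenshtein distance, standing in for
    // StringRef::edit_distance (which also takes an upper bound we skip here).
    static unsigned editDistance(const std::string &A, const std::string &B) {
      std::vector<unsigned> Row(B.size() + 1);
      for (unsigned J = 0; J <= B.size(); ++J)
        Row[J] = J;
      for (unsigned I = 1; I <= A.size(); ++I) {
        unsigned Prev = Row[0];
        Row[0] = I;
        for (unsigned J = 1; J <= B.size(); ++J) {
          unsigned Cur = Row[J];
          Row[J] = std::min({Row[J] + 1, Row[J - 1] + 1,
                             Prev + (A[I - 1] == B[J - 1] ? 0u : 1u)});
          Prev = Cur;
        }
      }
      return Row[B.size()];
    }

    // Same selection policy as the rewritten getTypoCorrectCommandInfo():
    // accept a correction only if exactly one candidate reaches the best
    // distance, and never accept anything worse than distance 1.
    static const std::string *correctTypo(const std::string &Typo,
                                          const std::vector<std::string> &Names) {
      if (Typo.size() <= 1)
        return nullptr; // single-character "commands" are never corrected
      const unsigned MaxEditDistance = 1;
      unsigned Best = MaxEditDistance;
      std::vector<const std::string *> BestNames;
      for (const std::string &Name : Names) {
        unsigned MinPossible =
            (unsigned)std::abs((int)Name.size() - (int)Typo.size());
        if (MinPossible > Best)
          continue; // length difference alone already rules this one out
        unsigned Dist = editDistance(Typo, Name);
        if (Dist < Best) {
          Best = Dist;
          BestNames.clear();
        }
        if (Dist == Best)
          BestNames.push_back(&Name);
      }
      return BestNames.size() == 1 ? BestNames[0] : nullptr;
    }

    int main() {
      std::vector<std::string> Commands = {"returns", "return", "param", "brief"};
      if (const std::string *Fix = correctTypo("brif", Commands))
        std::cout << "did you mean '" << *Fix << "'?\n"; // picks "brief"
      else
        std::cout << "no unique correction\n";
    }
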
diff --git a/lib/AST/CommentLexer.cpp b/lib/AST/CommentLexer.cpp
index 01ed3ce..792a832 100644
--- a/lib/AST/CommentLexer.cpp
+++ b/lib/AST/CommentLexer.cpp
@@ -268,6 +268,19 @@
     
 } // unnamed namespace
 
+void Lexer::formTokenWithChars(Token &Result, const char *TokEnd,
+                               tok::TokenKind Kind) {
+  const unsigned TokLen = TokEnd - BufferPtr;
+  Result.setLocation(getSourceLocation(BufferPtr));
+  Result.setKind(Kind);
+  Result.setLength(TokLen);
+#ifndef NDEBUG
+  Result.TextPtr = "<UNSET>";
+  Result.IntVal = 7;
+#endif
+  BufferPtr = TokEnd;
+}
+
 void Lexer::lexCommentText(Token &T) {
   assert(CommentState == LCS_InsideBCPLComment ||
          CommentState == LCS_InsideCComment);
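
The CommentLexer hunk above adds formTokenWithChars(), which derives the token length from the [BufferPtr, TokEnd) range and advances the buffer pointer in one place. A hypothetical miniature of that bookkeeping (MiniLexer and Token below are simplified stand-ins, not the real comment lexer types):

    #include <cassert>
    #include <iostream>
    #include <string>

    // Simplified stand-in: a token is described by its start offset and
    // length within the comment buffer.
    struct Token {
      unsigned Offset;
      unsigned Length;
      int Kind;
    };

    struct MiniLexer {
      const char *BufferStart;
      const char *BufferPtr;

      // Same idea as the added formTokenWithChars(): derive the token length
      // from the [BufferPtr, TokEnd) range and advance BufferPtr past it.
      void formTokenWithChars(Token &Result, const char *TokEnd, int Kind) {
        assert(TokEnd >= BufferPtr && "token end before its start?");
        Result.Offset = (unsigned)(BufferPtr - BufferStart);
        Result.Length = (unsigned)(TokEnd - BufferPtr);
        Result.Kind = Kind;
        BufferPtr = TokEnd;
      }
    };

    int main() {
      std::string Comment = "/// \\brief Something";
      MiniLexer L = {Comment.data(), Comment.data() + 4}; // just past "/// "
      Token T;
      L.formTokenWithChars(T, Comment.data() + 10, /*Kind=*/1);
      std::cout << Comment.substr(T.Offset, T.Length) << "\n"; // "\brief"
    }
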
diff --git a/lib/AST/CommentSema.cpp b/lib/AST/CommentSema.cpp
index 1c6222f..e51f878 100644
--- a/lib/AST/CommentSema.cpp
+++ b/lib/AST/CommentSema.cpp
@@ -68,8 +68,12 @@
   Command->setParagraph(Paragraph);
   checkBlockCommandEmptyParagraph(Command);
   checkBlockCommandDuplicate(Command);
-  checkReturnsCommand(Command);
-  checkDeprecatedCommand(Command);
+  if (ThisDeclInfo) {
+    // These checks only make sense if the comment is attached to a
+    // declaration.
+    checkReturnsCommand(Command);
+    checkDeprecatedCommand(Command);
+  }
 }
 
 ParamCommandComment *Sema::actOnParamCommandStart(
@@ -122,7 +126,7 @@
     << (DiagSelect-1) << (DiagSelect-1)
     << Comment->getSourceRange();
 }
-  
+
 void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) {
   const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID());
   if (!Info->IsRecordLikeDeclarationCommand)
@@ -558,8 +562,11 @@
 void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
   if (!Traits.getCommandInfo(Command->getCommandID())->IsReturnsCommand)
     return;
+
+  assert(ThisDeclInfo && "should not call this check on a bare comment");
+
   if (isFunctionDecl()) {
-    if (ThisDeclInfo->ResultType->isVoidType()) {
+    if (ThisDeclInfo->ReturnType->isVoidType()) {
       unsigned DiagKind;
       switch (ThisDeclInfo->CommentDecl->getKind()) {
       default:
@@ -586,7 +593,7 @@
   }
   else if (isObjCPropertyDecl())
     return;
-  
+
   Diag(Command->getLocation(),
        diag::warn_doc_returns_not_attached_to_a_function_decl)
     << Command->getCommandMarker()
@@ -636,6 +643,8 @@
   if (!Traits.getCommandInfo(Command->getCommandID())->IsDeprecatedCommand)
     return;
 
+  assert(ThisDeclInfo && "should not call this check on a bare comment");
+
   const Decl *D = ThisDeclInfo->CommentDecl;
   if (!D)
     return;
@@ -783,11 +792,14 @@
 }
 
 bool Sema::isFunctionOrMethodVariadic() {
-  if (!isAnyFunctionDecl() && !isObjCMethodDecl())
+  if (!isAnyFunctionDecl() && !isObjCMethodDecl() && !isFunctionTemplateDecl())
     return false;
   if (const FunctionDecl *FD =
         dyn_cast<FunctionDecl>(ThisDeclInfo->CurrentDecl))
     return FD->isVariadic();
+  if (const FunctionTemplateDecl *FTD =
+        dyn_cast<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl))
+    return FTD->getTemplatedDecl()->isVariadic();
   if (const ObjCMethodDecl *MD =
         dyn_cast<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl))
     return MD->isVariadic();
@@ -812,7 +824,7 @@
   }
   return false;
 }
-  
+
 bool Sema::isObjCPropertyDecl() {
   if (!ThisDeclInfo)
     return false;
@@ -834,8 +846,8 @@
     return false;
   if (!ThisDeclInfo->IsFilled)
     inspectThisDecl();
-  return isUnionDecl() || isClassOrStructDecl() 
-         || isObjCInterfaceDecl() || isObjCProtocolDecl();
+  return isUnionDecl() || isClassOrStructDecl() || isObjCInterfaceDecl() ||
+         isObjCProtocolDecl();
 }
 
 bool Sema::isUnionDecl() {
@@ -848,7 +860,7 @@
     return RD->isUnion();
   return false;
 }
-  
+
 bool Sema::isClassOrStructDecl() {
   if (!ThisDeclInfo)
     return false;
@@ -858,7 +870,7 @@
          isa<RecordDecl>(ThisDeclInfo->CurrentDecl) &&
          !isUnionDecl();
 }
-  
+
 bool Sema::isClassTemplateDecl() {
   if (!ThisDeclInfo)
     return false;
@@ -874,7 +886,7 @@
   if (!ThisDeclInfo->IsFilled)
     inspectThisDecl();
   return ThisDeclInfo->CurrentDecl &&
-  (isa<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl));
+         (isa<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl));
 }
 
 bool Sema::isObjCInterfaceDecl() {
@@ -885,7 +897,7 @@
   return ThisDeclInfo->CurrentDecl &&
          isa<ObjCInterfaceDecl>(ThisDeclInfo->CurrentDecl);
 }
-  
+
 bool Sema::isObjCProtocolDecl() {
   if (!ThisDeclInfo)
     return false;
@@ -894,7 +906,7 @@
   return ThisDeclInfo->CurrentDecl &&
          isa<ObjCProtocolDecl>(ThisDeclInfo->CurrentDecl);
 }
-  
+
 ArrayRef<const ParmVarDecl *> Sema::getParamVars() {
   if (!ThisDeclInfo->IsFilled)
     inspectThisDecl();
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 6bd9858..15410f0 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -13,6 +13,7 @@
 
 #include "clang/AST/Decl.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
 #include "clang/AST/ASTMutationListener.h"
 #include "clang/AST/Attr.h"
 #include "clang/AST/DeclCXX.h"
@@ -29,7 +30,6 @@
 #include "clang/Basic/Specifiers.h"
 #include "clang/Basic/TargetInfo.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/type_traits.h"
 #include <algorithm>
 
 using namespace clang;
@@ -158,8 +158,7 @@
 /// Does the given declaration have member specialization information,
 /// and if so, is it an explicit specialization?
 template <class T> static typename
-llvm::enable_if_c<!llvm::is_base_of<RedeclarableTemplateDecl, T>::value,
-                  bool>::type
+std::enable_if<!std::is_base_of<RedeclarableTemplateDecl, T>::value, bool>::type
 isExplicitMemberSpecialization(const T *D) {
   if (const MemberSpecializationInfo *member =
         D->getMemberSpecializationInfo()) {
@@ -209,11 +208,8 @@
   // If we're on Mac OS X, an 'availability' for Mac OS X attribute
   // implies visibility(default).
   if (D->getASTContext().getTargetInfo().getTriple().isOSDarwin()) {
-    for (specific_attr_iterator<AvailabilityAttr> 
-              A = D->specific_attr_begin<AvailabilityAttr>(),
-           AEnd = D->specific_attr_end<AvailabilityAttr>();
-         A != AEnd; ++A)
-      if ((*A)->getPlatform()->getName().equals("macosx"))
+    for (const auto *A : D->specific_attrs<AvailabilityAttr>())
+      if (A->getPlatform()->getName().equals("macosx"))
         return DefaultVisibility;
   }
 
@@ -512,9 +508,9 @@
   return First->isInExternCContext();
 }
 
-static bool isSingleLineExternC(const Decl &D) {
+static bool isSingleLineLanguageLinkage(const Decl &D) {
   if (const LinkageSpecDecl *SD = dyn_cast<LinkageSpecDecl>(D.getDeclContext()))
-    if (SD->getLanguage() == LinkageSpecDecl::lang_c && !SD->hasBraces())
+    if (!SD->hasBraces())
       return true;
   return false;
 }
@@ -548,7 +544,7 @@
 
       if (Var->getStorageClass() != SC_Extern &&
           Var->getStorageClass() != SC_PrivateExtern &&
-          !isSingleLineExternC(*Var))
+          !isSingleLineLanguageLinkage(*Var))
         return LinkageInfo::internal();
     }
 
@@ -561,16 +557,10 @@
       if (PrevVar->getStorageClass() == SC_Static)
         return LinkageInfo::internal();
     }
-  } else if (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)) {
+  } else if (const FunctionDecl *Function = D->getAsFunction()) {
     // C++ [temp]p4:
     //   A non-member function template can have internal linkage; any
     //   other template name shall have external linkage.
-    const FunctionDecl *Function = 0;
-    if (const FunctionTemplateDecl *FunTmpl
-                                        = dyn_cast<FunctionTemplateDecl>(D))
-      Function = FunTmpl->getTemplatedDecl();
-    else
-      Function = cast<FunctionDecl>(D);
 
     // Explicitly declared static.
     if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
@@ -782,11 +772,18 @@
   // really have linkage, but it's convenient to say they do for the
   // purposes of calculating linkage of pointer-to-data-member
   // template arguments.
+  //
+  // Templates also don't officially have linkage, but since we ignore
+  // the C++ standard and look at template arguments when determining
+  // linkage and visibility of a template specialization, we might hit
+  // a template template argument that way. If we do, we need to
+  // consider its linkage.
   if (!(isa<CXXMethodDecl>(D) ||
         isa<VarDecl>(D) ||
         isa<FieldDecl>(D) ||
         isa<IndirectFieldDecl>(D) ||
-        isa<TagDecl>(D)))
+        isa<TagDecl>(D) ||
+        isa<TemplateDecl>(D)))
     return LinkageInfo::none();
 
   LinkageInfo LV;
@@ -1242,16 +1239,13 @@
 
     // We have just computed the linkage for this decl. By induction we know
     // that all other computed linkages match, check that the one we just
-    // computed
-    // also does.
+    // computed also does.
     NamedDecl *Old = NULL;
-    for (NamedDecl::redecl_iterator I = D->redecls_begin(),
-                                    E = D->redecls_end();
-         I != E; ++I) {
-      NamedDecl *T = cast<NamedDecl>(*I);
+    for (auto I : D->redecls()) {
+      NamedDecl *T = cast<NamedDecl>(I);
       if (T == D)
         continue;
-      if (T->hasCachedLinkage()) {
+      if (!T->isInvalidDecl() && T->hasCachedLinkage()) {
         Old = T;
         break;
       }
@@ -1270,13 +1264,9 @@
 }
 
 std::string NamedDecl::getQualifiedNameAsString() const {
-  return getQualifiedNameAsString(getASTContext().getPrintingPolicy());
-}
-
-std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const {
   std::string QualName;
   llvm::raw_string_ostream OS(QualName);
-  printQualifiedName(OS, P);
+  printQualifiedName(OS, getASTContext().getPrintingPolicy());
   return OS.str();
 }
 
@@ -1314,12 +1304,12 @@
                                                             P);
     } else if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(*I)) {
       if (ND->isAnonymousNamespace())
-        OS << "<anonymous namespace>";
+        OS << "(anonymous namespace)";
       else
         OS << *ND;
     } else if (const RecordDecl *RD = dyn_cast<RecordDecl>(*I)) {
       if (!RD->getIdentifier())
-        OS << "<anonymous " << RD->getKindName() << '>';
+        OS << "(anonymous " << RD->getKindName() << ')';
       else
         OS << *RD;
     } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
@@ -1352,7 +1342,7 @@
   if (getDeclName())
     OS << *this;
   else
-    OS << "<anonymous>";
+    OS << "(anonymous)";
 }
 
 void NamedDecl::getNameForDiagnostic(raw_ostream &OS,
@@ -1392,6 +1382,7 @@
   if (isa<ObjCMethodDecl>(this))
     return false;
 
+  // FIXME: Is this correct if one of the decls comes from an inline namespace?
   if (isa<ObjCInterfaceDecl>(this) && isa<ObjCCompatibleAliasDecl>(OldD))
     return true;
 
@@ -1418,14 +1409,19 @@
 
   // A typedef of an Objective-C class type can replace an Objective-C class
   // declaration or definition, and vice versa.
+  // FIXME: Is this correct if one of the decls comes from an inline namespace?
   if ((isa<TypedefNameDecl>(this) && isa<ObjCInterfaceDecl>(OldD)) ||
       (isa<ObjCInterfaceDecl>(this) && isa<TypedefNameDecl>(OldD)))
     return true;
-  
+
   // For non-function declarations, if the declarations are of the
-  // same kind then this must be a redeclaration, or semantic analysis
-  // would not have given us the new declaration.
-  return this->getKind() == OldD->getKind();
+  // same kind and have the same parent then this must be a redeclaration,
+  // or semantic analysis would not have given us the new declaration.
+  // Note that inline namespaces can give us two declarations with the same
+  // name and kind in the same scope but different contexts.
+  return this->getKind() == OldD->getKind() &&
+         this->getDeclContext()->getRedeclContext()->Equals(
+             OldD->getDeclContext()->getRedeclContext());
 }
 
 bool NamedDecl::hasLinkage() const {
@@ -1453,11 +1449,9 @@
 
   if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<MSPropertyDecl>(D))
     return true;
-  if (isa<CXXMethodDecl>(D))
-    return cast<CXXMethodDecl>(D)->isInstance();
-  if (isa<FunctionTemplateDecl>(D))
-    return cast<CXXMethodDecl>(cast<FunctionTemplateDecl>(D)
-                                 ->getTemplatedDecl())->isInstance();
+  if (const CXXMethodDecl *MD =
+          dyn_cast_or_null<CXXMethodDecl>(D->getAsFunction()))
+    return MD->isInstance();
   return false;
 }
 
@@ -1574,7 +1568,9 @@
 SourceRange DeclaratorDecl::getSourceRange() const {
   SourceLocation RangeEnd = getLocation();
   if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
-    if (typeIsPostfix(TInfo->getType()))
+    // If the declaration has no name or the type extends past the name take the
+    // end location of the type.
+    if (!getDeclName() || typeIsPostfix(TInfo->getType()))
       RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
   }
   return SourceRange(getOuterLocStart(), RangeEnd);
@@ -1624,8 +1620,10 @@
                  SourceLocation IdLoc, IdentifierInfo *Id, QualType T,
                  TypeSourceInfo *TInfo, StorageClass SC)
     : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc), Init() {
-  assert(sizeof(VarDeclBitfields) <= sizeof(unsigned));
-  assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned));
+  static_assert(sizeof(VarDeclBitfields) <= sizeof(unsigned),
+                "VarDeclBitfields too large!");
+  static_assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned),
+                "ParmVarDeclBitfields too large!");
   AllBits = 0;
   VarDeclBits.SClass = SC;
   // Everything else is implicitly initialized to false.
@@ -1635,13 +1633,12 @@
                          SourceLocation StartL, SourceLocation IdL,
                          IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
                          StorageClass S) {
-  return new (C) VarDecl(Var, DC, StartL, IdL, Id, T, TInfo, S);
+  return new (C, DC) VarDecl(Var, DC, StartL, IdL, Id, T, TInfo, S);
 }
 
 VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarDecl));
-  return new (Mem) VarDecl(Var, 0, SourceLocation(), SourceLocation(), 0, 
-                           QualType(), 0, SC_None);
+  return new (C, ID) VarDecl(Var, 0, SourceLocation(), SourceLocation(), 0,
+                             QualType(), 0, SC_None);
 }
 
 void VarDecl::setStorageClass(StorageClass SC) {
@@ -1775,7 +1772,7 @@
   //   A declaration directly contained in a linkage-specification is treated
   //   as if it contains the extern specifier for the purpose of determining
   //   the linkage of the declared name and whether it is a definition.
-  if (isSingleLineExternC(*this))
+  if (isSingleLineLanguageLinkage(*this))
     return DeclarationOnly;
 
   // C99 6.9.2p2:
@@ -1798,23 +1795,21 @@
 
   VarDecl *LastTentative = 0;
   VarDecl *First = getFirstDecl();
-  for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
-       I != E; ++I) {
-    Kind = (*I)->isThisDeclarationADefinition();
+  for (auto I : First->redecls()) {
+    Kind = I->isThisDeclarationADefinition();
     if (Kind == Definition)
       return 0;
     else if (Kind == TentativeDefinition)
-      LastTentative = *I;
+      LastTentative = I;
   }
   return LastTentative;
 }
 
 VarDecl *VarDecl::getDefinition(ASTContext &C) {
   VarDecl *First = getFirstDecl();
-  for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
-       I != E; ++I) {
-    if ((*I)->isThisDeclarationADefinition(C) == Definition)
-      return *I;
+  for (auto I : First->redecls()) {
+    if (I->isThisDeclarationADefinition(C) == Definition)
+      return I;
   }
   return 0;
 }
@@ -1823,9 +1818,8 @@
   DefinitionKind Kind = DeclarationOnly;
   
   const VarDecl *First = getFirstDecl();
-  for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
-       I != E; ++I) {
-    Kind = std::max(Kind, (*I)->isThisDeclarationADefinition(C));
+  for (auto I : First->redecls()) {
+    Kind = std::max(Kind, I->isThisDeclarationADefinition(C));
     if (Kind == Definition)
       break;
   }
@@ -1834,13 +1828,11 @@
 }
 
 const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
-  redecl_iterator I = redecls_begin(), E = redecls_end();
-  while (I != E && !I->getInit())
-    ++I;
-
-  if (I != E) {
-    D = *I;
-    return I->getInit();
+  for (auto I : redecls()) {
+    if (auto Expr = I->getInit()) {
+      D = I;
+      return Expr;
+    }
   }
   return 0;
 }
@@ -1865,10 +1857,9 @@
   if (!isStaticDataMember())
     return 0;
   
-  for (VarDecl::redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
-       RD != RDEnd; ++RD) {
+  for (auto RD : redecls()) {
     if (RD->getLexicalDeclContext()->isFileContext())
-      return *RD;
+      return RD;
   }
   
   return 0;
@@ -2108,8 +2099,8 @@
                                  SourceLocation IdLoc, IdentifierInfo *Id,
                                  QualType T, TypeSourceInfo *TInfo,
                                  StorageClass S, Expr *DefArg) {
-  return new (C) ParmVarDecl(ParmVar, DC, StartLoc, IdLoc, Id, T, TInfo,
-                             S, DefArg);
+  return new (C, DC) ParmVarDecl(ParmVar, DC, StartLoc, IdLoc, Id, T, TInfo,
+                                 S, DefArg);
 }
 
 QualType ParmVarDecl::getOriginalType() const {
@@ -2121,9 +2112,8 @@
 }
 
 ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ParmVarDecl));
-  return new (Mem) ParmVarDecl(ParmVar, 0, SourceLocation(), SourceLocation(),
-                               0, QualType(), 0, SC_None, 0);
+  return new (C, ID) ParmVarDecl(ParmVar, 0, SourceLocation(), SourceLocation(),
+                                 0, QualType(), 0, SC_None, 0);
 }
 
 SourceRange ParmVarDecl::getSourceRange() const {
@@ -2196,9 +2186,9 @@
 }
 
 bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
-  for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+  for (auto I : redecls()) {
     if (I->Body || I->IsLateTemplateParsed) {
-      Definition = *I;
+      Definition = I;
       return true;
     }
   }
@@ -2221,10 +2211,10 @@
 }
 
 bool FunctionDecl::isDefined(const FunctionDecl *&Definition) const {
-  for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+  for (auto I : redecls()) {
     if (I->IsDeleted || I->IsDefaulted || I->Body || I->IsLateTemplateParsed ||
         I->hasAttr<AliasAttr>()) {
-      Definition = I->IsDeleted ? I->getCanonicalDecl() : *I;
+      Definition = I->IsDeleted ? I->getCanonicalDecl() : I;
       return true;
     }
   }
@@ -2303,11 +2293,12 @@
          getDeclName().getCXXOverloadedOperator() == OO_Array_New ||
          getDeclName().getCXXOverloadedOperator() == OO_Array_Delete);
 
-  if (isa<CXXRecordDecl>(getDeclContext())) return false;
-  assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+  if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
+    return false;
 
   const FunctionProtoType *proto = getType()->castAs<FunctionProtoType>();
-  if (proto->getNumArgs() != 2 || proto->isVariadic()) return false;
+  if (proto->getNumParams() != 2 || proto->isVariadic())
+    return false;
 
   ASTContext &Context =
     cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
@@ -2315,7 +2306,7 @@
 
   // The result type and first argument type are constant across all
   // these operators.  The second argument must be exactly void*.
-  return (proto->getArgType(1).getCanonicalType() == Context.VoidPtrTy);
+  return (proto->getParamType(1).getCanonicalType() == Context.VoidPtrTy);
 }
 
 static bool isNamespaceStd(const DeclContext *DC) {
@@ -2335,20 +2326,23 @@
 
   if (isa<CXXRecordDecl>(getDeclContext()))
     return false;
-  assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+  // This can only fail for an invalid 'operator new' declaration.
+  if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
+    return false;
 
   const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>();
-  if (FPT->getNumArgs() > 2 || FPT->isVariadic())
+  if (FPT->getNumParams() > 2 || FPT->isVariadic())
     return false;
 
   // If this is a single-parameter function, it must be a replaceable global
   // allocation or deallocation function.
-  if (FPT->getNumArgs() == 1)
+  if (FPT->getNumParams() == 1)
     return true;
 
   // Otherwise, we're looking for a second parameter whose type is
   // 'const std::nothrow_t &', or, in C++1y, 'std::size_t'.
-  QualType Ty = FPT->getArgType(1);
+  QualType Ty = FPT->getParamType(1);
   ASTContext &Ctx = getASTContext();
   if (Ctx.getLangOpts().SizedDeallocation &&
       Ctx.hasSameType(Ty, Ctx.getSizeType()))
@@ -2376,10 +2370,12 @@
     return 0;
   if (isa<CXXRecordDecl>(getDeclContext()))
     return 0;
-  assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+  if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
+    return 0;
 
   if (getNumParams() != 2 || isVariadic() ||
-      !Ctx.hasSameType(getType()->castAs<FunctionProtoType>()->getArgType(1),
+      !Ctx.hasSameType(getType()->castAs<FunctionProtoType>()->getParamType(1),
                        Ctx.getSizeType()))
     return 0;
 
@@ -2488,7 +2484,7 @@
 
   // If the function is marked "overloadable", it has a different mangled name
   // and is not the C library function.
-  if (getAttr<OverloadableAttr>())
+  if (hasAttr<OverloadableAttr>())
     return 0;
 
   if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
@@ -2510,11 +2506,8 @@
 /// based on its FunctionType.  This is the length of the ParamInfo array
 /// after it has been created.
 unsigned FunctionDecl::getNumParams() const {
-  const FunctionType *FT = getType()->castAs<FunctionType>();
-  if (isa<FunctionNoProtoType>(FT))
-    return 0;
-  return cast<FunctionProtoType>(FT)->getNumArgs();
-
+  const FunctionProtoType *FPT = getType()->getAs<FunctionProtoType>();
+  return FPT ? FPT->getNumParams() : 0;
 }
 
 void FunctionDecl::setParams(ASTContext &C,
@@ -2542,38 +2535,49 @@
 /// getMinRequiredArguments - Returns the minimum number of arguments
 /// needed to call this function. This may be fewer than the number of
 /// function parameters, if some of the parameters have default
-/// arguments (in C++) or the last parameter is a parameter pack.
+/// arguments (in C++) or are parameter packs (C++11).
 unsigned FunctionDecl::getMinRequiredArguments() const {
   if (!getASTContext().getLangOpts().CPlusPlus)
     return getNumParams();
-  
-  unsigned NumRequiredArgs = getNumParams();  
-  
-  // If the last parameter is a parameter pack, we don't need an argument for 
-  // it.
-  if (NumRequiredArgs > 0 &&
-      getParamDecl(NumRequiredArgs - 1)->isParameterPack())
-    --NumRequiredArgs;
-      
-  // If this parameter has a default argument, we don't need an argument for
-  // it.
-  while (NumRequiredArgs > 0 &&
-         getParamDecl(NumRequiredArgs-1)->hasDefaultArg())
-    --NumRequiredArgs;
 
-  // We might have parameter packs before the end. These can't be deduced,
-  // but they can still handle multiple arguments.
-  unsigned ArgIdx = NumRequiredArgs;
-  while (ArgIdx > 0) {
-    if (getParamDecl(ArgIdx - 1)->isParameterPack())
-      NumRequiredArgs = ArgIdx;
-    
-    --ArgIdx;
-  }
-  
+  unsigned NumRequiredArgs = 0;
+  for (auto *Param : params())
+    if (!Param->isParameterPack() && !Param->hasDefaultArg())
+      ++NumRequiredArgs;
   return NumRequiredArgs;
 }
 
+/// \brief The combination of the extern and inline keywords under MSVC forces
+/// the function to be required.
+///
+/// Note: This function assumes that we will only get called when isInlined()
+/// would return true for this FunctionDecl.
+bool FunctionDecl::isMSExternInline() const {
+  assert(isInlined() && "expected to get called on an inlined function!");
+
+  const ASTContext &Context = getASTContext();
+  if (!Context.getLangOpts().MSVCCompat)
+    return false;
+
+  for (const FunctionDecl *FD = this; FD; FD = FD->getPreviousDecl())
+    if (FD->getStorageClass() == SC_Extern)
+      return true;
+
+  return false;
+}
+
+static bool redeclForcesDefMSVC(const FunctionDecl *Redecl) {
+  if (Redecl->getStorageClass() != SC_Extern)
+    return false;
+
+  for (const FunctionDecl *FD = Redecl->getPreviousDecl(); FD;
+       FD = FD->getPreviousDecl())
+    if (FD->getStorageClass() == SC_Extern)
+      return false;
+
+  return true;
+}
+
 static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
   // Only consider file-scope declarations in this test.
   if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
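The two additions above are easier to see with a concrete case. The following sketch is illustrative only, not part of the patch; the declarations are hypothetical.

    // The rewritten getMinRequiredArguments(): a parameter pack or a
    // parameter with a default argument never counts toward the minimum,
    // wherever it appears in the parameter list.
    template <typename... Ts>
    void f(int a, int b = 0, Ts... rest);  // getMinRequiredArguments() == 1

    // What redeclForcesDefMSVC() looks for when LangOpts.MSVCCompat is set:
    // the first 'extern' redeclaration of an inline function that has a
    // body forces an externally visible definition to be emitted.
    inline void g() {}
    extern void g();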
@@ -2593,7 +2597,7 @@
 /// \brief For a function declaration in C or C++, determine whether this
 /// declaration causes the definition to be externally visible.
 ///
-/// Specifically, this determines if adding the current declaration to the set
+/// For instance, this determines if adding the current declaration to the set
 /// of redeclarations of the given functions causes
 /// isInlineDefinitionExternallyVisible to change from false to true.
 bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
@@ -2602,6 +2606,13 @@
 
   ASTContext &Context = getASTContext();
 
+  if (Context.getLangOpts().MSVCCompat) {
+    const FunctionDecl *Definition;
+    if (hasBody(Definition) && Definition->isInlined() &&
+        redeclForcesDefMSVC(this))
+      return true;
+  }
+
   if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
     // With GNU inlining, a declaration with 'inline' but not 'extern', forces
     // an externally visible definition.
@@ -2683,9 +2694,7 @@
     
     // If any declaration is 'inline' but not 'extern', then this definition
     // is externally visible.
-    for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
-         Redecl != RedeclEnd;
-         ++Redecl) {
+    for (auto Redecl : redecls()) {
       if (Redecl->isInlineSpecified() && 
           Redecl->getStorageClass() != SC_Extern)
         return true;
@@ -2702,10 +2711,8 @@
   //   [...] If all of the file scope declarations for a function in a 
   //   translation unit include the inline function specifier without extern, 
   //   then the definition in that translation unit is an inline definition.
-  for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
-       Redecl != RedeclEnd;
-       ++Redecl) {
-    if (RedeclForcesDefC99(*Redecl))
+  for (auto Redecl : redecls()) {
+    if (RedeclForcesDefC99(Redecl))
       return true;
   }
   
@@ -2824,14 +2831,34 @@
   // Handle class scope explicit specialization special case.
   if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
     return getClassScopeSpecializationPattern();
+  
+  // If this is a generic lambda call operator specialization, its 
+  // instantiation pattern is always its primary template's pattern
+  // even if its primary template was instantiated from another 
+  // member template (which happens with nested generic lambdas).
+  // Since a lambda's call operator's body is transformed eagerly, 
+  // we don't have to go hunting for a prototype definition template 
+  // (i.e. instantiated-from-member-template) to use as an instantiation 
+  // pattern.
 
+  if (isGenericLambdaCallOperatorSpecialization(
+          dyn_cast<CXXMethodDecl>(this))) {
+    assert(getPrimaryTemplate() && "A generic lambda specialization must be "
+                                   "generated from a primary call operator "
+                                   "template");
+    assert(getPrimaryTemplate()->getTemplatedDecl()->getBody() &&
+           "A generic lambda call operator template must always have a body - "
+           "even if instantiated from a prototype (i.e. as written) member "
+           "template");
+    return getPrimaryTemplate()->getTemplatedDecl();
+  }
+  
   if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
     while (Primary->getInstantiatedFromMemberTemplate()) {
       // If we have hit a point where the user provided a specialization of
       // this template, we're done looking.
       if (Primary->isMemberSpecialization())
         break;
-      
       Primary = Primary->getInstantiatedFromMemberTemplate();
     }
     
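For context, a minimal instance of the nested generic lambdas the comment above refers to (illustrative only; C++1y generic lambdas):

    // Once 'outer' is instantiated, the inner closure's call operator
    // template has itself been instantiated from a member template, yet
    // its specializations still use that already-transformed call
    // operator template as their instantiation pattern.
    auto outer = [](auto x) {
      return [](auto y) { return y; };
    };
    // e.g. outer(0)(1);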
@@ -3101,14 +3128,13 @@
                              IdentifierInfo *Id, QualType T,
                              TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
                              InClassInitStyle InitStyle) {
-  return new (C) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
-                           BW, Mutable, InitStyle);
+  return new (C, DC) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
+                               BW, Mutable, InitStyle);
 }
 
 FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FieldDecl));
-  return new (Mem) FieldDecl(Field, 0, SourceLocation(), SourceLocation(),
-                             0, QualType(), 0, 0, false, ICIS_NoInit);
+  return new (C, ID) FieldDecl(Field, 0, SourceLocation(), SourceLocation(),
+                               0, QualType(), 0, 0, false, ICIS_NoInit);
 }
 
 bool FieldDecl::isAnonymousStructOrUnion() const {
@@ -3191,8 +3217,8 @@
   if (CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(this)) {
     struct CXXRecordDecl::DefinitionData *Data = 
       new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
-    for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
-      cast<CXXRecordDecl>(*I)->DefinitionData = Data;
+    for (auto I : redecls())
+      cast<CXXRecordDecl>(I)->DefinitionData = Data;
   }
 }
 
@@ -3224,10 +3250,9 @@
   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this))
     return CXXRD->getDefinition();
 
-  for (redecl_iterator R = redecls_begin(), REnd = redecls_end();
-       R != REnd; ++R)
+  for (auto R : redecls())
     if (R->isCompleteDefinition())
-      return *R;
+      return R;
 
   return 0;
 }
@@ -3275,21 +3300,27 @@
                            IdentifierInfo *Id,
                            EnumDecl *PrevDecl, bool IsScoped,
                            bool IsScopedUsingClassTag, bool IsFixed) {
-  EnumDecl *Enum = new (C) EnumDecl(DC, StartLoc, IdLoc, Id, PrevDecl,
-                                    IsScoped, IsScopedUsingClassTag, IsFixed);
+  EnumDecl *Enum = new (C, DC) EnumDecl(DC, StartLoc, IdLoc, Id, PrevDecl,
+                                        IsScoped, IsScopedUsingClassTag,
+                                        IsFixed);
   Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
   C.getTypeDeclType(Enum, PrevDecl);
   return Enum;
 }
 
 EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumDecl));
-  EnumDecl *Enum = new (Mem) EnumDecl(0, SourceLocation(), SourceLocation(),
-                                      0, 0, false, false, false);
+  EnumDecl *Enum = new (C, ID) EnumDecl(0, SourceLocation(), SourceLocation(),
+                                        0, 0, false, false, false);
   Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
   return Enum;
 }
 
+SourceRange EnumDecl::getIntegerTypeRange() const {
+  if (const TypeSourceInfo *TI = getIntegerTypeSourceInfo())
+    return TI->getTypeLoc().getSourceRange();
+  return SourceRange();
+}
+
 void EnumDecl::completeDefinition(QualType NewType,
                                   QualType NewPromotionType,
                                   unsigned NumPositiveBits,
@@ -3353,8 +3384,8 @@
 RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
                                SourceLocation StartLoc, SourceLocation IdLoc,
                                IdentifierInfo *Id, RecordDecl* PrevDecl) {
-  RecordDecl* R = new (C) RecordDecl(Record, TK, DC, StartLoc, IdLoc, Id,
-                                     PrevDecl);
+  RecordDecl* R = new (C, DC) RecordDecl(Record, TK, DC, StartLoc, IdLoc, Id,
+                                         PrevDecl);
   R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
 
   C.getTypeDeclType(R, PrevDecl);
@@ -3362,9 +3393,8 @@
 }
 
 RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(RecordDecl));
-  RecordDecl *R = new (Mem) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
-                                       SourceLocation(), 0, 0);
+  RecordDecl *R = new (C, ID) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
+                                         SourceLocation(), 0, 0);
   R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
   return R;
 }
@@ -3427,7 +3457,7 @@
   if (Decls.empty())
     return;
 
-  llvm::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
+  std::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
                                                  /*FieldsAlreadyLoaded=*/false);
 }
 
@@ -3469,10 +3499,9 @@
 }
 
 bool BlockDecl::capturesVariable(const VarDecl *variable) const {
-  for (capture_const_iterator
-         i = capture_begin(), e = capture_end(); i != e; ++i)
+  for (const auto &I : captures())
     // Only auto vars can be captured, so no redeclaration worries.
-    if (i->getVariable() == variable)
+    if (I.getVariable() == variable)
       return true;
 
   return false;
@@ -3489,33 +3518,32 @@
 void TranslationUnitDecl::anchor() { }
 
 TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
-  return new (C) TranslationUnitDecl(C);
+  return new (C, (DeclContext*)0) TranslationUnitDecl(C);
 }
 
 void LabelDecl::anchor() { }
 
 LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
                              SourceLocation IdentL, IdentifierInfo *II) {
-  return new (C) LabelDecl(DC, IdentL, II, 0, IdentL);
+  return new (C, DC) LabelDecl(DC, IdentL, II, 0, IdentL);
 }
 
 LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
                              SourceLocation IdentL, IdentifierInfo *II,
                              SourceLocation GnuLabelL) {
   assert(GnuLabelL != IdentL && "Use this only for GNU local labels");
-  return new (C) LabelDecl(DC, IdentL, II, 0, GnuLabelL);
+  return new (C, DC) LabelDecl(DC, IdentL, II, 0, GnuLabelL);
 }
 
 LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LabelDecl));
-  return new (Mem) LabelDecl(0, SourceLocation(), 0, 0, SourceLocation());
+  return new (C, ID) LabelDecl(0, SourceLocation(), 0, 0, SourceLocation());
 }
 
 void ValueDecl::anchor() { }
 
 bool ValueDecl::isWeak() const {
-  for (attr_iterator I = attr_begin(), E = attr_end(); I != E; ++I)
-    if (isa<WeakAttr>(*I) || isa<WeakRefAttr>(*I))
+  for (const auto *I : attrs())
+    if (isa<WeakAttr>(I) || isa<WeakRefAttr>(I))
       return true;
 
   return isWeakImported();
@@ -3527,13 +3555,12 @@
                                              SourceLocation IdLoc,
                                              IdentifierInfo *Id,
                                              QualType Type) {
-  return new (C) ImplicitParamDecl(DC, IdLoc, Id, Type);
+  return new (C, DC) ImplicitParamDecl(DC, IdLoc, Id, Type);
 }
 
-ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C, 
+ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
                                                          unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ImplicitParamDecl));
-  return new (Mem) ImplicitParamDecl(0, SourceLocation(), 0, QualType());
+  return new (C, ID) ImplicitParamDecl(0, SourceLocation(), 0, QualType());
 }
 
 FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
@@ -3541,66 +3568,53 @@
                                    const DeclarationNameInfo &NameInfo,
                                    QualType T, TypeSourceInfo *TInfo,
                                    StorageClass SC,
-                                   bool isInlineSpecified, 
+                                   bool isInlineSpecified,
                                    bool hasWrittenPrototype,
                                    bool isConstexprSpecified) {
-  FunctionDecl *New = new (C) FunctionDecl(Function, DC, StartLoc, NameInfo,
-                                           T, TInfo, SC,
-                                           isInlineSpecified,
-                                           isConstexprSpecified);
+  FunctionDecl *New =
+      new (C, DC) FunctionDecl(Function, DC, StartLoc, NameInfo, T, TInfo, SC,
+                               isInlineSpecified, isConstexprSpecified);
   New->HasWrittenPrototype = hasWrittenPrototype;
   return New;
 }
 
 FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionDecl));
-  return new (Mem) FunctionDecl(Function, 0, SourceLocation(), 
-                                DeclarationNameInfo(), QualType(), 0,
-                                SC_None, false, false);
+  return new (C, ID) FunctionDecl(Function, 0, SourceLocation(),
+                                  DeclarationNameInfo(), QualType(), 0,
+                                  SC_None, false, false);
 }
 
 BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
-  return new (C) BlockDecl(DC, L);
+  return new (C, DC) BlockDecl(DC, L);
 }
 
 BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(BlockDecl));
-  return new (Mem) BlockDecl(0, SourceLocation());
-}
-
-MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
-                                                   unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(MSPropertyDecl));
-  return new (Mem) MSPropertyDecl(0, SourceLocation(), DeclarationName(),
-                                  QualType(), 0, SourceLocation(),
-                                  0, 0);
+  return new (C, ID) BlockDecl(0, SourceLocation());
 }
 
 CapturedDecl *CapturedDecl::Create(ASTContext &C, DeclContext *DC,
                                    unsigned NumParams) {
-  unsigned Size = sizeof(CapturedDecl) + NumParams * sizeof(ImplicitParamDecl*);
-  return new (C.Allocate(Size)) CapturedDecl(DC, NumParams);
+  return new (C, DC, NumParams * sizeof(ImplicitParamDecl *))
+      CapturedDecl(DC, NumParams);
 }
 
 CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, unsigned ID,
-                                   unsigned NumParams) {
-  unsigned Size = sizeof(CapturedDecl) + NumParams * sizeof(ImplicitParamDecl*);
-  void *Mem = AllocateDeserializedDecl(C, ID, Size);
-  return new (Mem) CapturedDecl(0, NumParams);
+                                               unsigned NumParams) {
+  return new (C, ID, NumParams * sizeof(ImplicitParamDecl *))
+      CapturedDecl(0, NumParams);
 }
 
 EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
                                            SourceLocation L,
                                            IdentifierInfo *Id, QualType T,
                                            Expr *E, const llvm::APSInt &V) {
-  return new (C) EnumConstantDecl(CD, L, Id, T, E, V);
+  return new (C, CD) EnumConstantDecl(CD, L, Id, T, E, V);
 }
 
 EnumConstantDecl *
 EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumConstantDecl));
-  return new (Mem) EnumConstantDecl(0, SourceLocation(), 0, QualType(), 0, 
-                                    llvm::APSInt());
+  return new (C, ID) EnumConstantDecl(0, SourceLocation(), 0, QualType(), 0,
+                                      llvm::APSInt());
 }
 
 void IndirectFieldDecl::anchor() { }
@@ -3609,14 +3623,13 @@
 IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
                           IdentifierInfo *Id, QualType T, NamedDecl **CH,
                           unsigned CHS) {
-  return new (C) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
+  return new (C, DC) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
 }
 
 IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
                                                          unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(IndirectFieldDecl));
-  return new (Mem) IndirectFieldDecl(0, SourceLocation(), DeclarationName(),
-                                     QualType(), 0, 0);
+  return new (C, ID) IndirectFieldDecl(0, SourceLocation(), DeclarationName(),
+                                       QualType(), 0, 0);
 }
 
 SourceRange EnumConstantDecl::getSourceRange() const {
@@ -3631,26 +3644,24 @@
 TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
                                  SourceLocation StartLoc, SourceLocation IdLoc,
                                  IdentifierInfo *Id, TypeSourceInfo *TInfo) {
-  return new (C) TypedefDecl(DC, StartLoc, IdLoc, Id, TInfo);
+  return new (C, DC) TypedefDecl(DC, StartLoc, IdLoc, Id, TInfo);
 }
 
 void TypedefNameDecl::anchor() { }
 
 TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypedefDecl));
-  return new (Mem) TypedefDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+  return new (C, ID) TypedefDecl(0, SourceLocation(), SourceLocation(), 0, 0);
 }
 
 TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
                                      SourceLocation StartLoc,
                                      SourceLocation IdLoc, IdentifierInfo *Id,
                                      TypeSourceInfo *TInfo) {
-  return new (C) TypeAliasDecl(DC, StartLoc, IdLoc, Id, TInfo);
+  return new (C, DC) TypeAliasDecl(DC, StartLoc, IdLoc, Id, TInfo);
 }
 
 TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasDecl));
-  return new (Mem) TypeAliasDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+  return new (C, ID) TypeAliasDecl(0, SourceLocation(), SourceLocation(), 0, 0);
 }
 
 SourceRange TypedefDecl::getSourceRange() const {
@@ -3675,24 +3686,22 @@
                                            StringLiteral *Str,
                                            SourceLocation AsmLoc,
                                            SourceLocation RParenLoc) {
-  return new (C) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
+  return new (C, DC) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
 }
 
-FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C, 
+FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
                                                        unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FileScopeAsmDecl));
-  return new (Mem) FileScopeAsmDecl(0, 0, SourceLocation(), SourceLocation());
+  return new (C, ID) FileScopeAsmDecl(0, 0, SourceLocation(), SourceLocation());
 }
 
 void EmptyDecl::anchor() {}
 
 EmptyDecl *EmptyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
-  return new (C) EmptyDecl(DC, L);
+  return new (C, DC) EmptyDecl(DC, L);
 }
 
 EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EmptyDecl));
-  return new (Mem) EmptyDecl(0, SourceLocation());
+  return new (C, ID) EmptyDecl(0, SourceLocation());
 }
 
 //===----------------------------------------------------------------------===//
@@ -3730,30 +3739,28 @@
   *reinterpret_cast<SourceLocation *>(this + 1) = EndLoc;
 }
 
-ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC, 
+ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
                                SourceLocation StartLoc, Module *Imported,
                                ArrayRef<SourceLocation> IdentifierLocs) {
-  void *Mem = C.Allocate(sizeof(ImportDecl) + 
-                         IdentifierLocs.size() * sizeof(SourceLocation));
-  return new (Mem) ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
+  return new (C, DC, IdentifierLocs.size() * sizeof(SourceLocation))
+      ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
 }
 
-ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC, 
+ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
                                        SourceLocation StartLoc,
-                                       Module *Imported, 
+                                       Module *Imported,
                                        SourceLocation EndLoc) {
-  void *Mem = C.Allocate(sizeof(ImportDecl) + sizeof(SourceLocation));
-  ImportDecl *Import = new (Mem) ImportDecl(DC, StartLoc, Imported, EndLoc);
+  ImportDecl *Import =
+      new (C, DC, sizeof(SourceLocation)) ImportDecl(DC, StartLoc,
+                                                     Imported, EndLoc);
   Import->setImplicit();
   return Import;
 }
 
 ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
                                            unsigned NumLocations) {
-  void *Mem = AllocateDeserializedDecl(C, ID, 
-                                       (sizeof(ImportDecl) + 
-                                        NumLocations * sizeof(SourceLocation)));
-  return new (Mem) ImportDecl(EmptyShell());  
+  return new (C, ID, NumLocations * sizeof(SourceLocation))
+      ImportDecl(EmptyShell());
 }
 
 ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
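The allocation changes throughout this file all follow one pattern, built on the two placement operator new overloads added to Decl in DeclBase.cpp below. A minimal sketch with a hypothetical FooDecl:

    FooDecl *FooDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
      // Parsed declarations: allocate in the ASTContext, keyed by the
      // owning DeclContext.
      return new (C, DC) FooDecl(DC, L);
    }

    FooDecl *FooDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
      // Deserialized declarations: replaces the old
      // AllocateDeserializedDecl() + placement-new pair; the global
      // declaration ID is stored in the prefix bytes in front of the object.
      return new (C, ID) FooDecl(0, SourceLocation());
    }

    // Declarations with trailing storage pass the extra byte count as the
    // third argument instead of computing the allocation size by hand, e.g.
    //   new (C, DC, NumLocs * sizeof(SourceLocation)) ImportDecl(...);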
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 121c5a6..1de1fe2 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -45,25 +45,30 @@
   getASTContext().getExternalSource()->updateOutOfDateIdentifier(II);
 }
 
-void *Decl::AllocateDeserializedDecl(const ASTContext &Context, 
-                                     unsigned ID,
-                                     unsigned Size) {
+void *Decl::operator new(std::size_t Size, const ASTContext &Context,
+                         unsigned ID, std::size_t Extra) {
   // Allocate an extra 8 bytes worth of storage, which ensures that the
   // resulting pointer will still be 8-byte aligned. 
-  void *Start = Context.Allocate(Size + 8);
+  void *Start = Context.Allocate(Size + Extra + 8);
   void *Result = (char*)Start + 8;
-  
+
   unsigned *PrefixPtr = (unsigned *)Result - 2;
-  
+
   // Zero out the first 4 bytes; this is used to store the owning module ID.
   PrefixPtr[0] = 0;
-  
+
   // Store the global declaration ID in the second 4 bytes.
   PrefixPtr[1] = ID;
-  
+
   return Result;
 }
 
+void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
+                         DeclContext *Parent, std::size_t Extra) {
+  assert(!Parent || &Parent->getParentASTContext() == &Ctx);
+  return ::operator new(Size + Extra, Ctx);
+}
+
 Module *Decl::getOwningModuleSlow() const {
   assert(isFromASTFile() && "Not from AST file?");
   return getASTContext().getExternalSource()->getModule(getOwningModuleID());
@@ -80,6 +85,7 @@
 
 void Decl::setInvalidDecl(bool Invalid) {
   InvalidDecl = Invalid;
+  assert(!isa<TagDecl>(this) || !cast<TagDecl>(this)->isCompleteDefinition());
   if (Invalid && !isa<ParmVarDecl>(this)) {
     // Defensive maneuver for ill-formed code: we're likely not to make it to
     // a point where we set the access specifier, so default it to "public"
@@ -153,11 +159,12 @@
   return isTemplateParameterPack();
 }
 
-bool Decl::isFunctionOrFunctionTemplate() const {
-  if (const UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(this))
-    return UD->getTargetDecl()->isFunctionOrFunctionTemplate();
-
-  return isa<FunctionDecl>(this) || isa<FunctionTemplateDecl>(this);
+FunctionDecl *Decl::getAsFunction() {
+  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
+    return FD;
+  if (const FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(this))
+    return FTD->getTemplatedDecl();
+  return 0;
 }
 
 bool Decl::isTemplateDecl() const {
@@ -306,7 +313,7 @@
     return true;
 
   // Check redeclarations.
-  for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+  for (auto I : redecls())
     if (I->Referenced)
       return true;
 
@@ -401,8 +408,8 @@
   AvailabilityResult Result = AR_Available;
   std::string ResultMessage;
 
-  for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
-    if (DeprecatedAttr *Deprecated = dyn_cast<DeprecatedAttr>(*A)) {
+  for (const auto *A : attrs()) {
+    if (const auto *Deprecated = dyn_cast<DeprecatedAttr>(A)) {
       if (Result >= AR_Deprecated)
         continue;
 
@@ -413,13 +420,13 @@
       continue;
     }
 
-    if (UnavailableAttr *Unavailable = dyn_cast<UnavailableAttr>(*A)) {
+    if (const auto *Unavailable = dyn_cast<UnavailableAttr>(A)) {
       if (Message)
         *Message = Unavailable->getMessage();
       return AR_Unavailable;
     }
 
-    if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+    if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
       AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
                                                 Message);
 
@@ -475,11 +482,11 @@
   if (!canBeWeakImported(IsDefinition))
     return false;
 
-  for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
-    if (isa<WeakImportAttr>(*A))
+  for (const auto *A : attrs()) {
+    if (isa<WeakImportAttr>(A))
       return true;
 
-    if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+    if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
       if (CheckAvailability(getASTContext(), Availability, 0) 
                                                          == AR_NotYetIntroduced)
         return true;
@@ -662,7 +669,7 @@
   return SourceLocation();
 }
 
-void Decl::CheckAccessDeclContext() const {
+bool Decl::AccessDeclContextSanity() const {
 #ifndef NDEBUG
   // Suppress this check if any of the following hold:
   // 1. this is the translation unit (and thus has no parent)
@@ -684,16 +691,35 @@
       // AS_none as access specifier.
       isa<CXXRecordDecl>(this) ||
       isa<ClassScopeFunctionSpecializationDecl>(this))
-    return;
+    return true;
 
   assert(Access != AS_none &&
          "Access specifier is AS_none inside a record decl");
 #endif
+  return true;
 }
 
 static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
 static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
 
+const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
+  QualType Ty;
+  if (const ValueDecl *D = dyn_cast<ValueDecl>(this))
+    Ty = D->getType();
+  else if (const TypedefNameDecl *D = dyn_cast<TypedefNameDecl>(this))
+    Ty = D->getUnderlyingType();
+  else
+    return 0;
+
+  if (Ty->isFunctionPointerType())
+    Ty = Ty->getAs<PointerType>()->getPointeeType();
+  else if (BlocksToo && Ty->isBlockPointerType())
+    Ty = Ty->getAs<BlockPointerType>()->getPointeeType();
+
+  return Ty->getAs<FunctionType>();
+}
+
+
 /// Starting at a given context (a Decl or DeclContext), look for a
 /// code context that is not a closure (a lambda, block, etc.).
 template <class T> static Decl *getNonClosureContext(T *D) {
@@ -939,13 +965,12 @@
 /// \brief We have just acquired external visible storage, and we already have
 /// built a lookup map. For every name in the map, pull in the new names from
 /// the external storage.
-void DeclContext::reconcileExternalVisibleStorage() {
+void DeclContext::reconcileExternalVisibleStorage() const {
   assert(NeedToReconcileExternalVisibleStorage && LookupPtr.getPointer());
   NeedToReconcileExternalVisibleStorage = false;
 
-  StoredDeclsMap &Map = *LookupPtr.getPointer();
-  for (StoredDeclsMap::iterator I = Map.begin(); I != Map.end(); ++I)
-    I->second.setHasExternalDecls();
+  for (auto &Lookup : *LookupPtr.getPointer())
+    Lookup.second.setHasExternalDecls();
 }
 
 /// \brief Load the declarations within this lexical storage from an
@@ -982,8 +1007,8 @@
   // Splice the newly-read declarations into the beginning of the list
   // of declarations.
   Decl *ExternalFirst, *ExternalLast;
-  llvm::tie(ExternalFirst, ExternalLast) = BuildDeclChain(Decls,
-                                                          FieldsAlreadyLoaded);
+  std::tie(ExternalFirst, ExternalLast) =
+      BuildDeclChain(Decls, FieldsAlreadyLoaded);
   ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
   FirstDecl = ExternalFirst;
   if (!LastDecl)
@@ -997,6 +1022,8 @@
   StoredDeclsMap *Map;
   if (!(Map = DC->LookupPtr.getPointer()))
     Map = DC->CreateStoredDeclsMap(Context);
+  if (DC->NeedToReconcileExternalVisibleStorage)
+    DC->reconcileExternalVisibleStorage();
 
   (*Map)[Name].removeExternalDecls();
 
@@ -1011,6 +1038,8 @@
   StoredDeclsMap *Map;
   if (!(Map = DC->LookupPtr.getPointer()))
     Map = DC->CreateStoredDeclsMap(Context);
+  if (DC->NeedToReconcileExternalVisibleStorage)
+    DC->reconcileExternalVisibleStorage();
 
   StoredDeclsList &List = (*Map)[Name];
 
@@ -1050,14 +1079,9 @@
   return List.getLookupResult();
 }
 
-DeclContext::decl_iterator DeclContext::noload_decls_begin() const {
-  return decl_iterator(FirstDecl);
-}
-
 DeclContext::decl_iterator DeclContext::decls_begin() const {
   if (hasExternalLexicalStorage())
     LoadLexicalDeclsFromExternalStorage();
-
   return decl_iterator(FirstDecl);
 }
 
@@ -1187,6 +1211,10 @@
 /// buildLookup - Build the lookup data structure with all of the
 /// declarations in this DeclContext (and any other contexts linked
 /// to it or transparent contexts nested within it) and return it.
+///
+/// Note that the produced map may be missing declarations from an
+/// external source. If so, those entries will be marked with the
+/// 'hasExternalDecls' flag.
 StoredDeclsMap *DeclContext::buildLookup() {
   assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
 
@@ -1202,7 +1230,6 @@
 
   // We no longer have any lazy decls.
   LookupPtr.setInt(false);
-  NeedToReconcileExternalVisibleStorage = false;
   return LookupPtr.getPointer();
 }
 
@@ -1251,11 +1278,13 @@
     return PrimaryContext->lookup(Name);
 
   if (hasExternalVisibleStorage()) {
+    if (NeedToReconcileExternalVisibleStorage)
+      reconcileExternalVisibleStorage();
+
     StoredDeclsMap *Map = LookupPtr.getPointer();
+
     if (LookupPtr.getInt())
       Map = buildLookup();
-    else if (NeedToReconcileExternalVisibleStorage)
-      reconcileExternalVisibleStorage();
 
     if (!Map)
       Map = CreateStoredDeclsMap(getParentASTContext());
@@ -1267,7 +1296,7 @@
       return R.first->second.getLookupResult();
 
     ExternalASTSource *Source = getParentASTContext().getExternalSource();
-    if (Source->FindExternalVisibleDeclsByName(this, Name) || R.second) {
+    if (Source->FindExternalVisibleDeclsByName(this, Name) || !R.second) {
       if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
         StoredDeclsMap::iterator I = Map->find(Name);
         if (I != Map->end())
@@ -1502,13 +1531,13 @@
 
 /// Returns iterator range [First, Last) of UsingDirectiveDecls stored within
 /// this context.
-DeclContext::udir_iterator_range
-DeclContext::getUsingDirectives() const {
+DeclContext::udir_range DeclContext::using_directives() const {
   // FIXME: Use something more efficient than normal lookup for using
   // directives. In C++, using directives are looked up more than anything else.
   lookup_const_result Result = lookup(UsingDirectiveDecl::getName());
-  return udir_iterator_range(reinterpret_cast<udir_iterator>(Result.begin()),
-                             reinterpret_cast<udir_iterator>(Result.end()));
+  return udir_range(
+      reinterpret_cast<UsingDirectiveDecl *const *>(Result.begin()),
+      reinterpret_cast<UsingDirectiveDecl *const *>(Result.end()));
 }
 
 //===----------------------------------------------------------------------===//
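A short usage sketch for the Decl/DeclContext helpers introduced above. Illustrative only; 'D' and 'DC' are hypothetical pointers and the usual Clang/LLVM headers are assumed.

    // getAsFunction() collapses the old isFunctionOrFunctionTemplate()
    // checks: it yields the FunctionDecl itself, the templated declaration
    // of a FunctionTemplateDecl, or null for anything else.
    if (FunctionDecl *FD = D->getAsFunction())
      llvm::errs() << FD->getNumParams() << " parameter(s)\n";

    // getFunctionType() digs a function type out of a value or typedef
    // declaration, looking through function pointers and, optionally,
    // block pointers.
    if (const FunctionType *FT = D->getFunctionType(/*BlocksToo=*/true))
      llvm::errs() << "declares or points to a function type\n";

    // using_directives() now returns a range instead of an iterator pair.
    for (UsingDirectiveDecl *UD : DC->using_directives())
      llvm::errs() << UD->getNominatedNamespace()->getName() << "\n";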
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index a17abdd..d0ec4b9 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -31,8 +31,7 @@
 void AccessSpecDecl::anchor() { }
 
 AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(AccessSpecDecl));
-  return new (Mem) AccessSpecDecl(EmptyShell());
+  return new (C, ID) AccessSpecDecl(EmptyShell());
 }
 
 void LazyASTUnresolvedSet::getFromExternalSource(ASTContext &C) const {
@@ -51,7 +50,7 @@
     Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
     Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
     HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
-    HasMutableFields(false), HasOnlyCMembers(true),
+    HasMutableFields(false), HasVariantMembers(false), HasOnlyCMembers(true),
     HasInClassInitializer(false), HasUninitializedReferenceMember(false),
     NeedOverloadResolutionForMoveConstructor(false),
     NeedOverloadResolutionForMoveAssignment(false),
@@ -95,8 +94,8 @@
                                      SourceLocation IdLoc, IdentifierInfo *Id,
                                      CXXRecordDecl* PrevDecl,
                                      bool DelayTypeCreation) {
-  CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TK, DC, StartLoc, IdLoc,
-                                           Id, PrevDecl);
+  CXXRecordDecl *R = new (C, DC) CXXRecordDecl(CXXRecord, TK, DC, StartLoc,
+                                               IdLoc, Id, PrevDecl);
   R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
 
   // FIXME: DelayTypeCreation seems like such a hack
@@ -109,8 +108,8 @@
                                            TypeSourceInfo *Info, SourceLocation Loc,
                                            bool Dependent, bool IsGeneric, 
                                            LambdaCaptureDefault CaptureDefault) {
-  CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc,
-                                           0, 0);
+  CXXRecordDecl *R =
+      new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc, 0, 0);
   R->IsBeingDefined = true;
   R->DefinitionData = new (C) struct LambdaDefinitionData(R, Info, 
                                                           Dependent, 
@@ -124,10 +123,8 @@
 
 CXXRecordDecl *
 CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXRecordDecl));
-  CXXRecordDecl *R = new (Mem) CXXRecordDecl(CXXRecord, TTK_Struct, 0,
-                                             SourceLocation(), SourceLocation(),
-                                             0, 0);
+  CXXRecordDecl *R = new (C, ID) CXXRecordDecl(
+      CXXRecord, TTK_Struct, 0, SourceLocation(), SourceLocation(), 0, 0);
   R->MayHaveOutOfDateDef = false;
   return R;
 }
@@ -205,19 +202,17 @@
       data().HasNonLiteralTypeFieldsOrBases = true;
     
     // Now go through all virtual bases of this base and add them.
-    for (CXXRecordDecl::base_class_iterator VBase =
-          BaseClassDecl->vbases_begin(),
-         E = BaseClassDecl->vbases_end(); VBase != E; ++VBase) {
+    for (const auto &VBase : BaseClassDecl->vbases()) {
       // Add this base if it's not already in the list.
-      if (SeenVBaseTypes.insert(C.getCanonicalType(VBase->getType()))) {
-        VBases.push_back(VBase);
+      if (SeenVBaseTypes.insert(C.getCanonicalType(VBase.getType()))) {
+        VBases.push_back(&VBase);
 
         // C++11 [class.copy]p8:
         //   The implicitly-declared copy constructor for a class X will have
         //   the form 'X::X(const X&)' if each [...] virtual base class B of X
         //   has a copy constructor whose first parameter is of type
         //   'const B&' or 'const volatile B&' [...]
-        if (CXXRecordDecl *VBaseDecl = VBase->getType()->getAsCXXRecordDecl())
+        if (CXXRecordDecl *VBaseDecl = VBase.getType()->getAsCXXRecordDecl())
           if (!VBaseDecl->hasCopyConstructorWithConstParam())
             data().ImplicitCopyConstructorHasConstParam = false;
       }
@@ -532,8 +527,11 @@
   if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) {
     SMKind |= SMF_Destructor;
 
-    if (!DD->isImplicit())
+    if (DD->isUserProvided())
       data().HasIrrelevantDestructor = false;
+    // If the destructor is explicitly defaulted and not trivial or not public
+    // or if the destructor is deleted, we clear HasIrrelevantDestructor in
+    // finishedDefaultedOrDeletedMember.
 
     // C++11 [class.dtor]p5:
     //   A destructor is trivial if [...] the destructor is not virtual.
@@ -656,7 +654,13 @@
     // Keep track of the presence of mutable fields.
     if (Field->isMutable())
       data().HasMutableFields = true;
-    
+
+    // C++11 [class.union]p8, DR1460:
+    //   If X is a union, a non-static data member of X that is not an anonymous
+    //   union is a variant member of X.
+    if (isUnion() && !Field->isAnonymousStructOrUnion())
+      data().HasVariantMembers = true;
+
     // C++0x [class]p9:
     //   A POD struct is a class that is both a trivial class and a 
     //   standard-layout class, and has no non-static data members of type 
@@ -692,7 +696,9 @@
     if (!T->isLiteralType(Context) || T.isVolatileQualified())
       data().HasNonLiteralTypeFieldsOrBases = true;
 
-    if (Field->hasInClassInitializer()) {
+    if (Field->hasInClassInitializer() ||
+        (Field->isAnonymousStructOrUnion() &&
+         Field->getType()->getAsCXXRecordDecl()->hasInClassInitializer())) {
       data().HasInClassInitializer = true;
 
       // C++11 [class]p5:
@@ -809,15 +815,13 @@
         // Virtual bases and virtual methods make a class non-empty, but they
         // also make it non-standard-layout so we needn't check here.
         // A non-empty base class may leave the class standard-layout, but not
-        // if we have arrived here, and have at least on non-static data
+        // if we have arrived here, and have at least one non-static data
         // member. If IsStandardLayout remains true, then the first non-static
         // data member must come through here with Empty still true, and Empty
         // will subsequently be set to false below.
         if (data().IsStandardLayout && data().Empty) {
-          for (CXXRecordDecl::base_class_const_iterator BI = bases_begin(),
-                                                        BE = bases_end();
-               BI != BE; ++BI) {
-            if (Context.hasSameUnqualifiedType(BI->getType(), T)) {
+          for (const auto &BI : bases()) {
+            if (Context.hasSameUnqualifiedType(BI.getType(), T)) {
               data().IsStandardLayout = false;
               break;
             }
@@ -862,6 +866,13 @@
         if (FieldRec->hasUninitializedReferenceMember() &&
             !Field->hasInClassInitializer())
           data().HasUninitializedReferenceMember = true;
+
+        // C++11 [class.union]p8, DR1460:
+        //   a non-static data member of an anonymous union that is a member of
+        //   X is also a variant member of X.
+        if (FieldRec->hasVariantMembers() &&
+            Field->isAnonymousStructOrUnion())
+          data().HasVariantMembers = true;
       }
     } else {
       // Base element type of field is a non-class type.
@@ -928,9 +939,11 @@
     else if (Constructor->isConstexpr())
       // We may now know that the constructor is constexpr.
       data().HasConstexprNonCopyMoveConstructor = true;
-  } else if (isa<CXXDestructorDecl>(D))
+  } else if (isa<CXXDestructorDecl>(D)) {
     SMKind |= SMF_Destructor;
-  else if (D->isCopyAssignmentOperator())
+    if (!D->isTrivial() || D->getAccess() != AS_public || D->isDeleted())
+      data().HasIrrelevantDestructor = false;
+  } else if (D->isCopyAssignmentOperator())
     SMKind |= SMF_CopyAssignment;
   else if (D->isMoveAssignmentOperator())
     SMKind |= SMF_MoveAssignment;
@@ -1018,13 +1031,9 @@
 }
 
 static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
-  QualType T;
-  if (isa<UsingShadowDecl>(Conv))
-    Conv = cast<UsingShadowDecl>(Conv)->getTargetDecl();
-  if (FunctionTemplateDecl *ConvTemp = dyn_cast<FunctionTemplateDecl>(Conv))
-    T = ConvTemp->getTemplatedDecl()->getResultType();
-  else 
-    T = cast<CXXConversionDecl>(Conv)->getConversionType();
+  QualType T =
+      cast<CXXConversionDecl>(Conv->getUnderlyingDecl()->getAsFunction())
+          ->getConversionType();
   return Context.getCanonicalType(T);
 }
 
@@ -1087,14 +1096,13 @@
   }
 
   // Collect information recursively from any base classes.
-  for (CXXRecordDecl::base_class_iterator
-         I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
-    const RecordType *RT = I->getType()->getAs<RecordType>();
+  for (const auto &I : Record->bases()) {
+    const RecordType *RT = I.getType()->getAs<RecordType>();
     if (!RT) continue;
 
     AccessSpecifier BaseAccess
-      = CXXRecordDecl::MergeAccess(Access, I->getAccessSpecifier());
-    bool BaseInVirtual = InVirtual || I->isVirtual();
+      = CXXRecordDecl::MergeAccess(Access, I.getAccessSpecifier());
+    bool BaseInVirtual = InVirtual || I.isVirtual();
 
     CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
     CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
@@ -1130,13 +1138,12 @@
     HiddenTypes.insert(GetConversionType(Context, ConvI.getDecl()));
 
   // Recursively collect conversions from base classes.
-  for (CXXRecordDecl::base_class_iterator
-         I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
-    const RecordType *RT = I->getType()->getAs<RecordType>();
+  for (const auto &I : Record->bases()) {
+    const RecordType *RT = I.getType()->getAs<RecordType>();
     if (!RT) continue;
 
     CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()),
-                              I->isVirtual(), I->getAccessSpecifier(),
+                              I.isVirtual(), I.getAccessSpecifier(),
                               HiddenTypes, Output, VBaseCs, HiddenVBaseCs);
   }
 
@@ -1317,11 +1324,9 @@
       isDependentContext())
     return false;
   
-  for (CXXRecordDecl::base_class_const_iterator B = bases_begin(),
-                                             BEnd = bases_end();
-       B != BEnd; ++B) {
+  for (const auto &B : bases()) {
     CXXRecordDecl *BaseDecl 
-      = cast<CXXRecordDecl>(B->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(B.getType()->getAs<RecordType>()->getDecl());
     if (BaseDecl->isAbstract())
       return true;
   }
@@ -1383,9 +1388,8 @@
       return MD;
   }
 
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-         E = RD->bases_end(); I != E; ++I) {
-    const RecordType *RT = I->getType()->getAs<RecordType>();
+  for (const auto &I : RD->bases()) {
+    const RecordType *RT = I.getType()->getAs<RecordType>();
     if (!RT)
       continue;
     const CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
@@ -1404,17 +1408,14 @@
                       QualType T, TypeSourceInfo *TInfo,
                       StorageClass SC, bool isInline,
                       bool isConstexpr, SourceLocation EndLocation) {
-  return new (C) CXXMethodDecl(CXXMethod, RD, StartLoc, NameInfo, T, TInfo,
-                               SC, isInline, isConstexpr,
-                               EndLocation);
+  return new (C, RD) CXXMethodDecl(CXXMethod, RD, StartLoc, NameInfo, T, TInfo,
+                                   SC, isInline, isConstexpr, EndLocation);
 }
 
 CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXMethodDecl));
-  return new (Mem) CXXMethodDecl(CXXMethod, 0, SourceLocation(), 
-                                 DeclarationNameInfo(), QualType(),
-                                 0, SC_None, false, false,
-                                 SourceLocation());
+  return new (C, ID) CXXMethodDecl(CXXMethod, 0, SourceLocation(),
+                                   DeclarationNameInfo(), QualType(), 0,
+                                   SC_None, false, false, SourceLocation());
 }
 
 bool CXXMethodDecl::isUsualDeallocationFunction() const {
@@ -1677,9 +1678,9 @@
 
 CXXConstructorDecl *
 CXXConstructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConstructorDecl));
-  return new (Mem) CXXConstructorDecl(0, SourceLocation(),DeclarationNameInfo(),
-                                      QualType(), 0, false, false, false,false);
+  return new (C, ID) CXXConstructorDecl(0, SourceLocation(),
+                                        DeclarationNameInfo(), QualType(),
+                                        0, false, false, false, false);
 }
 
 CXXConstructorDecl *
@@ -1692,9 +1693,9 @@
   assert(NameInfo.getName().getNameKind()
          == DeclarationName::CXXConstructorName &&
          "Name must refer to a constructor");
-  return new (C) CXXConstructorDecl(RD, StartLoc, NameInfo, T, TInfo,
-                                    isExplicit, isInline, isImplicitlyDeclared,
-                                    isConstexpr);
+  return new (C, RD) CXXConstructorDecl(RD, StartLoc, NameInfo, T, TInfo,
+                                        isExplicit, isInline,
+                                        isImplicitlyDeclared, isConstexpr);
 }
 
 CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
@@ -1827,9 +1828,8 @@
 
 CXXDestructorDecl *
 CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXDestructorDecl));
-  return new (Mem) CXXDestructorDecl(0, SourceLocation(), DeclarationNameInfo(),
-                                   QualType(), 0, false, false);
+  return new (C, ID) CXXDestructorDecl(
+      0, SourceLocation(), DeclarationNameInfo(), QualType(), 0, false, false);
 }
 
 CXXDestructorDecl *
@@ -1841,18 +1841,18 @@
   assert(NameInfo.getName().getNameKind()
          == DeclarationName::CXXDestructorName &&
          "Name must refer to a destructor");
-  return new (C) CXXDestructorDecl(RD, StartLoc, NameInfo, T, TInfo, isInline,
-                                   isImplicitlyDeclared);
+  return new (C, RD) CXXDestructorDecl(RD, StartLoc, NameInfo, T, TInfo,
+                                       isInline, isImplicitlyDeclared);
 }
 
 void CXXConversionDecl::anchor() { }
 
 CXXConversionDecl *
 CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConversionDecl));
-  return new (Mem) CXXConversionDecl(0, SourceLocation(), DeclarationNameInfo(),
-                                     QualType(), 0, false, false, false,
-                                     SourceLocation());
+  return new (C, ID) CXXConversionDecl(0, SourceLocation(),
+                                       DeclarationNameInfo(), QualType(),
+                                       0, false, false, false,
+                                       SourceLocation());
 }
 
 CXXConversionDecl *
@@ -1865,9 +1865,9 @@
   assert(NameInfo.getName().getNameKind()
          == DeclarationName::CXXConversionFunctionName &&
          "Name must refer to a conversion function");
-  return new (C) CXXConversionDecl(RD, StartLoc, NameInfo, T, TInfo,
-                                   isInline, isExplicit, isConstexpr,
-                                   EndLocation);
+  return new (C, RD) CXXConversionDecl(RD, StartLoc, NameInfo, T, TInfo,
+                                       isInline, isExplicit, isConstexpr,
+                                       EndLocation);
 }
 
 bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
@@ -1883,13 +1883,13 @@
                                          SourceLocation LangLoc,
                                          LanguageIDs Lang,
                                          bool HasBraces) {
-  return new (C) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces);
+  return new (C, DC) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces);
 }
 
-LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LinkageSpecDecl));
-  return new (Mem) LinkageSpecDecl(0, SourceLocation(), SourceLocation(),
-                                   lang_c, false);
+LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C,
+                                                     unsigned ID) {
+  return new (C, ID) LinkageSpecDecl(0, SourceLocation(), SourceLocation(),
+                                     lang_c, false);
 }
 
 void UsingDirectiveDecl::anchor() { }
@@ -1903,16 +1903,15 @@
                                                DeclContext *CommonAncestor) {
   if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Used))
     Used = NS->getOriginalNamespace();
-  return new (C) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
-                                    IdentLoc, Used, CommonAncestor);
+  return new (C, DC) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
+                                        IdentLoc, Used, CommonAncestor);
 }
 
-UsingDirectiveDecl *
-UsingDirectiveDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDirectiveDecl));
-  return new (Mem) UsingDirectiveDecl(0, SourceLocation(), SourceLocation(),
-                                      NestedNameSpecifierLoc(),
-                                      SourceLocation(), 0, 0);
+UsingDirectiveDecl *UsingDirectiveDecl::CreateDeserialized(ASTContext &C,
+                                                           unsigned ID) {
+  return new (C, ID) UsingDirectiveDecl(0, SourceLocation(), SourceLocation(),
+                                        NestedNameSpecifierLoc(),
+                                        SourceLocation(), 0, 0);
 }
 
 NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
@@ -1922,8 +1921,6 @@
   return cast_or_null<NamespaceDecl>(NominatedNamespace);
 }
 
-void NamespaceDecl::anchor() { }
-
 NamespaceDecl::NamespaceDecl(DeclContext *DC, bool Inline, 
                              SourceLocation StartLoc,
                              SourceLocation IdLoc, IdentifierInfo *Id,
@@ -1941,13 +1938,22 @@
                                      bool Inline, SourceLocation StartLoc,
                                      SourceLocation IdLoc, IdentifierInfo *Id,
                                      NamespaceDecl *PrevDecl) {
-  return new (C) NamespaceDecl(DC, Inline, StartLoc, IdLoc, Id, PrevDecl);
+  return new (C, DC) NamespaceDecl(DC, Inline, StartLoc, IdLoc, Id, PrevDecl);
 }
 
 NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceDecl));
-  return new (Mem) NamespaceDecl(0, false, SourceLocation(), SourceLocation(), 
-                                 0, 0);
+  return new (C, ID) NamespaceDecl(0, false, SourceLocation(), SourceLocation(),
+                                   0, 0);
+}
+
+NamespaceDecl *NamespaceDecl::getNextRedeclaration() {
+  return RedeclLink.getNext();
+}
+NamespaceDecl *NamespaceDecl::getPreviousDeclImpl() {
+  return getPreviousDecl();
+}
+NamespaceDecl *NamespaceDecl::getMostRecentDeclImpl() {
+  return getMostRecentDecl();
 }
 
 void NamespaceAliasDecl::anchor() { }
@@ -1961,24 +1967,22 @@
                                                NamedDecl *Namespace) {
   if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
     Namespace = NS->getOriginalNamespace();
-  return new (C) NamespaceAliasDecl(DC, UsingLoc, AliasLoc, Alias, 
-                                    QualifierLoc, IdentLoc, Namespace);
+  return new (C, DC) NamespaceAliasDecl(DC, UsingLoc, AliasLoc, Alias,
+                                        QualifierLoc, IdentLoc, Namespace);
 }
 
 NamespaceAliasDecl *
 NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceAliasDecl));
-  return new (Mem) NamespaceAliasDecl(0, SourceLocation(), SourceLocation(), 0,
-                                      NestedNameSpecifierLoc(), 
-                                      SourceLocation(), 0);
+  return new (C, ID) NamespaceAliasDecl(0, SourceLocation(), SourceLocation(),
+                                        0, NestedNameSpecifierLoc(),
+                                        SourceLocation(), 0);
 }
 
 void UsingShadowDecl::anchor() { }
 
 UsingShadowDecl *
 UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingShadowDecl));
-  return new (Mem) UsingShadowDecl(0, SourceLocation(), 0, 0);
+  return new (C, ID) UsingShadowDecl(0, SourceLocation(), 0, 0);
 }
 
 UsingDecl *UsingShadowDecl::getUsingDecl() const {
@@ -2026,13 +2030,12 @@
                              NestedNameSpecifierLoc QualifierLoc,
                              const DeclarationNameInfo &NameInfo,
                              bool HasTypename) {
-  return new (C) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename);
+  return new (C, DC) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename);
 }
 
 UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDecl));
-  return new (Mem) UsingDecl(0, SourceLocation(), NestedNameSpecifierLoc(),
-                             DeclarationNameInfo(), false);
+  return new (C, ID) UsingDecl(0, SourceLocation(), NestedNameSpecifierLoc(),
+                               DeclarationNameInfo(), false);
 }
 
 SourceRange UsingDecl::getSourceRange() const {
@@ -2048,15 +2051,14 @@
                                  SourceLocation UsingLoc,
                                  NestedNameSpecifierLoc QualifierLoc,
                                  const DeclarationNameInfo &NameInfo) {
-  return new (C) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
-                                          QualifierLoc, NameInfo);
+  return new (C, DC) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
+                                              QualifierLoc, NameInfo);
 }
 
 UnresolvedUsingValueDecl *
 UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UnresolvedUsingValueDecl));
-  return new (Mem) UnresolvedUsingValueDecl(0, QualType(), SourceLocation(),
-                                            NestedNameSpecifierLoc(),
-                                            DeclarationNameInfo());
+  return new (C, ID) UnresolvedUsingValueDecl(0, QualType(), SourceLocation(),
+                                              NestedNameSpecifierLoc(),
+                                              DeclarationNameInfo());
 }
 
@@ -2075,20 +2077,16 @@
                                     NestedNameSpecifierLoc QualifierLoc,
                                     SourceLocation TargetNameLoc,
                                     DeclarationName TargetName) {
-  return new (C) UnresolvedUsingTypenameDecl(DC, UsingLoc, TypenameLoc,
-                                             QualifierLoc, TargetNameLoc,
-                                             TargetName.getAsIdentifierInfo());
+  return new (C, DC) UnresolvedUsingTypenameDecl(
+      DC, UsingLoc, TypenameLoc, QualifierLoc, TargetNameLoc,
+      TargetName.getAsIdentifierInfo());
 }
 
 UnresolvedUsingTypenameDecl *
 UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, 
-                                       sizeof(UnresolvedUsingTypenameDecl));
-  return new (Mem) UnresolvedUsingTypenameDecl(0, SourceLocation(),
-                                               SourceLocation(),
-                                               NestedNameSpecifierLoc(),
-                                               SourceLocation(),
-                                               0);
+  return new (C, ID) UnresolvedUsingTypenameDecl(
+      0, SourceLocation(), SourceLocation(), NestedNameSpecifierLoc(),
+      SourceLocation(), 0);
 }
 
 void StaticAssertDecl::anchor() { }
@@ -2099,15 +2097,29 @@
                                            StringLiteral *Message,
                                            SourceLocation RParenLoc,
                                            bool Failed) {
-  return new (C) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
-                                  RParenLoc, Failed);
+  return new (C, DC) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
+                                      RParenLoc, Failed);
 }
 
-StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C, 
+StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
                                                        unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(StaticAssertDecl));
-  return new (Mem) StaticAssertDecl(0, SourceLocation(), 0, 0,
-                                    SourceLocation(), false);
+  return new (C, ID) StaticAssertDecl(0, SourceLocation(), 0, 0,
+                                      SourceLocation(), false);
+}
+
+MSPropertyDecl *MSPropertyDecl::Create(ASTContext &C, DeclContext *DC,
+                                       SourceLocation L, DeclarationName N,
+                                       QualType T, TypeSourceInfo *TInfo,
+                                       SourceLocation StartL,
+                                       IdentifierInfo *Getter,
+                                       IdentifierInfo *Setter) {
+  return new (C, DC) MSPropertyDecl(DC, L, N, T, TInfo, StartL, Getter, Setter);
+}
+
+MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
+                                                   unsigned ID) {
+  return new (C, ID) MSPropertyDecl(0, SourceLocation(), DeclarationName(),
+                                    QualType(), 0, SourceLocation(), 0, 0);
 }
 
 static const char *getAccessName(AccessSpecifier AS) {
diff --git a/lib/AST/DeclFriend.cpp b/lib/AST/DeclFriend.cpp
index 1c639d6..02374c7 100644
--- a/lib/AST/DeclFriend.cpp
+++ b/lib/AST/DeclFriend.cpp
@@ -46,21 +46,17 @@
   }
 #endif
 
-  std::size_t Size = sizeof(FriendDecl)
-    + FriendTypeTPLists.size() * sizeof(TemplateParameterList*);
-  void *Mem = C.Allocate(Size);
-  FriendDecl *FD = new (Mem) FriendDecl(DC, L, Friend, FriendL,
-                                        FriendTypeTPLists);
+  std::size_t Extra = FriendTypeTPLists.size() * sizeof(TemplateParameterList*);
+  FriendDecl *FD = new (C, DC, Extra) FriendDecl(DC, L, Friend, FriendL,
+                                                 FriendTypeTPLists);
   cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
   return FD;
 }
 
 FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID,
                                            unsigned FriendTypeNumTPLists) {
-  std::size_t Size = sizeof(FriendDecl)
-    + FriendTypeNumTPLists * sizeof(TemplateParameterList*);
-  void *Mem = AllocateDeserializedDecl(C, ID, Size);
-  return new (Mem) FriendDecl(EmptyShell(), FriendTypeNumTPLists);
+  std::size_t Extra = FriendTypeNumTPLists * sizeof(TemplateParameterList*);
+  return new (C, ID, Extra) FriendDecl(EmptyShell(), FriendTypeNumTPLists);
 }
 
 FriendDecl *CXXRecordDecl::getFirstFriend() const {
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index b2b5b70..c53dba3 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -112,11 +112,7 @@
   if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(this)) {
     // Also look into categories, including class extensions, looking
     // for a user declared instance method.
-    for (ObjCInterfaceDecl::visible_categories_iterator
-         Cat = ID->visible_categories_begin(),
-         CatEnd = ID->visible_categories_end();
-         Cat != CatEnd;
-         ++Cat) {
+    for (const auto *Cat : ID->visible_categories()) {
       if (ObjCMethodDecl *MD = Cat->getInstanceMethod(Sel))
         if (!MD->isImplicit())
           return true;
@@ -125,8 +121,7 @@
       // Also search through the categories looking for a 'readwrite' declaration
       // of this property. If one found, presumably a setter will be provided
       // (properties declared in categories will not get auto-synthesized).
-      for (ObjCContainerDecl::prop_iterator P = Cat->prop_begin(),
-           E = Cat->prop_end(); P != E; ++P)
+      for (const auto *P : Cat->properties())
         if (P->getIdentifier() == Property->getIdentifier()) {
           if (P->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite)
             return true;
@@ -135,13 +130,10 @@
     }
     
     // Also look into protocols, for a user declared instance method.
-    for (ObjCInterfaceDecl::all_protocol_iterator P =
-         ID->all_referenced_protocol_begin(),
-         PE = ID->all_referenced_protocol_end(); P != PE; ++P) {
-      ObjCProtocolDecl *Proto = (*P);
+    for (const auto *Proto : ID->all_referenced_protocols())
       if (Proto->HasUserDeclaredSetterMethod(Property))
         return true;
-    }
+
     // And in its super class.
     ObjCInterfaceDecl *OSC = ID->getSuperClass();
     while (OSC) {
@@ -151,11 +143,9 @@
     }
   }
   if (const ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(this))
-    for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
-         E = PD->protocol_end(); PI != E; ++PI) {
-      if ((*PI)->HasUserDeclaredSetterMethod(Property))
+    for (const auto *PI : PD->protocols())
+      if (PI->HasUserDeclaredSetterMethod(Property))
         return true;
-    }
   return false;
 }
 
@@ -209,29 +199,23 @@
       break;
     case Decl::ObjCProtocol: {
       const ObjCProtocolDecl *PID = cast<ObjCProtocolDecl>(this);
-      for (ObjCProtocolDecl::protocol_iterator I = PID->protocol_begin(),
-           E = PID->protocol_end(); I != E; ++I)
-        if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+      for (const auto *I : PID->protocols())
+        if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
           return P;
       break;
     }
     case Decl::ObjCInterface: {
       const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this);
       // Look through categories (but not extensions).
-      for (ObjCInterfaceDecl::visible_categories_iterator
-             Cat = OID->visible_categories_begin(),
-             CatEnd = OID->visible_categories_end();
-           Cat != CatEnd; ++Cat) {
+      for (const auto *Cat : OID->visible_categories()) {
         if (!Cat->IsClassExtension())
           if (ObjCPropertyDecl *P = Cat->FindPropertyDeclaration(PropertyId))
             return P;
       }
 
       // Look through protocols.
-      for (ObjCInterfaceDecl::all_protocol_iterator
-            I = OID->all_referenced_protocol_begin(),
-            E = OID->all_referenced_protocol_end(); I != E; ++I)
-        if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+      for (const auto *I : OID->all_referenced_protocols())
+        if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
           return P;
 
       // Finally, check the super class.
@@ -243,11 +227,9 @@
       const ObjCCategoryDecl *OCD = cast<ObjCCategoryDecl>(this);
       // Look through protocols.
       if (!OCD->IsClassExtension())
-        for (ObjCCategoryDecl::protocol_iterator
-              I = OCD->protocol_begin(), E = OCD->protocol_end(); I != E; ++I)
-        if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
-          return P;
-
+        for (const auto *I : OCD->protocols())
+          if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
+            return P;
       break;
     }
   }
@@ -275,10 +257,8 @@
     return PD;
 
   // Look through protocols.
-  for (ObjCInterfaceDecl::all_protocol_iterator
-        I = all_referenced_protocol_begin(),
-        E = all_referenced_protocol_end(); I != E; ++I)
-    if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+  for (const auto *I : all_referenced_protocols())
+    if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
       return P;
 
   return 0;
@@ -286,16 +266,12 @@
 
 void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM,
                                                      PropertyDeclOrder &PO) const {
-  for (ObjCContainerDecl::prop_iterator P = prop_begin(),
-      E = prop_end(); P != E; ++P) {
-    ObjCPropertyDecl *Prop = *P;
+  for (auto *Prop : properties()) {
     PM[Prop->getIdentifier()] = Prop;
     PO.push_back(Prop);
   }
-  for (ObjCInterfaceDecl::all_protocol_iterator
-      PI = all_referenced_protocol_begin(),
-      E = all_referenced_protocol_end(); PI != E; ++PI)
-    (*PI)->collectPropertiesToImplement(PM, PO);
+  for (const auto *PI : all_referenced_protocols())
+    PI->collectPropertiesToImplement(PM, PO);
   // Note, the properties declared only in class extensions are still copied
   // into the main @interface's property list, and therefore we don't
   // explicitly, have to search class extension properties.
@@ -341,10 +317,7 @@
   for (unsigned i = 0; i < ExtNum; i++) {
     bool protocolExists = false;
     ObjCProtocolDecl *ProtoInExtension = ExtList[i];
-    for (all_protocol_iterator
-          p = all_referenced_protocol_begin(),
-          e = all_referenced_protocol_end(); p != e; ++p) {
-      ObjCProtocolDecl *Proto = (*p);
+    for (auto *Proto : all_referenced_protocols()) {
       if (C.ProtocolCompatibleWithProtocol(ProtoInExtension, Proto)) {
         protocolExists = true;
         break;
@@ -360,14 +333,116 @@
     return;
 
   // Merge ProtocolRefs into class's protocol list;
-  for (all_protocol_iterator p = all_referenced_protocol_begin(), 
-        e = all_referenced_protocol_end(); p != e; ++p) {
-    ProtocolRefs.push_back(*p);
+  for (auto *P : all_referenced_protocols()) {
+    ProtocolRefs.push_back(P);
   }
 
   data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C);
 }
 
+const ObjCInterfaceDecl *
+ObjCInterfaceDecl::findInterfaceWithDesignatedInitializers() const {
+  const ObjCInterfaceDecl *IFace = this;
+  while (IFace) {
+    if (IFace->hasDesignatedInitializers())
+      return IFace;
+    if (!IFace->inheritsDesignatedInitializers())
+      break;
+    IFace = IFace->getSuperClass();
+  }
+  return 0;
+}
+
+static bool isIntroducingInitializers(const ObjCInterfaceDecl *D) {
+  for (const auto *MD : D->instance_methods()) {
+    if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
+      return true;
+  }
+  for (const auto *Ext : D->visible_extensions()) {
+    for (const auto *MD : Ext->instance_methods()) {
+      if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
+        return true;
+    }
+  }
+  return false;
+}
+
+bool ObjCInterfaceDecl::inheritsDesignatedInitializers() const {
+  switch (data().InheritedDesignatedInitializers) {
+  case DefinitionData::IDI_Inherited:
+    return true;
+  case DefinitionData::IDI_NotInherited:
+    return false;
+  case DefinitionData::IDI_Unknown: {
+    // If the class introduced initializers we conservatively assume that we
+    // don't know if any of them is a designated initializer to avoid possible
+    // misleading warnings.
+    if (isIntroducingInitializers(this)) {
+      data().InheritedDesignatedInitializers = DefinitionData::IDI_NotInherited;
+      return false;
+    } else {
+      data().InheritedDesignatedInitializers = DefinitionData::IDI_Inherited;
+      return true;
+    }
+  }
+  }
+
+  llvm_unreachable("unexpected InheritedDesignatedInitializers value");
+}
+
+void ObjCInterfaceDecl::getDesignatedInitializers(
+    llvm::SmallVectorImpl<const ObjCMethodDecl *> &Methods) const {
+  // Check for a complete definition and recover if not so.
+  if (!isThisDeclarationADefinition())
+    return;
+  if (data().ExternallyCompleted)
+    LoadExternalDefinition();
+
+  const ObjCInterfaceDecl *IFace= findInterfaceWithDesignatedInitializers();
+  if (!IFace)
+    return;
+
+  for (const auto *MD : IFace->instance_methods())
+    if (MD->isThisDeclarationADesignatedInitializer())
+      Methods.push_back(MD);
+  for (const auto *Ext : IFace->visible_extensions()) {
+    for (const auto *MD : Ext->instance_methods())
+      if (MD->isThisDeclarationADesignatedInitializer())
+        Methods.push_back(MD);
+  }
+}
+
+bool ObjCInterfaceDecl::isDesignatedInitializer(Selector Sel,
+                                      const ObjCMethodDecl **InitMethod) const {
+  // Check for a complete definition and recover if not so.
+  if (!isThisDeclarationADefinition())
+    return false;
+  if (data().ExternallyCompleted)
+    LoadExternalDefinition();
+
+  const ObjCInterfaceDecl *IFace= findInterfaceWithDesignatedInitializers();
+  if (!IFace)
+    return false;
+
+  if (const ObjCMethodDecl *MD = IFace->getInstanceMethod(Sel)) {
+    if (MD->isThisDeclarationADesignatedInitializer()) {
+      if (InitMethod)
+        *InitMethod = MD;
+      return true;
+    }
+  }
+  for (const auto *Ext : IFace->visible_extensions()) {
+    if (const ObjCMethodDecl *MD = Ext->getInstanceMethod(Sel)) {
+      if (MD->isThisDeclarationADesignatedInitializer()) {
+        if (InitMethod)
+          *InitMethod = MD;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 void ObjCInterfaceDecl::allocateDefinitionData() {
   assert(!hasDefinition() && "ObjC class already has a definition");
   Data.setPointer(new (getASTContext()) DefinitionData());
@@ -382,9 +457,8 @@
   allocateDefinitionData();
 
   // Update all of the declarations with a pointer to the definition.
-  for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
-       RD != RDEnd; ++RD) {
-    if (*RD != this)
+  for (auto RD : redecls()) {
+    if (RD != this)
       RD->Data = Data;
   }
 }
@@ -405,10 +479,7 @@
       return I;
     }
 
-    for (ObjCInterfaceDecl::visible_extensions_iterator
-           Ext = ClassDecl->visible_extensions_begin(),
-           ExtEnd = ClassDecl->visible_extensions_end();
-         Ext != ExtEnd; ++Ext) {
+    for (const auto *Ext : ClassDecl->visible_extensions()) {
       if (ObjCIvarDecl *I = Ext->getIvarDecl(ID)) {
         clsDeclared = ClassDecl;
         return I;
@@ -443,11 +514,9 @@
 
 ObjCProtocolDecl *
 ObjCInterfaceDecl::lookupNestedProtocol(IdentifierInfo *Name) {
-  for (ObjCInterfaceDecl::all_protocol_iterator P =
-       all_referenced_protocol_begin(), PE = all_referenced_protocol_end();
-       P != PE; ++P)
-    if ((*P)->lookupProtocolNamed(Name))
-      return (*P);
+  for (auto *P : all_referenced_protocols())
+    if (P->lookupProtocolNamed(Name))
+      return P;
   ObjCInterfaceDecl *SuperClass = getSuperClass();
   return SuperClass ? SuperClass->lookupNestedProtocol(Name) : NULL;
 }
@@ -457,9 +526,11 @@
 /// When argument category "C" is specified, any implicit method found
 /// in this category is ignored.
 ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel, 
-                                     bool isInstance,
-                                     bool shallowCategoryLookup,
-                                     const ObjCCategoryDecl *C) const {
+                                                bool isInstance,
+                                                bool shallowCategoryLookup,
+                                                bool followSuper,
+                                                const ObjCCategoryDecl *C) const
+{
   // FIXME: Should make sure no callers ever do this.
   if (!hasDefinition())
     return 0;
@@ -470,24 +541,19 @@
   if (data().ExternallyCompleted)
     LoadExternalDefinition();
 
-  while (ClassDecl != NULL) {
+  while (ClassDecl) {
     if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance)))
       return MethodDecl;
 
     // Didn't find one yet - look through protocols.
-    for (ObjCInterfaceDecl::protocol_iterator I = ClassDecl->protocol_begin(),
-                                              E = ClassDecl->protocol_end();
-           I != E; ++I)
-      if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+    for (const auto *I : ClassDecl->protocols())
+      if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
         return MethodDecl;
     
     // Didn't find one yet - now look through categories.
-    for (ObjCInterfaceDecl::visible_categories_iterator
-         Cat = ClassDecl->visible_categories_begin(),
-         CatEnd = ClassDecl->visible_categories_end();
-         Cat != CatEnd; ++Cat) {
+    for (const auto *Cat : ClassDecl->visible_categories()) {
       if ((MethodDecl = Cat->getMethod(Sel, isInstance)))
-        if (C != (*Cat) || !MethodDecl->isImplicit())
+        if (C != Cat || !MethodDecl->isImplicit())
           return MethodDecl;
 
       if (!shallowCategoryLookup) {
@@ -497,11 +563,15 @@
         for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
              E = Protocols.end(); I != E; ++I)
           if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
-            if (C != (*Cat) || !MethodDecl->isImplicit())
+            if (C != Cat || !MethodDecl->isImplicit())
               return MethodDecl;
       }
     }
-  
+
+    if (!followSuper)
+      return NULL;
+
+    // Get the super class (if any).
     ClassDecl = ClassDecl->getSuperClass();
   }
   return NULL;
@@ -550,31 +620,38 @@
 // ObjCMethodDecl
 //===----------------------------------------------------------------------===//
 
-ObjCMethodDecl *ObjCMethodDecl::Create(ASTContext &C,
-                                       SourceLocation beginLoc,
-                                       SourceLocation endLoc,
-                                       Selector SelInfo, QualType T,
-                                       TypeSourceInfo *ResultTInfo,
-                                       DeclContext *contextDecl,
-                                       bool isInstance,
-                                       bool isVariadic,
-                                       bool isPropertyAccessor,
-                                       bool isImplicitlyDeclared,
-                                       bool isDefined,
-                                       ImplementationControl impControl,
-                                       bool HasRelatedResultType) {
-  return new (C) ObjCMethodDecl(beginLoc, endLoc,
-                                SelInfo, T, ResultTInfo, contextDecl,
-                                isInstance, isVariadic, isPropertyAccessor,
-                                isImplicitlyDeclared, isDefined,
-                                impControl,
-                                HasRelatedResultType);
+ObjCMethodDecl *ObjCMethodDecl::Create(
+    ASTContext &C, SourceLocation beginLoc, SourceLocation endLoc,
+    Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo,
+    DeclContext *contextDecl, bool isInstance, bool isVariadic,
+    bool isPropertyAccessor, bool isImplicitlyDeclared, bool isDefined,
+    ImplementationControl impControl, bool HasRelatedResultType) {
+  return new (C, contextDecl) ObjCMethodDecl(
+      beginLoc, endLoc, SelInfo, T, ReturnTInfo, contextDecl, isInstance,
+      isVariadic, isPropertyAccessor, isImplicitlyDeclared, isDefined,
+      impControl, HasRelatedResultType);
 }
 
 ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCMethodDecl));
-  return new (Mem) ObjCMethodDecl(SourceLocation(), SourceLocation(), 
-                                  Selector(), QualType(), 0, 0);
+  return new (C, ID) ObjCMethodDecl(SourceLocation(), SourceLocation(),
+                                    Selector(), QualType(), 0, 0);
+}
+
+bool ObjCMethodDecl::isThisDeclarationADesignatedInitializer() const {
+  return getMethodFamily() == OMF_init &&
+      hasAttr<ObjCDesignatedInitializerAttr>();
+}
+
+bool ObjCMethodDecl::isDesignatedInitializerForTheInterface(
+    const ObjCMethodDecl **InitMethod) const {
+  if (getMethodFamily() != OMF_init)
+    return false;
+  const DeclContext *DC = getDeclContext();
+  if (isa<ObjCProtocolDecl>(DC))
+    return false;
+  if (const ObjCInterfaceDecl *ID = getClassInterface())
+    return ID->isDesignatedInitializer(getSelector(), InitMethod);
+  return false;
 }
 
 Stmt *ObjCMethodDecl::getBody() const {
@@ -730,7 +807,7 @@
   // init only has a conventional meaning for an instance method, and
   // it has to return an object.
   case OMF_init:
-    if (!isInstanceMethod() || !getResultType()->isObjCObjectPointerType())
+    if (!isInstanceMethod() || !getReturnType()->isObjCObjectPointerType())
       family = OMF_None;
     break;
 
@@ -740,7 +817,7 @@
   case OMF_copy:
   case OMF_mutableCopy:
   case OMF_new:
-    if (!getResultType()->isObjCObjectPointerType())
+    if (!getReturnType()->isObjCObjectPointerType())
       family = OMF_None;
     break;
 
@@ -757,15 +834,14 @@
     break;
       
   case OMF_performSelector:
-    if (!isInstanceMethod() ||
-        !getResultType()->isObjCIdType())
+    if (!isInstanceMethod() || !getReturnType()->isObjCIdType())
       family = OMF_None;
     else {
       unsigned noParams = param_size();
       if (noParams < 1 || noParams > 3)
         family = OMF_None;
       else {
-        ObjCMethodDecl::arg_type_iterator it = arg_type_begin();
+        ObjCMethodDecl::param_type_iterator it = param_type_begin();
         QualType ArgT = (*it);
         if (!ArgT->isObjCSelType()) {
           family = OMF_None;
@@ -838,7 +914,7 @@
   setSelfDecl(self);
 
   if (selfIsConsumed)
-    self->addAttr(new (Context) NSConsumedAttr(SourceLocation(), Context));
+    self->addAttr(NSConsumedAttr::CreateImplicit(Context));
 
   if (selfIsPseudoStrong)
     self->setARCPseudoStrong(true);
@@ -855,8 +931,8 @@
     return CD->getClassInterface();
   if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(getDeclContext()))
     return IMD->getClassInterface();
-
-  assert(!isa<ObjCProtocolDecl>(getDeclContext()) && "It's a protocol method");
+  if (isa<ObjCProtocolDecl>(getDeclContext()))
+    return 0;
   llvm_unreachable("unknown method context");
 }
 
@@ -886,10 +962,8 @@
           return;
         }
 
-    for (ObjCCategoryDecl::protocol_iterator P = Category->protocol_begin(),
-                                          PEnd = Category->protocol_end();
-         P != PEnd; ++P)
-      CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+    for (const auto *P : Category->protocols())
+      CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
     return;
   }
 
@@ -906,26 +980,17 @@
     }
 
   if (const ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)){
-    for (ObjCProtocolDecl::protocol_iterator P = Protocol->protocol_begin(),
-                                          PEnd = Protocol->protocol_end();
-         P != PEnd; ++P)
-      CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+    for (const auto *P : Protocol->protocols())
+      CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
   }
 
   if (const ObjCInterfaceDecl *
         Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
-    for (ObjCInterfaceDecl::protocol_iterator P = Interface->protocol_begin(),
-                                           PEnd = Interface->protocol_end();
-         P != PEnd; ++P)
-      CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+    for (const auto *P : Interface->protocols())
+      CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
 
-    for (ObjCInterfaceDecl::known_categories_iterator
-           Cat = Interface->known_categories_begin(),
-           CatEnd = Interface->known_categories_end();
-         Cat != CatEnd; ++Cat) {
-      CollectOverriddenMethodsRecurse(*Cat, Method, Methods,
-                                      MovedToSuper);
-    }
+    for (const auto *Cat : Interface->known_categories())
+      CollectOverriddenMethodsRecurse(Cat, Method, Methods, MovedToSuper);
 
     if (const ObjCInterfaceDecl *Super = Interface->getSuperClass())
       return CollectOverriddenMethodsRecurse(Super, Method, Methods,
@@ -1016,13 +1081,11 @@
     
     bool IsGetter = (NumArgs == 0);
 
-    for (ObjCContainerDecl::prop_iterator I = Container->prop_begin(),
-                                          E = Container->prop_end();
-         I != E; ++I) {
-      Selector NextSel = IsGetter ? (*I)->getGetterName()
-                                  : (*I)->getSetterName();
+    for (const auto *I : Container->properties()) {
+      Selector NextSel = IsGetter ? I->getGetterName()
+                                  : I->getSetterName();
       if (NextSel == Sel)
-        return *I;
+        return I;
     }
 
     llvm_unreachable("Marked as a property accessor but no property found!");
@@ -1055,19 +1118,18 @@
                                              ObjCInterfaceDecl *PrevDecl,
                                              SourceLocation ClassLoc,
                                              bool isInternal){
-  ObjCInterfaceDecl *Result = new (C) ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc, 
-                                                        PrevDecl, isInternal);
+  ObjCInterfaceDecl *Result = new (C, DC)
+      ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc, PrevDecl, isInternal);
   Result->Data.setInt(!C.getLangOpts().Modules);
   C.getObjCInterfaceType(Result, PrevDecl);
   return Result;
 }
 
-ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(ASTContext &C, 
+ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(ASTContext &C,
                                                          unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCInterfaceDecl));
-  ObjCInterfaceDecl *Result = new (Mem) ObjCInterfaceDecl(0, SourceLocation(),
-                                                          0, SourceLocation(),
-                                                          0, false);
+  ObjCInterfaceDecl *Result = new (C, ID) ObjCInterfaceDecl(0, SourceLocation(),
+                                                            0, SourceLocation(),
+                                                            0, false);
   Result->Data.setInt(!C.getLangOpts().Modules);
   return Result;
 }
@@ -1103,6 +1165,23 @@
   data().ExternallyCompleted = true;
 }
 
+void ObjCInterfaceDecl::setHasDesignatedInitializers() {
+  // Check for a complete definition and recover if not so.
+  if (!isThisDeclarationADefinition())
+    return;
+  data().HasDesignatedInitializers = true;
+}
+
+bool ObjCInterfaceDecl::hasDesignatedInitializers() const {
+  // Check for a complete definition and recover if not so.
+  if (!isThisDeclarationADefinition())
+    return false;
+  if (data().ExternallyCompleted)
+    LoadExternalDefinition();
+
+  return data().HasDesignatedInitializers;
+}
+
 ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const {
   if (const ObjCInterfaceDecl *Def = getDefinition()) {
     if (data().ExternallyCompleted)
@@ -1157,10 +1236,7 @@
         curIvar->setNextIvar(*I);
     }
 
-    for (ObjCInterfaceDecl::known_extensions_iterator
-           Ext = known_extensions_begin(),
-           ExtEnd = known_extensions_end();
-         Ext != ExtEnd; ++Ext) {
+    for (const auto *Ext : known_extensions()) {
       if (!Ext->ivar_empty()) {
         ObjCCategoryDecl::ivar_iterator
           I = Ext->ivar_begin(),
@@ -1184,19 +1260,17 @@
     data().IvarListMissingImplementation = false;
     if (!ImplDecl->ivar_empty()) {
       SmallVector<SynthesizeIvarChunk, 16> layout;
-      for (ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(),
-           E = ImplDecl->ivar_end(); I != E; ++I) {
-        ObjCIvarDecl *IV = *I;
+      for (auto *IV : ImplDecl->ivars()) {
         if (IV->getSynthesize() && !IV->isInvalidDecl()) {
           layout.push_back(SynthesizeIvarChunk(
                              IV->getASTContext().getTypeSize(IV->getType()), IV));
           continue;
         }
         if (!data().IvarList)
-          data().IvarList = *I;
+          data().IvarList = IV;
         else
-          curIvar->setNextIvar(*I);
-        curIvar = *I;
+          curIvar->setNextIvar(IV);
+        curIvar = IV;
       }
       
       if (!layout.empty()) {
@@ -1228,23 +1302,16 @@
   if (data().ExternallyCompleted)
     LoadExternalDefinition();
 
-  for (visible_categories_iterator Cat = visible_categories_begin(),
-                                   CatEnd = visible_categories_end();
-       Cat != CatEnd;
-       ++Cat) {
+  for (auto *Cat : visible_categories())
     if (Cat->getIdentifier() == CategoryId)
-      return *Cat;
-  }
+      return Cat;
   
   return 0;
 }
 
 ObjCMethodDecl *
 ObjCInterfaceDecl::getCategoryInstanceMethod(Selector Sel) const {
-  for (visible_categories_iterator Cat = visible_categories_begin(),
-                                   CatEnd = visible_categories_end();
-       Cat != CatEnd;
-       ++Cat) {
+  for (const auto *Cat : visible_categories()) {
     if (ObjCCategoryImplDecl *Impl = Cat->getImplementation())
       if (ObjCMethodDecl *MD = Impl->getInstanceMethod(Sel))
         return MD;
@@ -1254,10 +1321,7 @@
 }
 
 ObjCMethodDecl *ObjCInterfaceDecl::getCategoryClassMethod(Selector Sel) const {
-  for (visible_categories_iterator Cat = visible_categories_begin(),
-                                   CatEnd = visible_categories_end();
-       Cat != CatEnd;
-       ++Cat) {
+  for (const auto *Cat : visible_categories()) {
     if (ObjCCategoryImplDecl *Impl = Cat->getImplementation())
       if (ObjCMethodDecl *MD = Impl->getClassMethod(Sel))
         return MD;
@@ -1277,9 +1341,8 @@
   
   ObjCInterfaceDecl *IDecl = this;
   // 1st, look up the class.
-  for (ObjCInterfaceDecl::protocol_iterator
-        PI = IDecl->protocol_begin(), E = IDecl->protocol_end(); PI != E; ++PI){
-    if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
+  for (auto *PI : IDecl->protocols()){
+    if (getASTContext().ProtocolCompatibleWithProtocol(lProto, PI))
       return true;
     // This is dubious and is added to be compatible with gcc.  In gcc, it is
     // also allowed assigning a protocol-qualified 'id' type to a LHS object
@@ -1288,20 +1351,15 @@
     // FIXME: Treat this as an extension, and flag this as an error when GCC
     // extensions are not enabled.
     if (RHSIsQualifiedID &&
-        getASTContext().ProtocolCompatibleWithProtocol(*PI, lProto))
+        getASTContext().ProtocolCompatibleWithProtocol(PI, lProto))
       return true;
   }
 
   // 2nd, look up the category.
   if (lookupCategory)
-    for (visible_categories_iterator Cat = visible_categories_begin(),
-                                     CatEnd = visible_categories_end();
-         Cat != CatEnd;
-         ++Cat) {
-      for (ObjCCategoryDecl::protocol_iterator PI = Cat->protocol_begin(),
-                                               E = Cat->protocol_end();
-           PI != E; ++PI)
-        if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
+    for (const auto *Cat : visible_categories()) {
+      for (auto *PI : Cat->protocols())
+        if (getASTContext().ProtocolCompatibleWithProtocol(lProto, PI))
           return true;
     }
 
@@ -1325,8 +1383,7 @@
                                    SourceLocation IdLoc, IdentifierInfo *Id,
                                    QualType T, TypeSourceInfo *TInfo,
                                    AccessControl ac, Expr *BW,
-                                   bool synthesized,
-                                   bool backingIvarReferencedInAccessor) {
+                                   bool synthesized) {
   if (DC) {
     // Ivar's can only appear in interfaces, implementations (via synthesized
     // properties), and class extensions (via direct declaration, or synthesized
@@ -1353,14 +1410,13 @@
     ID->setIvarList(0);
   }
 
-  return new (C) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo,
-                              ac, BW, synthesized, backingIvarReferencedInAccessor);
+  return new (C, DC) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo, ac, BW,
+                                  synthesized);
 }
 
 ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCIvarDecl));
-  return new (Mem) ObjCIvarDecl(0, SourceLocation(), SourceLocation(), 0,
-                                QualType(), 0, ObjCIvarDecl::None, 0, false, false);
+  return new (C, ID) ObjCIvarDecl(0, SourceLocation(), SourceLocation(), 0,
+                                  QualType(), 0, ObjCIvarDecl::None, 0, false);
 }
 
 const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
@@ -1397,14 +1453,13 @@
 *ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC,
                              SourceLocation StartLoc,  SourceLocation IdLoc,
                              IdentifierInfo *Id, QualType T, Expr *BW) {
-  return new (C) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
+  return new (C, DC) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
 }
 
-ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C, 
+ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
                                                              unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCAtDefsFieldDecl));
-  return new (Mem) ObjCAtDefsFieldDecl(0, SourceLocation(), SourceLocation(),
-                                       0, QualType(), 0);
+  return new (C, ID) ObjCAtDefsFieldDecl(0, SourceLocation(), SourceLocation(),
+                                         0, QualType(), 0);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1429,17 +1484,16 @@
                                            SourceLocation nameLoc,
                                            SourceLocation atStartLoc,
                                            ObjCProtocolDecl *PrevDecl) {
-  ObjCProtocolDecl *Result 
-    = new (C) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc, PrevDecl);
+  ObjCProtocolDecl *Result =
+      new (C, DC) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc, PrevDecl);
   Result->Data.setInt(!C.getLangOpts().Modules);
   return Result;
 }
 
-ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C, 
+ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
                                                        unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCProtocolDecl));
-  ObjCProtocolDecl *Result = new (Mem) ObjCProtocolDecl(0, 0, SourceLocation(),
-                                                        SourceLocation(), 0);
+  ObjCProtocolDecl *Result =
+      new (C, ID) ObjCProtocolDecl(0, 0, SourceLocation(), SourceLocation(), 0);
   Result->Data.setInt(!C.getLangOpts().Modules);
   return Result;
 }
@@ -1450,8 +1504,8 @@
   if (Name == getIdentifier())
     return PDecl;
 
-  for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
-    if ((PDecl = (*I)->lookupProtocolNamed(Name)))
+  for (auto *I : protocols())
+    if ((PDecl = I->lookupProtocolNamed(Name)))
       return PDecl;
 
   return NULL;
@@ -1472,8 +1526,8 @@
   if ((MethodDecl = getMethod(Sel, isInstance)))
     return MethodDecl;
 
-  for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
-    if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+  for (const auto *I : protocols())
+    if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
       return MethodDecl;
   return NULL;
 }
@@ -1488,8 +1542,7 @@
   allocateDefinitionData();
   
   // Update all of the declarations with a pointer to the definition.
-  for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
-       RD != RDEnd; ++RD)
+  for (auto RD : redecls())
     RD->Data = this->Data;
 }
 
@@ -1497,17 +1550,14 @@
                                                     PropertyDeclOrder &PO) const {
   
   if (const ObjCProtocolDecl *PDecl = getDefinition()) {
-    for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
-         E = PDecl->prop_end(); P != E; ++P) {
-      ObjCPropertyDecl *Prop = *P;
+    for (auto *Prop : PDecl->properties()) {
       // Insert into PM if not there already.
       PM.insert(std::make_pair(Prop->getIdentifier(), Prop));
       PO.push_back(Prop);
     }
     // Scan through protocol's protocols.
-    for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
-         E = PDecl->protocol_end(); PI != E; ++PI)
-      (*PI)->collectPropertiesToImplement(PM, PO);
+    for (const auto *PI : PDecl->protocols())
+      PI->collectPropertiesToImplement(PM, PO);
   }
 }
 
@@ -1517,9 +1567,7 @@
                                                 ProtocolPropertyMap &PM) const {
   if (const ObjCProtocolDecl *PDecl = getDefinition()) {
     bool MatchFound = false;
-    for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
-         E = PDecl->prop_end(); P != E; ++P) {
-      ObjCPropertyDecl *Prop = *P;
+    for (auto *Prop : PDecl->properties()) {
       if (Prop == Property)
         continue;
       if (Prop->getIdentifier() == Property->getIdentifier()) {
@@ -1530,9 +1578,8 @@
     }
     // Scan through protocol's protocols which did not have a matching property.
     if (!MatchFound)
-      for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
-           E = PDecl->protocol_end(); PI != E; ++PI)
-        (*PI)->collectInheritedProtocolProperties(Property, PM);
+      for (const auto *PI : PDecl->protocols())
+        PI->collectInheritedProtocolProperties(Property, PM);
   }
 }
 
@@ -1543,17 +1590,16 @@
 void ObjCCategoryDecl::anchor() { }
 
 ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
-                                           SourceLocation AtLoc, 
+                                           SourceLocation AtLoc,
                                            SourceLocation ClassNameLoc,
                                            SourceLocation CategoryNameLoc,
                                            IdentifierInfo *Id,
                                            ObjCInterfaceDecl *IDecl,
                                            SourceLocation IvarLBraceLoc,
                                            SourceLocation IvarRBraceLoc) {
-  ObjCCategoryDecl *CatDecl = new (C) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc,
-                                                       CategoryNameLoc, Id,
-                                                       IDecl,
-                                                       IvarLBraceLoc, IvarRBraceLoc);
+  ObjCCategoryDecl *CatDecl =
+      new (C, DC) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, CategoryNameLoc, Id,
+                                   IDecl, IvarLBraceLoc, IvarRBraceLoc);
   if (IDecl) {
     // Link this category into its class's category list.
     CatDecl->NextClassCategory = IDecl->getCategoryListRaw();
@@ -1567,11 +1613,10 @@
   return CatDecl;
 }
 
-ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C, 
+ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
                                                        unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryDecl));
-  return new (Mem) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(),
-                                    SourceLocation(), 0, 0);
+  return new (C, ID) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(),
+                                      SourceLocation(), 0, 0);
 }
 
 ObjCCategoryImplDecl *ObjCCategoryDecl::getImplementation() const {
@@ -1599,15 +1644,14 @@
                              SourceLocation CategoryNameLoc) {
   if (ClassInterface && ClassInterface->hasDefinition())
     ClassInterface = ClassInterface->getDefinition();
-  return new (C) ObjCCategoryImplDecl(DC, Id, ClassInterface,
-                                      nameLoc, atStartLoc, CategoryNameLoc);
+  return new (C, DC) ObjCCategoryImplDecl(DC, Id, ClassInterface, nameLoc,
+                                          atStartLoc, CategoryNameLoc);
 }
 
 ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C, 
                                                                unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryImplDecl));
-  return new (Mem) ObjCCategoryImplDecl(0, 0, 0, SourceLocation(), 
-                                        SourceLocation(), SourceLocation());
+  return new (C, ID) ObjCCategoryImplDecl(0, 0, 0, SourceLocation(),
+                                          SourceLocation(), SourceLocation());
 }
 
 ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const {
@@ -1649,12 +1693,10 @@
 ///
 ObjCPropertyImplDecl *ObjCImplDecl::
 FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const {
-  for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){
-    ObjCPropertyImplDecl *PID = *i;
+  for (auto *PID : property_impls())
     if (PID->getPropertyIvarDecl() &&
         PID->getPropertyIvarDecl()->getIdentifier() == ivarId)
       return PID;
-  }
   return 0;
 }
 
@@ -1664,11 +1706,9 @@
 ///
 ObjCPropertyImplDecl *ObjCImplDecl::
 FindPropertyImplDecl(IdentifierInfo *Id) const {
-  for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){
-    ObjCPropertyImplDecl *PID = *i;
+  for (auto *PID : property_impls())
     if (PID->getPropertyDecl()->getIdentifier() == Id)
       return PID;
-  }
   return 0;
 }
 
@@ -1695,16 +1735,15 @@
                                SourceLocation IvarRBraceLoc) {
   if (ClassInterface && ClassInterface->hasDefinition())
     ClassInterface = ClassInterface->getDefinition();
-  return new (C) ObjCImplementationDecl(DC, ClassInterface, SuperDecl,
-                                        nameLoc, atStartLoc, superLoc,
-                                        IvarLBraceLoc, IvarRBraceLoc);
+  return new (C, DC) ObjCImplementationDecl(DC, ClassInterface, SuperDecl,
+                                            nameLoc, atStartLoc, superLoc,
+                                            IvarLBraceLoc, IvarRBraceLoc);
 }
 
 ObjCImplementationDecl *
 ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCImplementationDecl));
-  return new (Mem) ObjCImplementationDecl(0, 0, 0, SourceLocation(), 
-                                          SourceLocation());
+  return new (C, ID) ObjCImplementationDecl(0, 0, 0, SourceLocation(),
+                                            SourceLocation());
 }
 
 void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
@@ -1737,13 +1776,12 @@
                                 SourceLocation L,
                                 IdentifierInfo *Id,
                                 ObjCInterfaceDecl* AliasedClass) {
-  return new (C) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
+  return new (C, DC) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
 }
 
 ObjCCompatibleAliasDecl *
 ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCompatibleAliasDecl));
-  return new (Mem) ObjCCompatibleAliasDecl(0, SourceLocation(), 0, 0);
+  return new (C, ID) ObjCCompatibleAliasDecl(0, SourceLocation(), 0, 0);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1759,15 +1797,13 @@
                                            SourceLocation LParenLoc,
                                            TypeSourceInfo *T,
                                            PropertyControl propControl) {
-  return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T);
+  return new (C, DC) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T);
 }
 
-ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C, 
+ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
                                                        unsigned ID) {
-  void * Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyDecl));
-  return new (Mem) ObjCPropertyDecl(0, SourceLocation(), 0, SourceLocation(),
-                                    SourceLocation(),
-                                    0);
+  return new (C, ID) ObjCPropertyDecl(0, SourceLocation(), 0, SourceLocation(),
+                                      SourceLocation(), 0);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1782,15 +1818,14 @@
                                                    Kind PK,
                                                    ObjCIvarDecl *ivar,
                                                    SourceLocation ivarLoc) {
-  return new (C) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
-                                      ivarLoc);
+  return new (C, DC) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
+                                          ivarLoc);
 }
 
-ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C, 
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
                                                                unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyImplDecl));
-  return new (Mem) ObjCPropertyImplDecl(0, SourceLocation(), SourceLocation(),
-                                        0, Dynamic, 0, SourceLocation());
+  return new (C, ID) ObjCPropertyImplDecl(0, SourceLocation(), SourceLocation(),
+                                          0, Dynamic, 0, SourceLocation());
 }
 
 SourceRange ObjCPropertyImplDecl::getSourceRange() const {
diff --git a/lib/AST/DeclOpenMP.cpp b/lib/AST/DeclOpenMP.cpp
index 0d195f7..37d4ae2 100644
--- a/lib/AST/DeclOpenMP.cpp
+++ b/lib/AST/DeclOpenMP.cpp
@@ -12,8 +12,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclBase.h"
 #include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
 #include "clang/AST/DeclOpenMP.h"
 #include "clang/AST/Expr.h"
 
@@ -29,12 +29,8 @@
                                                    DeclContext *DC,
                                                    SourceLocation L,
                                                    ArrayRef<Expr *> VL) {
-  unsigned Size = sizeof(OMPThreadPrivateDecl) +
-                  (VL.size() * sizeof(Expr *));
-
-  void *Mem = C.Allocate(Size, llvm::alignOf<OMPThreadPrivateDecl>());
-  OMPThreadPrivateDecl *D = new (Mem) OMPThreadPrivateDecl(OMPThreadPrivate,
-                                                           DC, L);
+  OMPThreadPrivateDecl *D = new (C, DC, VL.size() * sizeof(Expr *))
+      OMPThreadPrivateDecl(OMPThreadPrivate, DC, L);
   D->NumVars = VL.size();
   D->setVars(VL);
   return D;
@@ -43,11 +39,8 @@
 OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C,
                                                                unsigned ID,
                                                                unsigned N) {
-  unsigned Size = sizeof(OMPThreadPrivateDecl) + (N * sizeof(Expr *));
-
-  void *Mem = AllocateDeserializedDecl(C, ID, Size);
-  OMPThreadPrivateDecl *D = new (Mem) OMPThreadPrivateDecl(OMPThreadPrivate,
-                                                           0, SourceLocation());
+  OMPThreadPrivateDecl *D = new (C, ID, N * sizeof(Expr *))
+      OMPThreadPrivateDecl(OMPThreadPrivate, 0, SourceLocation());
   D->NumVars = N;
   return D;
 }
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 767f662..a57532c 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -114,7 +114,7 @@
     else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType))
       BaseType = ATy->getElementType();
     else if (const FunctionType* FTy = BaseType->getAs<FunctionType>())
-      BaseType = FTy->getResultType();
+      BaseType = FTy->getReturnType();
     else if (const VectorType *VTy = BaseType->getAs<VectorType>())
       BaseType = VTy->getElementType();
     else if (const ReferenceType *RTy = BaseType->getAs<ReferenceType>())
@@ -167,7 +167,7 @@
   }
 }
 
-void DeclContext::dumpDeclContext() const {
+LLVM_DUMP_METHOD void DeclContext::dumpDeclContext() const {
   // Get the translation unit
   const DeclContext *DC = this;
   while (!DC->isTranslationUnit())
@@ -238,17 +238,6 @@
     if (D->isImplicit())
       continue;
 
-    // FIXME: Ugly hack so we don't pretty-print the builtin declaration
-    // of __builtin_va_list or __[u]int128_t.  There should be some other way
-    // to check that.
-    if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) {
-      if (IdentifierInfo *II = ND->getIdentifier()) {
-        if (II->isStr("__builtin_va_list") ||
-            II->isStr("__int128_t") || II->isStr("__uint128_t"))
-          continue;
-      }
-    }
-
     // The next bits of code handles stuff like "struct {int x;} a,b"; we're
     // forced to merge the declarations because there's no other way to
     // refer to the struct in question.  This limited merging is safe without
@@ -396,6 +385,7 @@
 
 void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
   CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
+  CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
   if (!Policy.SuppressSpecifiers) {
     switch (D->getStorageClass()) {
     case SC_None: break;
@@ -409,7 +399,9 @@
     if (D->isInlineSpecified())  Out << "inline ";
     if (D->isVirtualAsWritten()) Out << "virtual ";
     if (D->isModulePrivate())    Out << "__module_private__ ";
-    if (CDecl && CDecl->isExplicitSpecified())
+    if (D->isConstexpr() && !D->isExplicitlyDefaulted()) Out << "constexpr ";
+    if ((CDecl && CDecl->isExplicitSpecified()) ||
+        (ConversionDecl && ConversionDecl->isExplicit()))
       Out << "explicit ";
   }
 
@@ -423,8 +415,7 @@
     Ty = PT->getInnerType();
   }
 
-  if (isa<FunctionType>(Ty)) {
-    const FunctionType *AFT = Ty->getAs<FunctionType>();
+  if (const FunctionType *AFT = Ty->getAs<FunctionType>()) {
     const FunctionProtoType *FT = 0;
     if (D->hasWrittenPrototype())
       FT = dyn_cast<FunctionProtoType>(AFT);
@@ -459,6 +450,17 @@
         Proto += " volatile";
       if (FT->isRestrict())
         Proto += " restrict";
+
+      switch (FT->getRefQualifier()) {
+      case RQ_None:
+        break;
+      case RQ_LValue:
+        Proto += " &";
+        break;
+      case RQ_RValue:
+        Proto += " &&";
+        break;
+      }
     }
 
     if (FT && FT->hasDynamicExceptionSpec()) {
@@ -488,10 +490,7 @@
 
     if (CDecl) {
       bool HasInitializerList = false;
-      for (CXXConstructorDecl::init_const_iterator B = CDecl->init_begin(),
-           E = CDecl->init_end();
-           B != E; ++B) {
-        CXXCtorInitializer *BMInitializer = (*B);
+      for (const auto *BMInitializer : CDecl->inits()) {
         if (BMInitializer->isInClassMemberInitializer())
           continue;
 
@@ -547,16 +546,18 @@
           }
         }
         Out << ")";
+        if (BMInitializer->isPackExpansion())
+          Out << "...";
       }
-      if (!Proto.empty())
-        Out << Proto;
-    } else {
+    } else if (!ConversionDecl && !isa<CXXDestructorDecl>(D)) {
       if (FT && FT->hasTrailingReturn()) {
         Out << "auto " << Proto << " -> ";
         Proto.clear();
       }
-      AFT->getResultType().print(Out, Policy, Proto);
+      AFT->getReturnType().print(Out, Policy, Proto);
+      Proto.clear();
     }
+    Out << Proto;
   } else {
     Ty.print(Out, Policy, Proto);
   }
@@ -884,10 +885,9 @@
 void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
   if (PrintInstantiation) {
     TemplateParameterList *Params = D->getTemplateParameters();
-    for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end();
-         I != E; ++I) {
-      PrintTemplateParameters(Params, (*I)->getTemplateSpecializationArgs());
-      Visit(*I);
+    for (auto *I : D->specializations()) {
+      PrintTemplateParameters(Params, I->getTemplateSpecializationArgs());
+      Visit(I);
     }
   }
 
@@ -897,10 +897,9 @@
 void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
   if (PrintInstantiation) {
     TemplateParameterList *Params = D->getTemplateParameters();
-    for (ClassTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end();
-         I != E; ++I) {
-      PrintTemplateParameters(Params, &(*I)->getTemplateArgs());
-      Visit(*I);
+    for (auto *I : D->specializations()) {
+      PrintTemplateParameters(Params, &I->getTemplateArgs());
+      Visit(I);
       Out << '\n';
     }
   }
@@ -917,19 +916,19 @@
     Out << "- ";
   else
     Out << "+ ";
-  if (!OMD->getResultType().isNull())
-    Out << '(' << OMD->getASTContext().getUnqualifiedObjCPointerType(OMD->getResultType()).
-                    getAsString(Policy) << ")";
+  if (!OMD->getReturnType().isNull())
+    Out << '(' << OMD->getASTContext()
+                      .getUnqualifiedObjCPointerType(OMD->getReturnType())
+                      .getAsString(Policy) << ")";
 
   std::string name = OMD->getSelector().getAsString();
   std::string::size_type pos, lastPos = 0;
-  for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
-       E = OMD->param_end(); PI != E; ++PI) {
+  for (const auto *PI : OMD->params()) {
     // FIXME: selector is missing here!
     pos = name.find_first_of(':', lastPos);
     Out << " " << name.substr(lastPos, pos - lastPos);
-    Out << ":(" << (*PI)->getASTContext().getUnqualifiedObjCPointerType((*PI)->getType()).
-                      getAsString(Policy) << ')' << **PI;
+    Out << ":(" << PI->getASTContext().getUnqualifiedObjCPointerType(PI->getType()).
+                      getAsString(Policy) << ')' << *PI;
     lastPos = pos + 1;
   }
 
@@ -960,10 +959,9 @@
   if (OID->ivar_size() > 0) {
     Out << "{\n";
     Indentation += Policy.Indentation;
-    for (ObjCImplementationDecl::ivar_iterator I = OID->ivar_begin(),
-         E = OID->ivar_end(); I != E; ++I) {
+    for (const auto *I : OID->ivars()) {
       Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
-                    getAsString(Policy) << ' ' << **I << ";\n";
+                    getAsString(Policy) << ' ' << *I << ";\n";
     }
     Indentation -= Policy.Indentation;
     Out << "}\n";
@@ -999,10 +997,10 @@
     Out << "{\n";
     eolnOut = true;
     Indentation += Policy.Indentation;
-    for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
-         E = OID->ivar_end(); I != E; ++I) {
-      Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
-                    getAsString(Policy) << ' ' << **I << ";\n";
+    for (const auto *I : OID->ivars()) {
+      Indent() << I->getASTContext()
+                      .getUnqualifiedObjCPointerType(I->getType())
+                      .getAsString(Policy) << ' ' << *I << ";\n";
     }
     Indentation -= Policy.Indentation;
     Out << "}\n";
@@ -1051,11 +1049,9 @@
   if (PID->ivar_size() > 0) {
     Out << "{\n";
     Indentation += Policy.Indentation;
-    for (ObjCCategoryDecl::ivar_iterator I = PID->ivar_begin(),
-         E = PID->ivar_end(); I != E; ++I) {
+    for (const auto *I : PID->ivars())
       Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
-                    getAsString(Policy) << ' ' << **I << ";\n";
-    }
+                    getAsString(Policy) << ' ' << *I << ";\n";
     Indentation -= Policy.Indentation;
     Out << "}\n";
   }
@@ -1090,13 +1086,13 @@
     }
 
     if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
-      Out << (first ? ' ' : ',') << "getter = "
-          << PDecl->getGetterName().getAsString();
+      Out << (first ? ' ' : ',') << "getter = ";
+      PDecl->getGetterName().print(Out);
       first = false;
     }
     if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
-      Out << (first ? ' ' : ',') << "setter = "
-          << PDecl->getSetterName().getAsString();
+      Out << (first ? ' ' : ',') << "setter = ";
+      PDecl->getSetterName().print(Out);
       first = false;
     }
 
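For reference, the VisitFunctionDecl changes above teach the declaration printer about constexpr, explicit conversion functions, ref-qualifiers, and pack-expanded member initializers. The snippet below is illustrative source only (not part of this patch), showing the constructs the updated printer can now round-trip:

struct S {
  constexpr S();                      // now prefixed with "constexpr "
  explicit operator bool() const;     // "explicit " printed for conversion functions
  int get() const &;                  // ref-qualifier emitted as " &"
  int take() &&;                      // ref-qualifier emitted as " &&"
};

template <class... Bases>
struct D : Bases... {
  D(Bases... b) : Bases(b)... {}      // "..." printed after a pack-expanded initializer
};
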
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index 7172fb7..fc73e6f 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -229,14 +229,13 @@
                                                TemplateParameterList *Params,
                                                    NamedDecl *Decl) {
   AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
-  return new (C) FunctionTemplateDecl(DC, L, Name, Params, Decl);
+  return new (C, DC) FunctionTemplateDecl(DC, L, Name, Params, Decl);
 }
 
 FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
                                                                unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionTemplateDecl));
-  return new (Mem) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(),
-                                        0, 0);
+  return new (C, ID) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(),
+                                          0, 0);
 }
 
 RedeclarableTemplateDecl::CommonBase *
@@ -308,15 +307,15 @@
                                              NamedDecl *Decl,
                                              ClassTemplateDecl *PrevDecl) {
   AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
-  ClassTemplateDecl *New = new (C) ClassTemplateDecl(DC, L, Name, Params, Decl);
+  ClassTemplateDecl *New =
+      new (C, DC) ClassTemplateDecl(DC, L, Name, Params, Decl);
   New->setPreviousDecl(PrevDecl);
   return New;
 }
 
 ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C, 
                                                          unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ClassTemplateDecl));
-  return new (Mem) ClassTemplateDecl(EmptyShell());
+  return new (C, ID) ClassTemplateDecl(EmptyShell());
 }
 
 void ClassTemplateDecl::LoadLazySpecializations() const {
@@ -471,7 +470,7 @@
                              unsigned D, unsigned P, IdentifierInfo *Id,
                              bool Typename, bool ParameterPack) {
   TemplateTypeParmDecl *TTPDecl =
-    new (C) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
+    new (C, DC) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
   QualType TTPType = C.getTemplateTypeParmType(D, P, ParameterPack, TTPDecl);
   TTPDecl->TypeForDecl = TTPType.getTypePtr();
   return TTPDecl;
@@ -479,9 +478,8 @@
 
 TemplateTypeParmDecl *
 TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTypeParmDecl));
-  return new (Mem) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(),
-                                        0, false);
+  return new (C, ID) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(),
+                                          0, false);
 }
 
 SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
@@ -544,8 +542,8 @@
                                 unsigned D, unsigned P, IdentifierInfo *Id,
                                 QualType T, bool ParameterPack,
                                 TypeSourceInfo *TInfo) {
-  return new (C) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
-                                         T, ParameterPack, TInfo);
+  return new (C, DC) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
+                                             T, ParameterPack, TInfo);
 }
 
 NonTypeTemplateParmDecl *
@@ -557,34 +555,26 @@
                                 const QualType *ExpandedTypes, 
                                 unsigned NumExpandedTypes,
                                 TypeSourceInfo **ExpandedTInfos) {
-  unsigned Size = sizeof(NonTypeTemplateParmDecl) 
-                + NumExpandedTypes * 2 * sizeof(void*);
-  void *Mem = C.Allocate(Size);
-  return new (Mem) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc,
-                                           D, P, Id, T, TInfo,
-                                           ExpandedTypes, NumExpandedTypes, 
-                                           ExpandedTInfos);
+  unsigned Extra = NumExpandedTypes * 2 * sizeof(void*);
+  return new (C, DC, Extra) NonTypeTemplateParmDecl(
+      DC, StartLoc, IdLoc, D, P, Id, T, TInfo,
+      ExpandedTypes, NumExpandedTypes, ExpandedTInfos);
 }
 
 NonTypeTemplateParmDecl *
 NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NonTypeTemplateParmDecl));
-  return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(), 
-                                           SourceLocation(), 0, 0, 0, 
-                                           QualType(), false, 0);
+  return new (C, ID) NonTypeTemplateParmDecl(0, SourceLocation(),
+                                             SourceLocation(), 0, 0, 0,
+                                             QualType(), false, 0);
 }
 
 NonTypeTemplateParmDecl *
 NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
                                             unsigned NumExpandedTypes) {
-  unsigned Size = sizeof(NonTypeTemplateParmDecl) 
-                + NumExpandedTypes * 2 * sizeof(void*);
-  
-  void *Mem = AllocateDeserializedDecl(C, ID, Size);
-  return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(), 
-                                           SourceLocation(), 0, 0, 0,
-                                           QualType(), 0, 0, NumExpandedTypes,
-                                           0);
+  unsigned Extra = NumExpandedTypes * 2 * sizeof(void*);
+  return new (C, ID, Extra) NonTypeTemplateParmDecl(
+      0, SourceLocation(), SourceLocation(), 0, 0, 0, QualType(), 0,
+      0, NumExpandedTypes, 0);
 }
 
 SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
@@ -624,8 +614,8 @@
                                  SourceLocation L, unsigned D, unsigned P,
                                  bool ParameterPack, IdentifierInfo *Id,
                                  TemplateParameterList *Params) {
-  return new (C) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id, 
-                                          Params);
+  return new (C, DC) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
+                                              Params);
 }
 
 TemplateTemplateParmDecl *
@@ -634,28 +624,23 @@
                                  IdentifierInfo *Id,
                                  TemplateParameterList *Params,
                                  ArrayRef<TemplateParameterList *> Expansions) {
-  void *Mem = C.Allocate(sizeof(TemplateTemplateParmDecl) +
-                         sizeof(TemplateParameterList*) * Expansions.size());
-  return new (Mem) TemplateTemplateParmDecl(DC, L, D, P, Id, Params,
-                                            Expansions.size(),
-                                            Expansions.data());
+  return new (C, DC, sizeof(TemplateParameterList*) * Expansions.size())
+      TemplateTemplateParmDecl(DC, L, D, P, Id, Params,
+                               Expansions.size(), Expansions.data());
 }
 
 TemplateTemplateParmDecl *
 TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTemplateParmDecl));
-  return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, false,
-                                            0, 0);
+  return new (C, ID) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, false,
+                                              0, 0);
 }
 
 TemplateTemplateParmDecl *
 TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
                                              unsigned NumExpansions) {
-  unsigned Size = sizeof(TemplateTemplateParmDecl) +
-                  sizeof(TemplateParameterList*) * NumExpansions;
-  void *Mem = AllocateDeserializedDecl(C, ID, Size);
-  return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, 0, 0,
-                                            NumExpansions, 0);
+  return new (C, ID, sizeof(TemplateParameterList*) * NumExpansions)
+      TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, 0, 0,
+                               NumExpansions, 0);
 }
 
 //===----------------------------------------------------------------------===//
@@ -734,13 +719,10 @@
                                         const TemplateArgument *Args,
                                         unsigned NumArgs,
                                    ClassTemplateSpecializationDecl *PrevDecl) {
-  ClassTemplateSpecializationDecl *Result
-    = new (Context)ClassTemplateSpecializationDecl(Context,
-                                                   ClassTemplateSpecialization,
-                                                   TK, DC, StartLoc, IdLoc,
-                                                   SpecializedTemplate,
-                                                   Args, NumArgs,
-                                                   PrevDecl);
+  ClassTemplateSpecializationDecl *Result =
+      new (Context, DC) ClassTemplateSpecializationDecl(
+          Context, ClassTemplateSpecialization, TK, DC, StartLoc, IdLoc,
+          SpecializedTemplate, Args, NumArgs, PrevDecl);
   Result->MayHaveOutOfDateDef = false;
 
   Context.getTypeDeclType(Result, PrevDecl);
@@ -748,12 +730,10 @@
 }
 
 ClassTemplateSpecializationDecl *
-ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, 
+ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
                                                     unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, 
-                                       sizeof(ClassTemplateSpecializationDecl));
   ClassTemplateSpecializationDecl *Result =
-    new (Mem) ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
+    new (C, ID) ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
   Result->MayHaveOutOfDateDef = false;
   return Result;
 }
@@ -855,14 +835,10 @@
   const ASTTemplateArgumentListInfo *ASTArgInfos =
     ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
 
-  ClassTemplatePartialSpecializationDecl *Result
-    = new (Context)ClassTemplatePartialSpecializationDecl(Context, TK, DC,
-                                                          StartLoc, IdLoc,
-                                                          Params,
-                                                          SpecializedTemplate,
-                                                          Args, NumArgs,
-                                                          ASTArgInfos,
-                                                          PrevDecl);
+  ClassTemplatePartialSpecializationDecl *Result = new (Context, DC)
+      ClassTemplatePartialSpecializationDecl(Context, TK, DC, StartLoc, IdLoc,
+                                             Params, SpecializedTemplate, Args,
+                                             NumArgs, ASTArgInfos, PrevDecl);
   Result->setSpecializationKind(TSK_ExplicitSpecialization);
   Result->MayHaveOutOfDateDef = false;
 
@@ -873,10 +849,8 @@
 ClassTemplatePartialSpecializationDecl *
 ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
                                                            unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, 
-                sizeof(ClassTemplatePartialSpecializationDecl));
-  ClassTemplatePartialSpecializationDecl *Result
-    = new (Mem) ClassTemplatePartialSpecializationDecl();
+  ClassTemplatePartialSpecializationDecl *Result =
+      new (C, ID) ClassTemplatePartialSpecializationDecl();
   Result->MayHaveOutOfDateDef = false;
   return Result;
 }
@@ -894,15 +868,13 @@
                                                TemplateParameterList **Params,
                                                FriendUnion Friend,
                                                SourceLocation FLoc) {
-  FriendTemplateDecl *Result
-    = new (Context) FriendTemplateDecl(DC, L, NParams, Params, Friend, FLoc);
-  return Result;
+  return new (Context, DC) FriendTemplateDecl(DC, L, NParams, Params,
+                                              Friend, FLoc);
 }
 
 FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
                                                            unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendTemplateDecl));
-  return new (Mem) FriendTemplateDecl(EmptyShell());
+  return new (C, ID) FriendTemplateDecl(EmptyShell());
 }
 
 //===----------------------------------------------------------------------===//
@@ -916,14 +888,13 @@
                                                   TemplateParameterList *Params,
                                                      NamedDecl *Decl) {
   AdoptTemplateParameterList(Params, DC);
-  return new (C) TypeAliasTemplateDecl(DC, L, Name, Params, Decl);
+  return new (C, DC) TypeAliasTemplateDecl(DC, L, Name, Params, Decl);
 }
 
 TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
                                                                  unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasTemplateDecl));
-  return new (Mem) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(),
-                                         0, 0);
+  return new (C, ID) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(),
+                                           0, 0);
 }
 
 void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) {
@@ -945,10 +916,8 @@
 ClassScopeFunctionSpecializationDecl *
 ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
                                                          unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, 
-                sizeof(ClassScopeFunctionSpecializationDecl));
-  return new (Mem) ClassScopeFunctionSpecializationDecl(0, SourceLocation(), 0,
-                                             false, TemplateArgumentListInfo());
+  return new (C, ID) ClassScopeFunctionSpecializationDecl(
+      0, SourceLocation(), 0, false, TemplateArgumentListInfo());
 }
 
 //===----------------------------------------------------------------------===//
@@ -972,20 +941,16 @@
 VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC,
                                          SourceLocation L, DeclarationName Name,
                                          TemplateParameterList *Params,
-                                         NamedDecl *Decl,
-                                         VarTemplateDecl *PrevDecl) {
-  VarTemplateDecl *New = new (C) VarTemplateDecl(DC, L, Name, Params, Decl);
-  New->setPreviousDecl(PrevDecl);
-  return New;
+                                         VarDecl *Decl) {
+  return new (C, DC) VarTemplateDecl(DC, L, Name, Params, Decl);
 }
 
 VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C,
                                                      unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarTemplateDecl));
-  return new (Mem) VarTemplateDecl(EmptyShell());
+  return new (C, ID) VarTemplateDecl(EmptyShell());
 }
 
-// TODO: Unify accross class, function and variable templates?
+// TODO: Unify across class, function and variable templates?
 //       May require moving this and Common to RedeclarableTemplateDecl.
 void VarTemplateDecl::LoadLazySpecializations() const {
   Common *CommonPtr = getCommonPtr();
@@ -1111,20 +1076,14 @@
     SourceLocation IdLoc, VarTemplateDecl *SpecializedTemplate, QualType T,
     TypeSourceInfo *TInfo, StorageClass S, const TemplateArgument *Args,
     unsigned NumArgs) {
-  VarTemplateSpecializationDecl *Result = new (Context)
-      VarTemplateSpecializationDecl(Context, VarTemplateSpecialization, DC,
-                                    StartLoc, IdLoc, SpecializedTemplate, T,
-                                    TInfo, S, Args, NumArgs);
-  return Result;
+  return new (Context, DC) VarTemplateSpecializationDecl(
+      Context, VarTemplateSpecialization, DC, StartLoc, IdLoc,
+      SpecializedTemplate, T, TInfo, S, Args, NumArgs);
 }
 
 VarTemplateSpecializationDecl *
 VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
-  void *Mem =
-      AllocateDeserializedDecl(C, ID, sizeof(VarTemplateSpecializationDecl));
-  VarTemplateSpecializationDecl *Result =
-      new (Mem) VarTemplateSpecializationDecl(VarTemplateSpecialization);
-  return Result;
+  return new (C, ID) VarTemplateSpecializationDecl(VarTemplateSpecialization);
 }
 
 void VarTemplateSpecializationDecl::getNameForDiagnostic(
@@ -1183,7 +1142,7 @@
     = ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
 
   VarTemplatePartialSpecializationDecl *Result =
-      new (Context) VarTemplatePartialSpecializationDecl(
+      new (Context, DC) VarTemplatePartialSpecializationDecl(
           Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo,
           S, Args, NumArgs, ASTArgInfos);
   Result->setSpecializationKind(TSK_ExplicitSpecialization);
@@ -1193,9 +1152,5 @@
 VarTemplatePartialSpecializationDecl *
 VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
                                                          unsigned ID) {
-  void *Mem = AllocateDeserializedDecl(
-      C, ID, sizeof(VarTemplatePartialSpecializationDecl));
-  VarTemplatePartialSpecializationDecl *Result =
-      new (Mem) VarTemplatePartialSpecializationDecl();
-  return Result;
+  return new (C, ID) VarTemplatePartialSpecializationDecl();
 }
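
The DeclTemplate.cpp changes above are all the same mechanical substitution: hand-written C.Allocate(...) / AllocateDeserializedDecl(...) calls become placement operator new overloads keyed on the ASTContext plus either a DeclContext or a deserialization ID, with an optional trailing-byte count for nodes that store extra arrays after themselves. Below is a minimal, self-contained sketch of that allocation shape; FakeContext and FakeDecl are hypothetical stand-ins, not the real Clang API:

#include <cstddef>
#include <new>
#include <vector>

struct FakeContext {                       // hypothetical stand-in for ASTContext
  std::vector<char *> Blocks;
  void *Allocate(std::size_t N) {
    char *P = new char[N];
    Blocks.push_back(P);
    return P;                              // released when the context is destroyed
  }
  ~FakeContext() {
    for (std::size_t I = 0, E = Blocks.size(); I != E; ++I)
      delete[] Blocks[I];
  }
};

struct FakeDecl {
  // One allocation covers the node plus Extra trailing bytes, so call sites
  // read: new (Ctx, /*Extra=*/N * sizeof(void *)) FakeDecl(...)
  void *operator new(std::size_t Size, FakeContext &Ctx, std::size_t Extra = 0) {
    return Ctx.Allocate(Size + Extra);
  }
  // Matching placement delete; only used if the constructor throws.
  void operator delete(void *, FakeContext &, std::size_t) {}
};

int main() {
  FakeContext Ctx;
  FakeDecl *D = new (Ctx, 4 * sizeof(void *)) FakeDecl; // trailing slots follow *D
  (void)D;                                 // no explicit delete: context-owned
}
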
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index e064e23..f9041c0 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -143,13 +143,16 @@
   case DeclarationName::ObjCZeroArgSelector:
   case DeclarationName::ObjCOneArgSelector:
   case DeclarationName::ObjCMultiArgSelector:
-    return OS << N.getObjCSelector().getAsString();
+    N.getObjCSelector().print(OS);
+    return OS;
 
   case DeclarationName::CXXConstructorName: {
     QualType ClassType = N.getCXXNameType();
     if (const RecordType *ClassRec = ClassType->getAs<RecordType>())
       return OS << *ClassRec->getDecl();
-    return OS << ClassType.getAsString();
+    LangOptions LO;
+    LO.CPlusPlus = true;
+    return OS << ClassType.getAsString(PrintingPolicy(LO));
   }
 
   case DeclarationName::CXXDestructorName: {
@@ -157,7 +160,9 @@
     QualType Type = N.getCXXNameType();
     if (const RecordType *Rec = Type->getAs<RecordType>())
       return OS << *Rec->getDecl();
-    return OS << Type.getAsString();
+    LangOptions LO;
+    LO.CPlusPlus = true;
+    return OS << Type.getAsString(PrintingPolicy(LO));
   }
 
   case DeclarationName::CXXOperatorName: {
@@ -184,7 +189,10 @@
     QualType Type = N.getCXXNameType();
     if (const RecordType *Rec = Type->getAs<RecordType>())
       return OS << *Rec->getDecl();
-    return OS << Type.getAsString();
+    LangOptions LO;
+    LO.CPlusPlus = true;
+    LO.Bool = true;
+    return OS << Type.getAsString(PrintingPolicy(LO));
   }
   case DeclarationName::CXXUsingDirective:
     return OS << "<using-directive>";
@@ -537,7 +545,10 @@
         OS << '~';
       else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName)
         OS << "operator ";
-      OS << TInfo->getType().getAsString();
+      LangOptions LO;
+      LO.CPlusPlus = true;
+      LO.Bool = true;
+      OS << TInfo->getType().getAsString(PrintingPolicy(LO));
     } else
       OS << Name;
     return;
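
The recurring pattern above, a default-constructed LangOptions with CPlusPlus (and, where relevant, Bool) forced on before building a PrintingPolicy, exists so that the types embedded in constructor, destructor, and conversion-function names are printed with their C++ spelling, for instance "bool" rather than "_Bool". The same pattern pulled out as a small fragment; it assumes the usual Clang headers are already included and is not a standalone program:

static void printCxxSpelling(llvm::raw_ostream &OS, clang::QualType Ty) {
  clang::LangOptions LO;
  LO.CPlusPlus = true;   // spell the type as it would appear in C++ source
  LO.Bool = true;        // allow "bool" rather than "_Bool"
  OS << Ty.getAsString(clang::PrintingPolicy(LO));
}
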
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 9055dda..2f5f14f 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -105,37 +105,6 @@
   return E;
 }
 
-const Expr *
-Expr::findMaterializedTemporary(const MaterializeTemporaryExpr *&MTE) const {
-  const Expr *E = this;
-
-  // This might be a default initializer for a reference member. Walk over the
-  // wrapper node for that.
-  if (const CXXDefaultInitExpr *DAE = dyn_cast<CXXDefaultInitExpr>(E))
-    E = DAE->getExpr();
-
-  // Look through single-element init lists that claim to be lvalues. They're
-  // just syntactic wrappers in this case.
-  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
-    if (ILE->getNumInits() == 1 && ILE->isGLValue()) {
-      E = ILE->getInit(0);
-      if (const CXXDefaultInitExpr *DAE = dyn_cast<CXXDefaultInitExpr>(E))
-        E = DAE->getExpr();
-    }
-  }
-
-  // Look through expressions for materialized temporaries (for now).
-  if (const MaterializeTemporaryExpr *M
-      = dyn_cast<MaterializeTemporaryExpr>(E)) {
-    MTE = M;
-    E = M->GetTemporaryExpr();
-  }
-
-  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
-    E = DAE->getExpr();
-  return E;
-}
-
 /// isKnownToHaveBooleanValue - Return true if this is an integer expression
 /// that is known to return 0 or 1.  This happens for _Bool/bool expressions
 /// but also int expressions which are produced by things like comparisons in
@@ -484,7 +453,7 @@
 
   if (IT == PredefinedExpr::FuncDName) {
     if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) {
-      OwningPtr<MangleContext> MC;
+      std::unique_ptr<MangleContext> MC;
       MC.reset(Context.createMangleContext());
 
       if (MC->shouldMangleDeclName(ND)) {
@@ -619,13 +588,15 @@
     // not a constructor or destructor.
     if ((isa<CXXMethodDecl>(FD) &&
          cast<CXXMethodDecl>(FD)->getParent()->isLambda()) ||
-        (FT && FT->getResultType()->getAs<AutoType>()))
+        (FT && FT->getReturnType()->getAs<AutoType>()))
       Proto = "auto " + Proto;
-    else if (FT && FT->getResultType()->getAs<DecltypeType>())
-      FT->getResultType()->getAs<DecltypeType>()->getUnderlyingType()
+    else if (FT && FT->getReturnType()->getAs<DecltypeType>())
+      FT->getReturnType()
+          ->getAs<DecltypeType>()
+          ->getUnderlyingType()
           .getAsStringInternal(Proto, Policy);
     else if (!isa<CXXConstructorDecl>(FD) && !isa<CXXDestructorDecl>(FD))
-      AFT->getResultType().getAsStringInternal(Proto, Policy);
+      AFT->getReturnType().getAsStringInternal(Proto, Policy);
 
     Out << Proto;
 
@@ -658,7 +629,7 @@
       Out << '(' << *CID << ')';
 
     Out <<  ' ';
-    Out << MD->getSelector().getAsString();
+    MD->getSelector().print(Out);
     Out <<  ']';
 
     Out.flush();
@@ -810,6 +781,9 @@
                                      StringKind Kind, bool Pascal, QualType Ty,
                                      const SourceLocation *Loc,
                                      unsigned NumStrs) {
+  assert(C.getAsConstantArrayType(Ty) &&
+         "StringLiteral must be of constant array type!");
+
   // Allocate enough space for the StringLiteral plus an array of locations for
   // any concatenated string tokens.
   void *Mem = C.Allocate(sizeof(StringLiteral)+
@@ -1215,9 +1189,9 @@
   this->NumArgs = NumArgs;
 }
 
-/// isBuiltinCall - If this is a call to a builtin, return the builtin ID.  If
+/// getBuiltinCallee - If this is a call to a builtin, return the builtin ID. If
 /// not, return 0.
-unsigned CallExpr::isBuiltinCall() const {
+unsigned CallExpr::getBuiltinCallee() const {
   // All simple function calls (e.g. func()) are implicitly cast to pointer to
   // function. As a result, we try and obtain the DeclRefExpr from the
   // ImplicitCastExpr.
@@ -1240,7 +1214,7 @@
 }
 
 bool CallExpr::isUnevaluatedBuiltinCall(ASTContext &Ctx) const {
-  if (unsigned BI = isBuiltinCall())
+  if (unsigned BI = getBuiltinCallee())
     return Ctx.BuiltinInfo.isUnevaluated(BI);
   return false;
 }
@@ -1256,7 +1230,7 @@
     CalleeType = Expr::findBoundMemberType(getCallee());
     
   const FunctionType *FnType = CalleeType->castAs<FunctionType>();
-  return FnType->getResultType();
+  return FnType->getReturnType();
 }
 
 SourceLocation CallExpr::getLocStart() const {
@@ -1421,7 +1395,7 @@
   return EndLoc;
 }
 
-void CastExpr::CheckCastConsistency() const {
+bool CastExpr::CastConsistency() const {
   switch (getCastKind()) {
   case CK_DerivedToBase:
   case CK_UncheckedDerivedToBase:
@@ -1474,6 +1448,11 @@
     assert(getSubExpr()->getType()->isFunctionType());
     goto CheckNoBasePath;
 
+  case CK_AddressSpaceConversion:
+    assert(getType()->isPointerType());
+    assert(getSubExpr()->getType()->isPointerType());
+    assert(getType()->getPointeeType().getAddressSpace() !=
+           getSubExpr()->getType()->getPointeeType().getAddressSpace());
   // These should not have an inheritance path.
   case CK_Dynamic:
   case CK_ToUnion:
@@ -1524,6 +1503,7 @@
     assert(path_empty() && "Cast kind should not have a base path!");
     break;
   }
+  return true;
 }
 
 const char *CastExpr::getCastKindName() const {
@@ -1625,7 +1605,7 @@
   case CK_ARCReclaimReturnedObject:
     return "ARCReclaimReturnedObject";
   case CK_ARCExtendBlockObject:
-    return "ARCCExtendBlockObject";
+    return "ARCExtendBlockObject";
   case CK_AtomicToNonAtomic:
     return "AtomicToNonAtomic";
   case CK_NonAtomicToAtomic:
@@ -1636,6 +1616,8 @@
     return "BuiltinFnToFnPtr";
   case CK_ZeroToOCLEvent:
     return "ZeroToOCLEvent";
+  case CK_AddressSpaceConversion:
+    return "AddressSpaceConversion";
   }
 
   llvm_unreachable("Unhandled cast kind!");
@@ -1867,12 +1849,12 @@
 Expr *InitListExpr::updateInit(const ASTContext &C, unsigned Init, Expr *expr) {
   if (Init >= InitExprs.size()) {
     InitExprs.insert(C, InitExprs.end(), Init - InitExprs.size() + 1, 0);
-    InitExprs.back() = expr;
+    setInit(Init, expr);
     return 0;
   }
 
   Expr *Result = cast_or_null<Expr>(InitExprs[Init]);
-  InitExprs[Init] = expr;
+  setInit(Init, expr);
   return Result;
 }
 
@@ -1892,7 +1874,11 @@
   const ArrayType *AT = getType()->getAsArrayTypeUnsafe();
   if (!AT || !AT->getElementType()->isIntegerType())
     return false;
-  const Expr *Init = getInit(0)->IgnoreParens();
+  // It is possible for getInit() to return null.
+  const Expr *Init = getInit(0);
+  if (!Init)
+    return false;
+  Init = Init->IgnoreParens();
   return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
 }
 
@@ -2078,15 +2064,22 @@
     return true;
 
   case CXXOperatorCallExprClass: {
-    // We warn about operator== and operator!= even when user-defined operator
+    // Warn about operator ==, !=, <, >, <=, and >= even for user-defined
+    // operator overloads, as there is no reasonable way to define these such
+    // that they have non-trivial, desirable side-effects. See the
+    // -Wunused-comparison warning: operators == and != are commonly mistyped,
+    // so warning on them provides additional value as well. If this list is
+    // updated, DiagnoseUnusedComparison should be as well.
     // DiagnoseUnusedComparison should be as well.
     const CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(this);
-    if (Op->getOperator() == OO_EqualEqual ||
-        Op->getOperator() == OO_ExclaimEqual) {
+    switch (Op->getOperator()) {
+    default:
+      break;
+    case OO_EqualEqual:
+    case OO_ExclaimEqual:
+    case OO_Less:
+    case OO_Greater:
+    case OO_GreaterEqual:
+    case OO_LessEqual:
       WarnE = this;
       Loc = Op->getOperatorLoc();
       R1 = Op->getSourceRange();
@@ -2106,8 +2099,8 @@
       //
       // Note: If new cases are added here, DiagnoseUnusedExprResult should be
       // updated to match for QoI.
-      if (FD->getAttr<WarnUnusedResultAttr>() ||
-          FD->getAttr<PureAttr>() || FD->getAttr<ConstAttr>()) {
+      if (FD->hasAttr<WarnUnusedResultAttr>() ||
+          FD->hasAttr<PureAttr>() || FD->hasAttr<ConstAttr>()) {
         WarnE = this;
         Loc = CE->getCallee()->getLocStart();
         R1 = CE->getCallee()->getSourceRange();
@@ -2152,7 +2145,7 @@
     }
 
     const ObjCMethodDecl *MD = ME->getMethodDecl();
-    if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
+    if (MD && MD->hasAttr<WarnUnusedResultAttr>()) {
       WarnE = this;
       Loc = getExprLoc();
       return true;
@@ -2821,8 +2814,6 @@
   case CXXThisExprClass:
   case CXXScalarValueInitExprClass:
   case TypeTraitExprClass:
-  case UnaryTypeTraitExprClass:
-  case BinaryTypeTraitExprClass:
   case ArrayTypeTraitExprClass:
   case ExpressionTraitExprClass:
   case CXXNoexceptExprClass:
@@ -3055,7 +3046,7 @@
 Expr::isNullPointerConstant(ASTContext &Ctx,
                             NullPointerConstantValueDependence NPC) const {
   if (isValueDependent() &&
-      (!Ctx.getLangOpts().CPlusPlus11 || Ctx.getLangOpts().MicrosoftMode)) {
+      (!Ctx.getLangOpts().CPlusPlus11 || Ctx.getLangOpts().MSVCCompat)) {
     switch (NPC) {
     case NPC_NeverValueDependent:
       llvm_unreachable("Unexpected value dependent expression!");
@@ -3141,8 +3132,7 @@
     const IntegerLiteral *Lit = dyn_cast<IntegerLiteral>(this);
     if (Lit && !Lit->getValue())
       return NPCK_ZeroLiteral;
-    else if (!Ctx.getLangOpts().MicrosoftMode ||
-             !isCXX98IntegralConstantExpr(Ctx))
+    else if (!Ctx.getLangOpts().MSVCCompat || !isCXX98IntegralConstantExpr(Ctx))
       return NPCK_NotNull;
   } else {
     // If we have an integer constant expression, we need to *evaluate* it and
@@ -3806,30 +3796,21 @@
 
 Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
   assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
-  char *Ptr = static_cast<char *>(
-                  const_cast<void *>(static_cast<const void *>(this)));
-  Ptr += sizeof(DesignatedInitExpr);
-  Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+  Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
   return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
 }
 
 Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
   assert(D.Kind == Designator::ArrayRangeDesignator &&
          "Requires array range designator");
-  char *Ptr = static_cast<char *>(
-                  const_cast<void *>(static_cast<const void *>(this)));
-  Ptr += sizeof(DesignatedInitExpr);
-  Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+  Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
   return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
 }
 
 Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
   assert(D.Kind == Designator::ArrayRangeDesignator &&
          "Requires array range designator");
-  char *Ptr = static_cast<char *>(
-                  const_cast<void *>(static_cast<const void *>(this)));
-  Ptr += sizeof(DesignatedInitExpr);
-  Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+  Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
   return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
 }
 
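The DesignatedInitExpr hunks above replace hand-rolled char-pointer arithmetic with the usual trailing-objects idiom: the Stmt* array is allocated in the same block, immediately after the node, so "this + 1" is its first element. A minimal sketch of the idiom with hypothetical types (not Clang code); it relies on the trailing element type's alignment not exceeding the node's:

#include <cstddef>
#include <cstdlib>
#include <new>

struct Node {
  unsigned NumSubExprs;

  // The sub-expression array (ints here) lives directly after the node, so
  // "one past this object" is its first element.
  int *subExprs() { return reinterpret_cast<int *>(this + 1); }
  const int *subExprs() const { return reinterpret_cast<const int *>(this + 1); }

  static Node *create(unsigned N) {
    void *Mem = std::malloc(sizeof(Node) + N * sizeof(int)); // node + trailing array
    Node *Result = new (Mem) Node();
    Result->NumSubExprs = N;
    return Result;
  }
};
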
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 3738c0e..ee49925 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -108,10 +108,8 @@
     return UuidForRD;
   }
 
-  for (CXXRecordDecl::redecl_iterator I = RD->redecls_begin(),
-                                      E = RD->redecls_end();
-       I != E; ++I)
-    if (UuidAttr *Uuid = I->getAttr<UuidAttr>())
+  for (auto I : RD->redecls())
+    if (auto Uuid = I->getAttr<UuidAttr>())
       return Uuid;
 
   return 0;
@@ -1291,16 +1289,11 @@
     NamedDecl *decl = *begin;
     if (isa<UnresolvedUsingValueDecl>(decl))
       return false;
-    if (isa<UsingShadowDecl>(decl))
-      decl = cast<UsingShadowDecl>(decl)->getUnderlyingDecl();
 
     // Unresolved member expressions should only contain methods and
     // method templates.
-    assert(isa<CXXMethodDecl>(decl) || isa<FunctionTemplateDecl>(decl));
-
-    if (isa<FunctionTemplateDecl>(decl))
-      decl = cast<FunctionTemplateDecl>(decl)->getTemplatedDecl();
-    if (cast<CXXMethodDecl>(decl)->isStatic())
+    if (cast<CXXMethodDecl>(decl->getUnderlyingDecl()->getAsFunction())
+            ->isStatic())
       return false;
   } while (++begin != end);
 
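The UnresolvedMemberExpr change above folds two manual unwrapping steps into one chained call; the fragment below (where decl is a clang::NamedDecl*) spells out the equivalence and is illustrative, not part of the patch:

// Before: strip a UsingShadowDecl by hand, then a FunctionTemplateDecl by hand.
// After: getUnderlyingDecl() looks through using-declarations, and
// getAsFunction() returns the templated FunctionDecl (or the decl itself).
clang::CXXMethodDecl *MD = llvm::cast<clang::CXXMethodDecl>(
    decl->getUnderlyingDecl()->getAsFunction());
bool IsStatic = MD->isStatic();
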
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index 54f77ef..55b9f13 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -165,8 +165,6 @@
   case Expr::FloatingLiteralClass:
   case Expr::CXXNoexceptExprClass:
   case Expr::CXXScalarValueInitExprClass:
-  case Expr::UnaryTypeTraitExprClass:
-  case Expr::BinaryTypeTraitExprClass:
   case Expr::TypeTraitExprClass:
   case Expr::ArrayTypeTraitExprClass:
   case Expr::ExpressionTraitExprClass:
@@ -348,7 +346,7 @@
   case Expr::ObjCMessageExprClass:
     if (const ObjCMethodDecl *Method =
           cast<ObjCMessageExpr>(E)->getMethodDecl()) {
-      Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getResultType());
+      Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getReturnType());
       return (kind == Cl::CL_PRValue) ? Cl::CL_ObjCMessageRValue : kind;
     }
     return Cl::CL_PRValue;
@@ -543,10 +541,21 @@
          "This is only relevant for C++.");
 
   // C++ [expr.cond]p2
-  //   If either the second or the third operand has type (cv) void, [...]
-  //   the result [...] is a prvalue.
-  if (True->getType()->isVoidType() || False->getType()->isVoidType())
+  //   If either the second or the third operand has type (cv) void,
+  //   one of the following shall hold:
+  if (True->getType()->isVoidType() || False->getType()->isVoidType()) {
+    // The second or the third operand (but not both) is a (possibly
+    // parenthesized) throw-expression; the result is of the [...] value
+    // category of the other.
+    bool TrueIsThrow = isa<CXXThrowExpr>(True->IgnoreParenImpCasts());
+    bool FalseIsThrow = isa<CXXThrowExpr>(False->IgnoreParenImpCasts());
+    if (const Expr *NonThrow = TrueIsThrow ? (FalseIsThrow ? 0    : False)
+                                           : (FalseIsThrow ? True : 0))
+      return ClassifyInternal(Ctx, NonThrow);
+
+    //   [Otherwise] the result [...] is a prvalue.
     return Cl::CL_PRValue;
+  }
 
   // Note that at this point, we have already performed all conversions
   // according to [expr.cond]p3.
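Concretely, the new [expr.cond]p2 handling above is what makes the classic throw-in-a-conditional idiom classify correctly: when exactly one arm is a (possibly parenthesized) throw-expression, the whole conditional takes the value category of the other arm. Illustrative standalone C++, not part of the patch:

#include <stdexcept>

int pick(bool ok, int &slot) {
  // Only one arm is a throw-expression, so the conditional is classified like
  // the other arm: an lvalue of type int. Assigning through it is well-formed.
  (ok ? slot : throw std::runtime_error("no slot")) = 42;
  return slot;
}
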
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 390cfe9..9c69080 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -474,13 +474,30 @@
 
       /// Evaluate in any way we know how. Don't worry about side-effects that
       /// can't be modeled.
-      EM_IgnoreSideEffects
+      EM_IgnoreSideEffects,
+
+      /// Evaluate as a constant expression. Stop if we find that the expression
+      /// is not a constant expression. Some expressions can be retried in the
+      /// optimizer if we don't constant fold them here, but in an unevaluated
+      /// context we try to fold them immediately since the optimizer never
+      /// gets a chance to look at them.
+      EM_ConstantExpressionUnevaluated,
+
+      /// Evaluate as a potential constant expression. Keep going if we hit a
+      /// construct that we can't evaluate yet (because we don't yet know the
+      /// value of something) but stop if we hit something that could never be
+      /// a constant expression. Some expressions can be retried in the
+      /// optimizer if we don't constant fold them here, but in an unevaluated
+      /// context we try to fold them immediately since the optimizer never
+      /// gets a chance to look at them.
+      EM_PotentialConstantExpressionUnevaluated
     } EvalMode;
 
     /// Are we checking whether the expression is a potential constant
     /// expression?
     bool checkingPotentialConstantExpression() const {
-      return EvalMode == EM_PotentialConstantExpression;
+      return EvalMode == EM_PotentialConstantExpression ||
+             EvalMode == EM_PotentialConstantExpressionUnevaluated;
     }
 
     /// Are we checking an expression for overflow?
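
The two modes added above are unevaluated-context variants of EM_ConstantExpression and EM_PotentialConstantExpression: because an unevaluated operand never reaches the optimizer, folding is attempted immediately rather than deferred. A hypothetical call shape only (Ctx, Status, and InUnevaluatedContext are placeholders; the real entry points that select these modes appear further down in ExprConstant.cpp):

EvalInfo Info(Ctx, Status,
              InUnevaluatedContext
                  ? EvalInfo::EM_ConstantExpressionUnevaluated
                  : EvalInfo::EM_ConstantExpression);
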
@@ -573,6 +590,8 @@
             // some later problem.
           case EM_ConstantExpression:
           case EM_PotentialConstantExpression:
+          case EM_ConstantExpressionUnevaluated:
+          case EM_PotentialConstantExpressionUnevaluated:
             HasActiveDiagnostic = false;
             return OptionalDiagnostic();
           }
@@ -644,11 +663,13 @@
     bool keepEvaluatingAfterSideEffect() {
       switch (EvalMode) {
       case EM_PotentialConstantExpression:
+      case EM_PotentialConstantExpressionUnevaluated:
       case EM_EvaluateForOverflow:
       case EM_IgnoreSideEffects:
         return true;
 
       case EM_ConstantExpression:
+      case EM_ConstantExpressionUnevaluated:
       case EM_ConstantFold:
         return false;
       }
@@ -670,10 +691,12 @@
 
       switch (EvalMode) {
       case EM_PotentialConstantExpression:
+      case EM_PotentialConstantExpressionUnevaluated:
       case EM_EvaluateForOverflow:
         return true;
 
       case EM_ConstantExpression:
+      case EM_ConstantExpressionUnevaluated:
       case EM_ConstantFold:
       case EM_IgnoreSideEffects:
         return false;
@@ -696,7 +719,9 @@
                         Info.EvalStatus.Diag->empty() &&
                         !Info.EvalStatus.HasSideEffects),
         OldMode(Info.EvalMode) {
-      if (Enabled && Info.EvalMode == EvalInfo::EM_ConstantExpression)
+      if (Enabled &&
+          (Info.EvalMode == EvalInfo::EM_ConstantExpression ||
+           Info.EvalMode == EvalInfo::EM_ConstantExpressionUnevaluated))
         Info.EvalMode = EvalInfo::EM_ConstantFold;
     }
     void keepDiagnostics() { Enabled = false; }
@@ -1141,7 +1166,7 @@
 
 /// Should this call expression be treated as a string literal?
 static bool IsStringLiteralCall(const CallExpr *E) {
-  unsigned Builtin = E->isBuiltinCall();
+  unsigned Builtin = E->getBuiltinCallee();
   return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
           Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
 }
@@ -1338,8 +1363,7 @@
           return false;
       }
     }
-    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
-         I != E; ++I) {
+    for (const auto *I : RD->fields()) {
       if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
                                    Value.getStructField(I->getFieldIndex())))
         return false;
@@ -1807,9 +1831,8 @@
 static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
                                        LValue &LVal,
                                        const IndirectFieldDecl *IFD) {
-  for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
-                                         CE = IFD->chain_end(); C != CE; ++C)
-    if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(*C)))
+  for (const auto *C : IFD->chain())
+    if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(C)))
       return false;
   return true;
 }
@@ -3092,14 +3115,18 @@
     Result.set(VD, Info.CurrentCall->Index);
     APValue &Val = Info.CurrentCall->createTemporary(VD, true);
 
-    if (!VD->getInit()) {
+    const Expr *InitE = VD->getInit();
+    if (!InitE) {
       Info.Diag(D->getLocStart(), diag::note_constexpr_uninitialized)
         << false << VD->getType();
       Val = APValue();
       return false;
     }
 
-    if (!EvaluateInPlace(Val, Info, Result, VD->getInit())) {
+    if (InitE->isValueDependent())
+      return false;
+
+    if (!EvaluateInPlace(Val, Info, Result, InitE)) {
       // Wipe out any partially-computed value, to allow tracking that this
       // evaluation failed.
       Val = APValue();
@@ -3291,13 +3318,12 @@
 
   case Stmt::DeclStmtClass: {
     const DeclStmt *DS = cast<DeclStmt>(S);
-    for (DeclStmt::const_decl_iterator DclIt = DS->decl_begin(),
-           DclEnd = DS->decl_end(); DclIt != DclEnd; ++DclIt) {
+    for (const auto *DclIt : DS->decls()) {
       // Each declaration initialization is its own full-expression.
       // FIXME: This isn't quite right; if we're performing aggregate
       // initialization, each braced subexpression is its own full-expression.
       FullExpressionRAII Scope(Info);
-      if (!EvaluateDecl(Info, *DclIt) && !Info.keepEvaluatingAfterFailure())
+      if (!EvaluateDecl(Info, DclIt) && !Info.keepEvaluatingAfterFailure())
         return ESR_Failed;
     }
     return ESR_Succeeded;
@@ -3315,9 +3341,8 @@
     BlockScopeRAII Scope(Info);
 
     const CompoundStmt *CS = cast<CompoundStmt>(S);
-    for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
-           BE = CS->body_end(); BI != BE; ++BI) {
-      EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI, Case);
+    for (const auto *BI : CS->body()) {
+      EvalStmtResult ESR = EvaluateStmt(Result, Info, BI, Case);
       if (ESR == ESR_Succeeded)
         Case = 0;
       else if (ESR != ESR_CaseNotFound)
@@ -3593,7 +3618,7 @@
 
   EvalStmtResult ESR = EvaluateStmt(Result, Info, Body);
   if (ESR == ESR_Succeeded) {
-    if (Callee->getResultType()->isVoidType())
+    if (Callee->getReturnType()->isVoidType())
       return true;
     Info.Diag(Callee->getLocEnd(), diag::note_constexpr_no_return);
   }
@@ -3659,15 +3684,14 @@
 #ifndef NDEBUG
   CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
 #endif
-  for (CXXConstructorDecl::init_const_iterator I = Definition->init_begin(),
-       E = Definition->init_end(); I != E; ++I) {
+  for (const auto *I : Definition->inits()) {
     LValue Subobject = This;
     APValue *Value = &Result;
 
     // Determine the subobject to initialize.
     FieldDecl *FD = 0;
-    if ((*I)->isBaseInitializer()) {
-      QualType BaseType((*I)->getBaseClass(), 0);
+    if (I->isBaseInitializer()) {
+      QualType BaseType(I->getBaseClass(), 0);
 #ifndef NDEBUG
       // Non-virtual base classes are initialized in the order in the class
       // definition. We have already checked for virtual base classes.
@@ -3676,12 +3700,12 @@
              "base class initializers not in expected order");
       ++BaseIt;
 #endif
-      if (!HandleLValueDirectBase(Info, (*I)->getInit(), Subobject, RD,
+      if (!HandleLValueDirectBase(Info, I->getInit(), Subobject, RD,
                                   BaseType->getAsCXXRecordDecl(), &Layout))
         return false;
       Value = &Result.getStructBase(BasesSeen++);
-    } else if ((FD = (*I)->getMember())) {
-      if (!HandleLValueMember(Info, (*I)->getInit(), Subobject, FD, &Layout))
+    } else if ((FD = I->getMember())) {
+      if (!HandleLValueMember(Info, I->getInit(), Subobject, FD, &Layout))
         return false;
       if (RD->isUnion()) {
         Result = APValue(FD);
@@ -3689,13 +3713,11 @@
       } else {
         Value = &Result.getStructField(FD->getFieldIndex());
       }
-    } else if (IndirectFieldDecl *IFD = (*I)->getIndirectMember()) {
+    } else if (IndirectFieldDecl *IFD = I->getIndirectMember()) {
       // Walk the indirect field decl's chain to find the object to initialize,
       // and make sure we've initialized every step along it.
-      for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
-                                             CE = IFD->chain_end();
-           C != CE; ++C) {
-        FD = cast<FieldDecl>(*C);
+      for (auto *C : IFD->chain()) {
+        FD = cast<FieldDecl>(C);
         CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
         // Switch the union field if it differs. This happens if we had
         // preceding zero-initialization, and we're now initializing a union
@@ -3710,7 +3732,7 @@
             *Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
                              std::distance(CD->field_begin(), CD->field_end()));
         }
-        if (!HandleLValueMember(Info, (*I)->getInit(), Subobject, FD))
+        if (!HandleLValueMember(Info, I->getInit(), Subobject, FD))
           return false;
         if (CD->isUnion())
           Value = &Value->getUnionValue();
@@ -3722,8 +3744,8 @@
     }
 
     FullExpressionRAII InitScope(Info);
-    if (!EvaluateInPlace(*Value, Info, Subobject, (*I)->getInit()) ||
-        (FD && FD->isBitField() && !truncateBitfieldValue(Info, (*I)->getInit(),
+    if (!EvaluateInPlace(*Value, Info, Subobject, I->getInit()) ||
+        (FD && FD->isBitField() && !truncateBitfieldValue(Info, I->getInit(),
                                                           *Value, FD))) {
       // If we're checking for a potential constant expression, evaluate all
       // initializers even if some of them fail.
@@ -3742,15 +3764,14 @@
 //===----------------------------------------------------------------------===//
 namespace {
 
-// FIXME: RetTy is always bool. Remove it.
-template <class Derived, typename RetTy=bool>
+template <class Derived>
 class ExprEvaluatorBase
-  : public ConstStmtVisitor<Derived, RetTy> {
+  : public ConstStmtVisitor<Derived, bool> {
 private:
-  RetTy DerivedSuccess(const APValue &V, const Expr *E) {
+  bool DerivedSuccess(const APValue &V, const Expr *E) {
     return static_cast<Derived*>(this)->Success(V, E);
   }
-  RetTy DerivedZeroInitialization(const Expr *E) {
+  bool DerivedZeroInitialization(const Expr *E) {
     return static_cast<Derived*>(this)->ZeroInitialization(E);
   }
 
@@ -3795,14 +3816,14 @@
 
 protected:
   EvalInfo &Info;
-  typedef ConstStmtVisitor<Derived, RetTy> StmtVisitorTy;
+  typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy;
   typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
 
   OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
     return Info.CCEDiag(E, D);
   }
 
-  RetTy ZeroInitialization(const Expr *E) { return Error(E); }
+  bool ZeroInitialization(const Expr *E) { return Error(E); }
 
 public:
   ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
@@ -3819,28 +3840,28 @@
     return Error(E, diag::note_invalid_subexpr_in_const_expr);
   }
 
-  RetTy VisitStmt(const Stmt *) {
+  bool VisitStmt(const Stmt *) {
     llvm_unreachable("Expression evaluator should not be called on stmts");
   }
-  RetTy VisitExpr(const Expr *E) {
+  bool VisitExpr(const Expr *E) {
     return Error(E);
   }
 
-  RetTy VisitParenExpr(const ParenExpr *E)
+  bool VisitParenExpr(const ParenExpr *E)
     { return StmtVisitorTy::Visit(E->getSubExpr()); }
-  RetTy VisitUnaryExtension(const UnaryOperator *E)
+  bool VisitUnaryExtension(const UnaryOperator *E)
     { return StmtVisitorTy::Visit(E->getSubExpr()); }
-  RetTy VisitUnaryPlus(const UnaryOperator *E)
+  bool VisitUnaryPlus(const UnaryOperator *E)
     { return StmtVisitorTy::Visit(E->getSubExpr()); }
-  RetTy VisitChooseExpr(const ChooseExpr *E)
+  bool VisitChooseExpr(const ChooseExpr *E)
     { return StmtVisitorTy::Visit(E->getChosenSubExpr()); }
-  RetTy VisitGenericSelectionExpr(const GenericSelectionExpr *E)
+  bool VisitGenericSelectionExpr(const GenericSelectionExpr *E)
     { return StmtVisitorTy::Visit(E->getResultExpr()); }
-  RetTy VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
+  bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
     { return StmtVisitorTy::Visit(E->getReplacement()); }
-  RetTy VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
+  bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
     { return StmtVisitorTy::Visit(E->getExpr()); }
-  RetTy VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
+  bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
     // The initializer may not have been parsed yet, or might be erroneous.
     if (!E->getExpr())
       return Error(E);
@@ -3848,19 +3869,19 @@
   }
   // We cannot create any objects for which cleanups are required, so there is
   // nothing to do here; all cleanups must come from unevaluated subexpressions.
-  RetTy VisitExprWithCleanups(const ExprWithCleanups *E)
+  bool VisitExprWithCleanups(const ExprWithCleanups *E)
     { return StmtVisitorTy::Visit(E->getSubExpr()); }
 
-  RetTy VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
+  bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
     CCEDiag(E, diag::note_constexpr_invalid_cast) << 0;
     return static_cast<Derived*>(this)->VisitCastExpr(E);
   }
-  RetTy VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
+  bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
     CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
     return static_cast<Derived*>(this)->VisitCastExpr(E);
   }
 
-  RetTy VisitBinaryOperator(const BinaryOperator *E) {
+  bool VisitBinaryOperator(const BinaryOperator *E) {
     switch (E->getOpcode()) {
     default:
       return Error(E);
@@ -3882,7 +3903,7 @@
     }
   }
 
-  RetTy VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
+  bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
     // Evaluate and cache the common expression. We treat it as a temporary,
     // even though it's not quite the same thing.
     if (!Evaluate(Info.CurrentCall->createTemporary(E->getOpaqueValue(), false),
@@ -3892,7 +3913,7 @@
     return HandleConditionalOperator(E);
   }
 
-  RetTy VisitConditionalOperator(const ConditionalOperator *E) {
+  bool VisitConditionalOperator(const ConditionalOperator *E) {
     bool IsBcpCall = false;
     // If the condition (ignoring parens) is a __builtin_constant_p call,
     // the result is a constant expression if it can be folded without
@@ -3900,7 +3921,7 @@
     // for discussion.
     if (const CallExpr *CallCE =
           dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
-      if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+      if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
         IsBcpCall = true;
 
     // Always assume __builtin_constant_p(...) ? ... : ... is a potential
@@ -3917,7 +3938,7 @@
     return true;
   }
 
-  RetTy VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+  bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
     if (APValue *Value = Info.CurrentCall->getTemporary(E))
       return DerivedSuccess(*Value, E);
 
@@ -3931,7 +3952,7 @@
     return StmtVisitorTy::Visit(Source);
   }
 
-  RetTy VisitCallExpr(const CallExpr *E) {
+  bool VisitCallExpr(const CallExpr *E) {
     const Expr *Callee = E->getCallee()->IgnoreParens();
     QualType CalleeType = Callee->getType();
 
@@ -4016,28 +4037,28 @@
     return DerivedSuccess(Result, E);
   }
 
-  RetTy VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+  bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
     return StmtVisitorTy::Visit(E->getInitializer());
   }
-  RetTy VisitInitListExpr(const InitListExpr *E) {
+  bool VisitInitListExpr(const InitListExpr *E) {
     if (E->getNumInits() == 0)
       return DerivedZeroInitialization(E);
     if (E->getNumInits() == 1)
       return StmtVisitorTy::Visit(E->getInit(0));
     return Error(E);
   }
-  RetTy VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+  bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
     return DerivedZeroInitialization(E);
   }
-  RetTy VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+  bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
     return DerivedZeroInitialization(E);
   }
-  RetTy VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+  bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
     return DerivedZeroInitialization(E);
   }
 
   /// A member expression where the object is a prvalue is itself a prvalue.
-  RetTy VisitMemberExpr(const MemberExpr *E) {
+  bool VisitMemberExpr(const MemberExpr *E) {
     assert(!E->isArrow() && "missing call to bound member function?");
 
     APValue Val;
@@ -4061,7 +4082,7 @@
            DerivedSuccess(Result, E);
   }
 
-  RetTy VisitCastExpr(const CastExpr *E) {
+  bool VisitCastExpr(const CastExpr *E) {
     switch (E->getCastKind()) {
     default:
       break;
@@ -4093,13 +4114,13 @@
     return Error(E);
   }
 
-  RetTy VisitUnaryPostInc(const UnaryOperator *UO) {
+  bool VisitUnaryPostInc(const UnaryOperator *UO) {
     return VisitUnaryPostIncDec(UO);
   }
-  RetTy VisitUnaryPostDec(const UnaryOperator *UO) {
+  bool VisitUnaryPostDec(const UnaryOperator *UO) {
     return VisitUnaryPostIncDec(UO);
   }
-  RetTy VisitUnaryPostIncDec(const UnaryOperator *UO) {
+  bool VisitUnaryPostIncDec(const UnaryOperator *UO) {
     if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
       return Error(UO);
 
@@ -4113,7 +4134,7 @@
     return DerivedSuccess(RVal, UO);
   }
 
-  RetTy VisitStmtExpr(const StmtExpr *E) {
+  bool VisitStmtExpr(const StmtExpr *E) {
     // We will have checked the full-expressions inside the statement expression
     // when they were completed, and don't need to check them again now.
     if (Info.checkingForOverflow())
@@ -4162,11 +4183,11 @@
 namespace {
 template<class Derived>
 class LValueExprEvaluatorBase
-  : public ExprEvaluatorBase<Derived, bool> {
+  : public ExprEvaluatorBase<Derived> {
 protected:
   LValue &Result;
   typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
-  typedef ExprEvaluatorBase<Derived, bool> ExprEvaluatorBaseTy;
+  typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy;
 
   bool Success(APValue::LValueBase B) {
     Result.set(B);
@@ -4584,7 +4605,7 @@
 
 namespace {
 class PointerExprEvaluator
-  : public ExprEvaluatorBase<PointerExprEvaluator, bool> {
+  : public ExprEvaluatorBase<PointerExprEvaluator> {
   LValue &Result;
 
   bool Success(const Expr *E) {
@@ -4769,7 +4790,7 @@
   if (IsStringLiteralCall(E))
     return Success(E);
 
-  switch (E->isBuiltinCall()) {
+  switch (E->getBuiltinCallee()) {
   case Builtin::BI__builtin_addressof:
     return EvaluateLValue(E->getArg(0), Result, Info);
 
@@ -4784,7 +4805,7 @@
 
 namespace {
 class MemberPointerExprEvaluator
-  : public ExprEvaluatorBase<MemberPointerExprEvaluator, bool> {
+  : public ExprEvaluatorBase<MemberPointerExprEvaluator> {
   MemberPtr &Result;
 
   bool Success(const ValueDecl *D) {
@@ -4872,7 +4893,7 @@
 
 namespace {
   class RecordExprEvaluator
-  : public ExprEvaluatorBase<RecordExprEvaluator, bool> {
+  : public ExprEvaluatorBase<RecordExprEvaluator> {
     const LValue &This;
     APValue &Result;
   public:
@@ -4925,14 +4946,13 @@
     }
   }
 
-  for (RecordDecl::field_iterator I = RD->field_begin(), End = RD->field_end();
-       I != End; ++I) {
+  for (const auto *I : RD->fields()) {
     // -- if T is a reference type, no initialization is performed.
     if (I->getType()->isReferenceType())
       continue;
 
     LValue Subobject = This;
-    if (!HandleLValueMember(Info, E, Subobject, *I, &Layout))
+    if (!HandleLValueMember(Info, E, Subobject, I, &Layout))
       return false;
 
     ImplicitValueInitExpr VIE(I->getType());
@@ -5040,8 +5060,7 @@
                    std::distance(RD->field_begin(), RD->field_end()));
   unsigned ElementNo = 0;
   bool Success = true;
-  for (RecordDecl::field_iterator Field = RD->field_begin(),
-       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) {
+  for (const auto *Field : RD->fields()) {
     // Anonymous bit-fields are not considered members of the class for
     // purposes of aggregate initialization.
     if (Field->isUnnamedBitfield())
@@ -5054,7 +5073,7 @@
     // FIXME: Diagnostics here should point to the end of the initializer
     // list, not the start.
     if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E,
-                            Subobject, *Field, &Layout))
+                            Subobject, Field, &Layout))
       return false;
 
     // Perform an implicit value-initialization for members beyond the end of
@@ -5069,7 +5088,7 @@
     APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
     if (!EvaluateInPlace(FieldVal, Info, Subobject, Init) ||
         (Field->isBitField() && !truncateBitfieldValue(Info, Init,
-                                                       FieldVal, *Field))) {
+                                                       FieldVal, Field))) {
       if (!Info.keepEvaluatingAfterFailure())
         return false;
       Success = false;
@@ -5089,16 +5108,15 @@
     if (!Result.isUninit())
       return true;
 
-    if (ZeroInit)
-      return ZeroInitialization(E);
-
-    const CXXRecordDecl *RD = FD->getParent();
-    if (RD->isUnion())
-      Result = APValue((FieldDecl*)0);
-    else
-      Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
-                       std::distance(RD->field_begin(), RD->field_end()));
-    return true;
+    // We can get here in two different ways:
+    //  1) We're performing value-initialization, and should zero-initialize
+    //     the object, or
+    //  2) We're performing default-initialization of an object with a trivial
+    //     constexpr default constructor, in which case we should start the
+    //     lifetimes of all the base subobjects (there can be no data member
+    //     subobjects in this case) per [basic.life]p1.
+    // Either way, ZeroInitialization is appropriate.
+    return ZeroInitialization(E);
   }
 
   const FunctionDecl *Definition = 0;
@@ -5235,7 +5253,7 @@
 
 namespace {
   class VectorExprEvaluator
-  : public ExprEvaluatorBase<VectorExprEvaluator, bool> {
+  : public ExprEvaluatorBase<VectorExprEvaluator> {
     APValue &Result;
   public:
 
@@ -5416,7 +5434,7 @@
 
 namespace {
   class ArrayExprEvaluator
-  : public ExprEvaluatorBase<ArrayExprEvaluator, bool> {
+  : public ExprEvaluatorBase<ArrayExprEvaluator> {
     const LValue &This;
     APValue &Result;
   public:
@@ -5578,19 +5596,9 @@
     if (HadZeroInit)
       return true;
 
-    if (ZeroInit) {
-      ImplicitValueInitExpr VIE(Type);
-      return EvaluateInPlace(*Value, Info, Subobject, &VIE);
-    }
-
-    const CXXRecordDecl *RD = FD->getParent();
-    if (RD->isUnion())
-      *Value = APValue((FieldDecl*)0);
-    else
-      *Value =
-          APValue(APValue::UninitStruct(), RD->getNumBases(),
-                  std::distance(RD->field_begin(), RD->field_end()));
-    return true;
+    // See RecordExprEvaluator::VisitCXXConstructExpr for explanation.
+    ImplicitValueInitExpr VIE(Type);
+    return EvaluateInPlace(*Value, Info, Subobject, &VIE);
   }
 
   const FunctionDecl *Definition = 0;
@@ -5621,7 +5629,7 @@
 
 namespace {
 class IntExprEvaluator
-  : public ExprEvaluatorBase<IntExprEvaluator, bool> {
+  : public ExprEvaluatorBase<IntExprEvaluator> {
   APValue &Result;
 public:
   IntExprEvaluator(EvalInfo &info, APValue &result)
@@ -5727,14 +5735,6 @@
     return ZeroInitialization(E);
   }
 
-  bool VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
-    return Success(E->getValue(), E);
-  }
-
-  bool VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
-    return Success(E->getValue(), E);
-  }
-
   bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
     return Success(E->getValue(), E);
   }
@@ -5975,7 +5975,7 @@
 }
 
 bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
-  switch (unsigned BuiltinOp = E->isBuiltinCall()) {
+  switch (unsigned BuiltinOp = E->getBuiltinCallee()) {
   default:
     return ExprEvaluatorBaseTy::VisitCallExpr(E);
 
@@ -5994,7 +5994,17 @@
 
     // Expression had no side effects, but we couldn't statically determine the
     // size of the referenced object.
-    return Error(E);
+    switch (Info.EvalMode) {
+    case EvalInfo::EM_ConstantExpression:
+    case EvalInfo::EM_PotentialConstantExpression:
+    case EvalInfo::EM_ConstantFold:
+    case EvalInfo::EM_EvaluateForOverflow:
+    case EvalInfo::EM_IgnoreSideEffects:
+      return Error(E);
+    case EvalInfo::EM_ConstantExpressionUnevaluated:
+    case EvalInfo::EM_PotentialConstantExpressionUnevaluated:
+      return Success(-1ULL, E);
+    }
   }
 
   case Builtin::BI__builtin_bswap16:
@@ -7120,6 +7130,7 @@
   case CK_BuiltinFnToFnPtr:
   case CK_ZeroToOCLEvent:
   case CK_NonAtomicToAtomic:
+  case CK_AddressSpaceConversion:
     llvm_unreachable("invalid cast kind for integral value");
 
   case CK_BitCast:
@@ -7258,7 +7269,7 @@
 
 namespace {
 class FloatExprEvaluator
-  : public ExprEvaluatorBase<FloatExprEvaluator, bool> {
+  : public ExprEvaluatorBase<FloatExprEvaluator> {
   APFloat &Result;
 public:
   FloatExprEvaluator(EvalInfo &info, APFloat &result)
@@ -7319,7 +7330,7 @@
 }
 
 bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
-  switch (E->isBuiltinCall()) {
+  switch (E->getBuiltinCallee()) {
   default:
     return ExprEvaluatorBaseTy::VisitCallExpr(E);
 
@@ -7474,7 +7485,7 @@
 
 namespace {
 class ComplexExprEvaluator
-  : public ExprEvaluatorBase<ComplexExprEvaluator, bool> {
+  : public ExprEvaluatorBase<ComplexExprEvaluator> {
   ComplexValue &Result;
 
 public:
@@ -7592,6 +7603,7 @@
   case CK_BuiltinFnToFnPtr:
   case CK_ZeroToOCLEvent:
   case CK_NonAtomicToAtomic:
+  case CK_AddressSpaceConversion:
     llvm_unreachable("invalid cast kind for complex value");
 
   case CK_LValueToRValue:
@@ -7858,7 +7870,7 @@
 
 namespace {
 class AtomicExprEvaluator :
-    public ExprEvaluatorBase<AtomicExprEvaluator, bool> {
+    public ExprEvaluatorBase<AtomicExprEvaluator> {
   APValue &Result;
 public:
   AtomicExprEvaluator(EvalInfo &Info, APValue &Result)
@@ -7898,7 +7910,7 @@
 
 namespace {
 class VoidExprEvaluator
-  : public ExprEvaluatorBase<VoidExprEvaluator, bool> {
+  : public ExprEvaluatorBase<VoidExprEvaluator> {
 public:
   VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
 
@@ -8000,6 +8012,8 @@
 /// an object can indirectly refer to subobjects which were initialized earlier.
 static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
                             const Expr *E, bool AllowNonLiteralTypes) {
+  assert(!E->isValueDependent());
+
   if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, &This))
     return false;
 
@@ -8019,6 +8033,9 @@
 /// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
 /// lvalue-to-rvalue cast if it is an lvalue.
 static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
+  if (E->getType().isNull())
+    return false;
+
   if (!CheckLiteralType(Info, E))
     return false;
 
@@ -8046,6 +8063,13 @@
     IsConst = true;
     return true;
   }
+
+  // This case should be rare, but we need to check it before we inspect the
+  // type below.
+  if (Exp->getType().isNull()) {
+    IsConst = false;
+    return true;
+  }
   
   // FIXME: Evaluating values of large array and record types can cause
   // performance problems. Only do so in C++11 for now.
@@ -8303,10 +8327,20 @@
   case Expr::MaterializeTemporaryExprClass:
   case Expr::PseudoObjectExprClass:
   case Expr::AtomicExprClass:
-  case Expr::InitListExprClass:
   case Expr::LambdaExprClass:
     return ICEDiag(IK_NotICE, E->getLocStart());
 
+  case Expr::InitListExprClass: {
+    // C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the
+    // form "T x = { a };" is equivalent to "T x = a;".
+    // Unless we're initializing a reference, T is a scalar as it is known to be
+    // of integral or enumeration type.
+    if (E->isRValue())
+      if (cast<InitListExpr>(E)->getNumInits() == 1)
+        return CheckICE(cast<InitListExpr>(E)->getInit(0), Ctx);
+    return ICEDiag(IK_NotICE, E->getLocStart());
+  }
+
   case Expr::SizeOfPackExprClass:
   case Expr::GNUNullExprClass:
     // GCC considers the GNU __null value to be an integral constant expression.
@@ -8325,8 +8359,6 @@
   case Expr::ObjCBoolLiteralExprClass:
   case Expr::CXXBoolLiteralExprClass:
   case Expr::CXXScalarValueInitExprClass:
-  case Expr::UnaryTypeTraitExprClass:
-  case Expr::BinaryTypeTraitExprClass:
   case Expr::TypeTraitExprClass:
   case Expr::ArrayTypeTraitExprClass:
   case Expr::ExpressionTraitExprClass:
@@ -8338,7 +8370,7 @@
     // constant expressions, but they can never be ICEs because an ICE cannot
     // contain an operand of (pointer to) function type.
     const CallExpr *CE = cast<CallExpr>(E);
-    if (CE->isBuiltinCall())
+    if (CE->getBuiltinCallee())
       return CheckEvalInICE(E, Ctx);
     return ICEDiag(IK_NotICE, E->getLocStart());
   }
@@ -8555,7 +8587,7 @@
     // extension.  See GCC PR38377 for discussion.
     if (const CallExpr *CallCE
         = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
-      if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+      if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
         return CheckEvalInICE(E, Ctx);
     ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
     if (CondResult.Kind == IK_NotICE)
@@ -8665,6 +8697,28 @@
   return IsConstExpr;
 }
 
+bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
+                                    const FunctionDecl *Callee,
+                                    llvm::ArrayRef<const Expr*> Args) const {
+  Expr::EvalStatus Status;
+  EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated);
+
+  ArgVector ArgValues(Args.size());
+  for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
+       I != E; ++I) {
+    if (!Evaluate(ArgValues[I - Args.begin()], Info, *I))
+      // If evaluation fails, throw away the argument entirely.
+      ArgValues[I - Args.begin()] = APValue();
+    if (Info.EvalStatus.HasSideEffects)
+      return false;
+  }
+
+  // Build fake call to Callee.
+  CallStackFrame Frame(Info, Callee->getLocation(), Callee, /*This*/0,
+                       ArgValues.data());
+  return Evaluate(Value, Info, this) && !Info.EvalStatus.HasSideEffects;
+}
+
 bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
                                    SmallVectorImpl<
                                      PartialDiagnosticAt> &Diags) {
@@ -8705,3 +8759,27 @@
 
   return Diags.empty();
 }
+
+bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
+                                              const FunctionDecl *FD,
+                                              SmallVectorImpl<
+                                                PartialDiagnosticAt> &Diags) {
+  Expr::EvalStatus Status;
+  Status.Diag = &Diags;
+
+  EvalInfo Info(FD->getASTContext(), Status,
+                EvalInfo::EM_PotentialConstantExpressionUnevaluated);
+
+  // Fabricate a call stack frame to give the arguments a plausible cover story.
+  ArrayRef<const Expr*> Args;
+  ArgVector ArgValues(0);
+  bool Success = EvaluateArgs(Args, ArgValues, Info);
+  (void)Success;
+  assert(Success &&
+         "Failed to set up arguments for potential constant evaluation");
+  CallStackFrame Frame(Info, SourceLocation(), FD, 0, ArgValues.data());
+
+  APValue ResultScratch;
+  Evaluate(ResultScratch, Info, E);
+  return Diags.empty();
+}
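
The __builtin_object_size hunk above still reports an error in the evaluated modes, but in the new unevaluated ones it folds to -1 when the size of the referenced object cannot be determined. As a reminder of the builtin's user-visible behavior, a standalone sketch (not part of the patch; builds with clang or gcc):

#include <cstddef>
#include <cstdio>

// Through an arbitrary pointer the pointee's size is generally not statically
// determinable; with type 0 the builtin then folds to (size_t)-1 ("unknown").
std::size_t remaining(const char *p) { return __builtin_object_size(p, 0); }

int main() {
  char buf[16];
  std::printf("%zu\n", __builtin_object_size(buf, 0)); // 16: size is known here
  std::printf("%zu\n", remaining(buf)); // usually (size_t)-1; may be 16 if inlined
  return 0;
}
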
diff --git a/lib/AST/InheritViz.cpp b/lib/AST/InheritViz.cpp
index 3d64310..84cc167 100644
--- a/lib/AST/InheritViz.cpp
+++ b/lib/AST/InheritViz.cpp
@@ -93,26 +93,25 @@
   // Display the base classes.
   const CXXRecordDecl *Decl
     = static_cast<const CXXRecordDecl *>(Type->getAs<RecordType>()->getDecl());
-  for (CXXRecordDecl::base_class_const_iterator Base = Decl->bases_begin();
-       Base != Decl->bases_end(); ++Base) {
-    QualType CanonBaseType = Context.getCanonicalType(Base->getType());
+  for (const auto &Base : Decl->bases()) {
+    QualType CanonBaseType = Context.getCanonicalType(Base.getType());
 
     // If this is not virtual inheritance, bump the direct base
     // count for the type.
-    if (!Base->isVirtual())
+    if (!Base.isVirtual())
       ++DirectBaseCount[CanonBaseType];
 
     // Write out the node (if we need to).
-    WriteNode(Base->getType(), Base->isVirtual());
+    WriteNode(Base.getType(), Base.isVirtual());
 
     // Write out the edge.
     Out << "  ";
     WriteNodeReference(Type, FromVirtual);
     Out << " -> ";
-    WriteNodeReference(Base->getType(), Base->isVirtual());
+    WriteNodeReference(Base.getType(), Base.isVirtual());
 
     // Write out edge attributes to show the kind of inheritance.
-    if (Base->isVirtual()) {
+    if (Base.isVirtual()) {
       Out << " [ style=\"dashed\" ]";
     }
     Out << ";";
diff --git a/lib/AST/ItaniumCXXABI.cpp b/lib/AST/ItaniumCXXABI.cpp
index 5784660..ffa2ddc 100644
--- a/lib/AST/ItaniumCXXABI.cpp
+++ b/lib/AST/ItaniumCXXABI.cpp
@@ -33,12 +33,17 @@
 /// literals within a particular context.
 class ItaniumNumberingContext : public MangleNumberingContext {
   llvm::DenseMap<IdentifierInfo*, unsigned> VarManglingNumbers;
+  llvm::DenseMap<IdentifierInfo*, unsigned> TagManglingNumbers;
 
 public:
   /// Variable decls are numbered by identifier.
-  virtual unsigned getManglingNumber(const VarDecl *VD) {
+  unsigned getManglingNumber(const VarDecl *VD, unsigned) override {
     return ++VarManglingNumbers[VD->getIdentifier()];
   }
+
+  unsigned getManglingNumber(const TagDecl *TD, unsigned) override {
+    return ++TagManglingNumbers[TD->getIdentifier()];
+  }
 };
 
 class ItaniumCXXABI : public CXXABI {
@@ -48,7 +53,7 @@
   ItaniumCXXABI(ASTContext &Ctx) : Context(Ctx) { }
 
   std::pair<uint64_t, unsigned>
-  getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const {
+  getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const override {
     const TargetInfo &Target = Context.getTargetInfo();
     TargetInfo::IntType PtrDiff = Target.getPtrDiffType(0);
     uint64_t Width = Target.getTypeWidth(PtrDiff);
@@ -58,13 +63,17 @@
     return std::make_pair(Width, Align);
   }
 
-  CallingConv getDefaultMethodCallConv(bool isVariadic) const {
+  CallingConv getDefaultMethodCallConv(bool isVariadic) const override {
+    const llvm::Triple &T = Context.getTargetInfo().getTriple();
+    if (!isVariadic && T.isWindowsGNUEnvironment() &&
+        T.getArch() == llvm::Triple::x86)
+      return CC_X86ThisCall;
     return CC_C;
   }
 
   // We cheat and just check that the class has a vtable pointer, and that it's
   // only big enough to have a vtable pointer and nothing more (or less).
-  bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+  bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
 
     // Check that the class has a vtable pointer.
     if (!RD->isDynamicClass())
@@ -76,7 +85,7 @@
     return Layout.getNonVirtualSize() == PointerSize;
   }
 
-  virtual MangleNumberingContext *createMangleNumberingContext() const {
+  MangleNumberingContext *createMangleNumberingContext() const override {
     return new ItaniumNumberingContext();
   }
 };
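
The getDefaultMethodCallConv change above makes non-variadic member functions default to thiscall for 32-bit x86 windows-gnu (MinGW) triples. A standalone sketch of just that rule, with hypothetical stand-ins for the llvm::Triple queries rather than the Clang API:

enum CallingConv { CC_C, CC_X86ThisCall };

struct TripleInfo {
  bool isWindowsGNUEnvironment;
  bool isX86; // 32-bit x86 only; x86_64 keeps the plain C convention
};

CallingConv defaultMethodCallConv(const TripleInfo &T, bool isVariadic) {
  if (!isVariadic && T.isWindowsGNUEnvironment && T.isX86)
    return CC_X86ThisCall; // non-variadic methods use thiscall, as on MSVC/MinGW-GCC
  return CC_C;
}
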
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index 0621d7b..27ba2be 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -11,7 +11,7 @@
 // which is used in GCC 3.2 and newer (and many compilers that are
 // ABI-compatible with GCC):
 //
-//   http://www.codesourcery.com/public/cxx-abi/abi.html
+//   http://mentorembedded.github.io/cxx-abi/abi.html#mangling
 //
 //===----------------------------------------------------------------------===//
 #include "clang/AST/Mangle.h"
@@ -21,6 +21,7 @@
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
 #include "clang/AST/ExprCXX.h"
 #include "clang/AST/ExprObjC.h"
 #include "clang/AST/TypeLoc.h"
@@ -101,11 +102,18 @@
   const FunctionDecl *fn = dyn_cast_or_null<FunctionDecl>(decl);
   return (fn ? getStructor(fn) : decl);
 }
-                                                    
+
+static bool isLambda(const NamedDecl *ND) {
+  const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+  if (!Record)
+    return false;
+
+  return Record->isLambda();
+}
+
 static const unsigned UnknownArity = ~0U;
 
 class ItaniumMangleContextImpl : public ItaniumMangleContext {
-  llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
   typedef std::pair<const DeclContext*, IdentifierInfo*> DiscriminatorKeyTy;
   llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
   llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
@@ -115,52 +123,46 @@
                                     DiagnosticsEngine &Diags)
       : ItaniumMangleContext(Context, Diags) {}
 
-  uint64_t getAnonymousStructId(const TagDecl *TD) {
-    std::pair<llvm::DenseMap<const TagDecl *,
-      uint64_t>::iterator, bool> Result =
-      AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
-    return Result.first->second;
-  }
-
   /// @name Mangler Entry Points
   /// @{
 
-  bool shouldMangleCXXName(const NamedDecl *D);
-  void mangleCXXName(const NamedDecl *D, raw_ostream &);
-  void mangleThunk(const CXXMethodDecl *MD,
-                   const ThunkInfo &Thunk,
-                   raw_ostream &);
+  bool shouldMangleCXXName(const NamedDecl *D) override;
+  bool shouldMangleStringLiteral(const StringLiteral *) override {
+    return false;
+  }
+  void mangleCXXName(const NamedDecl *D, raw_ostream &) override;
+  void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
+                   raw_ostream &) override;
   void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
                           const ThisAdjustment &ThisAdjustment,
-                          raw_ostream &);
-  void mangleReferenceTemporary(const VarDecl *D,
-                                raw_ostream &);
-  void mangleCXXVTable(const CXXRecordDecl *RD,
-                       raw_ostream &);
-  void mangleCXXVTT(const CXXRecordDecl *RD,
-                    raw_ostream &);
+                          raw_ostream &) override;
+  void mangleReferenceTemporary(const VarDecl *D, raw_ostream &) override;
+  void mangleCXXVTable(const CXXRecordDecl *RD, raw_ostream &) override;
+  void mangleCXXVTT(const CXXRecordDecl *RD, raw_ostream &) override;
   void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
-                           const CXXRecordDecl *Type,
-                           raw_ostream &);
-  void mangleCXXRTTI(QualType T, raw_ostream &);
-  void mangleCXXRTTIName(QualType T, raw_ostream &);
-  void mangleTypeName(QualType T, raw_ostream &);
+                           const CXXRecordDecl *Type, raw_ostream &) override;
+  void mangleCXXRTTI(QualType T, raw_ostream &) override;
+  void mangleCXXRTTIName(QualType T, raw_ostream &) override;
+  void mangleTypeName(QualType T, raw_ostream &) override;
   void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
-                     raw_ostream &);
+                     raw_ostream &) override;
   void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
-                     raw_ostream &);
+                     raw_ostream &) override;
 
-  void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &);
-  void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out);
-  void mangleDynamicAtExitDestructor(const VarDecl *D, raw_ostream &Out);
-  void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &);
-  void mangleItaniumThreadLocalWrapper(const VarDecl *D, raw_ostream &);
+  void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &) override;
+  void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
+  void mangleDynamicAtExitDestructor(const VarDecl *D,
+                                     raw_ostream &Out) override;
+  void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &) override;
+  void mangleItaniumThreadLocalWrapper(const VarDecl *D,
+                                       raw_ostream &) override;
+
+  void mangleStringLiteral(const StringLiteral *, raw_ostream &) override;
 
   bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
     // Lambda closure types are already numbered.
-    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND))
-      if (RD->isLambda())
-        return false;
+    if (isLambda(ND))
+      return false;
 
     // Anonymous tags are already numbered.
     if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
@@ -538,14 +540,6 @@
   return 0;
 }
 
-static bool isLambda(const NamedDecl *ND) {
-  const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
-  if (!Record)
-    return false;
-  
-  return Record->isLambda();
-}
-
 void CXXNameMangler::mangleName(const NamedDecl *ND) {
   //  <name> ::= <nested-name>
   //         ::= <unscoped-name>
@@ -833,6 +827,7 @@
     switch (type->getTypeClass()) {
     case Type::Builtin:
     case Type::Complex:
+    case Type::Adjusted:
     case Type::Decayed:
     case Type::Pointer:
     case Type::BlockPointer:
@@ -1032,10 +1027,9 @@
   assert(RD->isAnonymousStructOrUnion() &&
          "Expected anonymous struct or union!");
   
-  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
-       I != E; ++I) {
+  for (const auto *I : RD->fields()) {
     if (I->getIdentifier())
-      return *I;
+      return I;
     
     if (const RecordType *RT = I->getType()->getAs<RecordType>())
       if (const FieldDecl *NamedDataMember = 
@@ -1148,7 +1142,7 @@
     }
 
     // Get a unique id for the anonymous struct.
-    uint64_t AnonStructId = Context.getAnonymousStructId(TD);
+    unsigned AnonStructId = Context.getAnonymousStructId(TD);
 
     // Mangle it as a source name in the form
     // [n] $_<id>
@@ -1919,7 +1913,7 @@
   //                 ::= x  # long long, __int64
   //                 ::= y  # unsigned long long, __int64
   //                 ::= n  # __int128
-  // UNSUPPORTED:    ::= o  # unsigned __int128
+  //                 ::= o  # unsigned __int128
   //                 ::= f  # float
   //                 ::= d  # double
   //                 ::= e  # long double, __float80
@@ -2012,11 +2006,11 @@
   // <bare-function-type> ::= <signature type>+
   if (MangleReturnType) {
     FunctionTypeDepth.enterResultType();
-    mangleType(Proto->getResultType());
+    mangleType(Proto->getReturnType());
     FunctionTypeDepth.leaveResultType();
   }
 
-  if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+  if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
     //   <builtin-type> ::= v   # void
     Out << 'v';
 
@@ -2024,10 +2018,8 @@
     return;
   }
 
-  for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
-                                         ArgEnd = Proto->arg_type_end();
-       Arg != ArgEnd; ++Arg)
-    mangleType(Context.getASTContext().getSignatureParameterType(*Arg));
+  for (const auto &Arg : Proto->param_types())
+    mangleType(Context.getASTContext().getSignatureParameterType(Arg));
 
   FunctionTypeDepth.pop(saved);
 
@@ -2161,8 +2153,17 @@
   const char *EltName = 0;
   if (T->getVectorKind() == VectorType::NeonPolyVector) {
     switch (cast<BuiltinType>(EltType)->getKind()) {
-    case BuiltinType::SChar:     EltName = "poly8_t"; break;
-    case BuiltinType::Short:     EltName = "poly16_t"; break;
+    case BuiltinType::SChar:
+    case BuiltinType::UChar:
+      EltName = "poly8_t";
+      break;
+    case BuiltinType::Short:
+    case BuiltinType::UShort:
+      EltName = "poly16_t";
+      break;
+    case BuiltinType::ULongLong:
+      EltName = "poly64_t";
+      break;
     default: llvm_unreachable("unexpected Neon polynomial vector element type");
     }
   } else {
@@ -2175,6 +2176,7 @@
     case BuiltinType::UInt:      EltName = "uint32_t"; break;
     case BuiltinType::LongLong:  EltName = "int64_t"; break;
     case BuiltinType::ULongLong: EltName = "uint64_t"; break;
+    case BuiltinType::Double:    EltName = "float64_t"; break;
     case BuiltinType::Float:     EltName = "float32_t"; break;
     case BuiltinType::Half:      EltName = "float16_t";break;
     default:
@@ -2202,6 +2204,7 @@
     return "Int16";
   case BuiltinType::Int:
     return "Int32";
+  case BuiltinType::Long:
   case BuiltinType::LongLong:
     return "Int64";
   case BuiltinType::UChar:
@@ -2210,6 +2213,7 @@
     return "Uint16";
   case BuiltinType::UInt:
     return "Uint32";
+  case BuiltinType::ULong:
   case BuiltinType::ULongLong:
     return "Uint64";
   case BuiltinType::Half:
@@ -2245,7 +2249,7 @@
     case BuiltinType::UShort:
       EltName = "Poly16";
       break;
-    case BuiltinType::ULongLong:
+    case BuiltinType::ULong:
       EltName = "Poly64";
       break;
     default:
@@ -2270,8 +2274,12 @@
 void CXXNameMangler::mangleType(const VectorType *T) {
   if ((T->getVectorKind() == VectorType::NeonVector ||
        T->getVectorKind() == VectorType::NeonPolyVector)) {
-    if (getASTContext().getTargetInfo().getTriple().getArch() ==
-        llvm::Triple::aarch64)
+    llvm::Triple Target = getASTContext().getTargetInfo().getTriple();
+    llvm::Triple::ArchType Arch =
+        getASTContext().getTargetInfo().getTriple().getArch();
+    if (Arch == llvm::Triple::aarch64 ||
+        Arch == llvm::Triple::aarch64_be ||
+        (Arch == llvm::Triple::arm64 && !Target.isOSDarwin()))
       mangleAArch64NeonVectorType(T);
     else
       mangleNeonVectorType(T);
@@ -2311,9 +2319,8 @@
     SmallString<64> QualStr;
     llvm::raw_svector_ostream QualOS(QualStr);
     QualOS << "objcproto";
-    ObjCObjectType::qual_iterator i = T->qual_begin(), e = T->qual_end();
-    for ( ; i != e; ++i) {
-      StringRef name = (*i)->getName();
+    for (const auto *I : T->quals()) {
+      StringRef name = I->getName();
       QualOS << name.size() << name;
     }
     QualOS.flush();
@@ -2438,7 +2445,7 @@
 }
 
 void CXXNameMangler::mangleType(const AtomicType *T) {
-  // <type> ::= U <source-name> <type>	# vendor extended type qualifier
+  // <type> ::= U <source-name> <type>  # vendor extended type qualifier
   // (Until there's a standardized mangling...)
   Out << "U7_Atomic";
   mangleType(T->getValueType());
@@ -2581,8 +2588,6 @@
   case Expr::ShuffleVectorExprClass:
   case Expr::ConvertVectorExprClass:
   case Expr::StmtExprClass:
-  case Expr::UnaryTypeTraitExprClass:
-  case Expr::BinaryTypeTraitExprClass:
   case Expr::TypeTraitExprClass:
   case Expr::ArrayTypeTraitExprClass:
   case Expr::ExpressionTraitExprClass:
@@ -3789,6 +3794,10 @@
   mangleCXXRTTIName(Ty, Out);
 }
 
+void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *, raw_ostream &) {
+  llvm_unreachable("Can't mangle string literals");
+}
+
 ItaniumMangleContext *
 ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
   return new ItaniumMangleContextImpl(Context, Diags);
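
For context on the <bare-function-type> hunk above (an empty, non-variadic parameter list mangles as 'v'), a few illustrative manglings that rule produces; they are not introduced by this patch and can be checked with c++filt:

void f();              // _Z1fv  : empty parameter list mangles as 'v'
void g(int, ...);      // _Z1giz : 'i' for int, 'z' for the trailing ellipsis
void h(float, double); // _Z1hfd : parameters only; plain functions omit the return type
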
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index 231ef03..fdc00e3 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -11,13 +11,13 @@
 //
 //===----------------------------------------------------------------------===//
 #include "clang/AST/Attr.h"
-#include "clang/AST/Mangle.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/Decl.h"
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/AST/ExprCXX.h"
+#include "clang/AST/Mangle.h"
 #include "clang/Basic/ABI.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Basic/TargetInfo.h"
@@ -64,7 +64,7 @@
 static StdOrFastCC getStdOrFastCallMangling(const ASTContext &Context,
                                             const NamedDecl *ND) {
   const TargetInfo &TI = Context.getTargetInfo();
-  llvm::Triple Triple = TI.getTriple();
+  const llvm::Triple &Triple = TI.getTriple();
   if (!Triple.isOSWindows() || Triple.getArch() != llvm::Triple::x86)
     return SOF_OTHER;
 
@@ -163,13 +163,9 @@
   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
     if (!MD->isStatic())
       ++ArgWords;
-  for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
-         ArgEnd = Proto->arg_type_end();
-       Arg != ArgEnd; ++Arg) {
-    QualType AT = *Arg;
+  for (const auto &AT : Proto->param_types())
     // Size should be aligned to DWORD boundary
     ArgWords += llvm::RoundUpToAlignment(ASTContext.getTypeSize(AT), 32) / 32;
-  }
   Out << 4 * ArgWords;
 }
 
@@ -246,7 +242,9 @@
   OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
   if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
     OS << '(' << *CID << ')';
-  OS << ' ' << MD->getSelector().getAsString() << ']';
+  OS << ' ';
+  MD->getSelector().print(OS);
+  OS << ']';
   
   Out << OS.str().size() << OS.str();
 }
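
The ArgWords loop above rounds every parameter up to a 32-bit slot and emits four bytes per slot after '@' for stdcall/fastcall decoration (plus one extra slot for a non-static method's implicit this, counted just before the loop). A self-contained sketch of that arithmetic, not the Clang helper itself:

#include <cstdio>
#include <initializer_list>

// Each parameter occupies at least one 4-byte stack slot; the decorated name
// carries the total byte count, e.g. int __stdcall f(int, char) -> _f@8.
unsigned stdcallSuffixBytes(std::initializer_list<unsigned> paramBitWidths) {
  unsigned ArgWords = 0;
  for (unsigned Bits : paramBitWidths)
    ArgWords += (Bits + 31) / 32; // RoundUpToAlignment(Bits, 32) / 32
  return 4 * ArgWords;
}

int main() {
  std::printf("%u\n", stdcallSuffixBytes({32, 8}));  // int, char    -> 8
  std::printf("%u\n", stdcallSuffixBytes({64, 16})); // double, short -> 12
  return 0;
}
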
diff --git a/lib/AST/MangleNumberingContext.cpp b/lib/AST/MangleNumberingContext.cpp
index 91ef0e2..b46a085 100644
--- a/lib/AST/MangleNumberingContext.cpp
+++ b/lib/AST/MangleNumberingContext.cpp
@@ -24,7 +24,7 @@
     = CallOperator->getType()->getAs<FunctionProtoType>();
   ASTContext &Context = CallOperator->getASTContext();
 
-  QualType Key = Context.getFunctionType(Context.VoidTy, Proto->getArgTypes(),
+  QualType Key = Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(),
                                          FunctionProtoType::ExtProtoInfo());
   Key = Context.getCanonicalType(Key);
   return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
@@ -38,6 +38,8 @@
 }
 
 unsigned
-MangleNumberingContext::getManglingNumber(const TagDecl *TD) {
-  return ++TagManglingNumbers[TD->getIdentifier()];
+MangleNumberingContext::getStaticLocalNumber(const VarDecl *VD) {
+  // FIXME: Compute a BlockPointerType?  Not obvious how.
+  const Type *Ty = 0;
+  return ++ManglingNumbers[Ty];
 }
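
The lambda numbering above keys its counter on the call operator's parameter types with the return type canonicalized to void, so lambdas with the same signature in one context receive consecutive numbers. A minimal standalone sketch of that keying, with a string standing in for the canonical FunctionProtoType:

#include <cstdio>
#include <map>
#include <string>

int main() {
  // Key stands in for the canonicalized "void(params)" prototype.
  std::map<std::string, unsigned> ManglingNumbers;
  unsigned a = ++ManglingNumbers["void(int)"];   // 1
  unsigned b = ++ManglingNumbers["void(int)"];   // 2: same signature, next number
  unsigned c = ++ManglingNumbers["void(float)"]; // 1: different signature, new counter
  std::printf("%u %u %u\n", a, b, c);
  return 0;
}
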
diff --git a/lib/AST/MicrosoftCXXABI.cpp b/lib/AST/MicrosoftCXXABI.cpp
index 4a93ea1..359e864 100644
--- a/lib/AST/MicrosoftCXXABI.cpp
+++ b/lib/AST/MicrosoftCXXABI.cpp
@@ -13,8 +13,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "CXXABI.h"
-#include "clang/AST/Attr.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/MangleNumberingContext.h"
 #include "clang/AST/RecordLayout.h"
@@ -28,15 +28,15 @@
 /// \brief Numbers things which need to correspond across multiple TUs.
 /// Typically these are things like static locals, lambdas, or blocks.
 class MicrosoftNumberingContext : public MangleNumberingContext {
-  unsigned NumStaticLocals;
-
 public:
-  MicrosoftNumberingContext() : NumStaticLocals(0) { }
+  unsigned getManglingNumber(const VarDecl *VD,
+                             unsigned MSLocalManglingNumber) override {
+    return MSLocalManglingNumber;
+  }
 
-  /// Static locals are numbered by source order.
-  virtual unsigned getManglingNumber(const VarDecl *VD) {
-    assert(VD->isStaticLocal());
-    return ++NumStaticLocals;
+  unsigned getManglingNumber(const TagDecl *TD,
+                             unsigned MSLocalManglingNumber) override {
+    return MSLocalManglingNumber;
   }
 };
 
@@ -46,16 +46,16 @@
   MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
 
   std::pair<uint64_t, unsigned>
-  getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const;
+  getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const override;
 
-  CallingConv getDefaultMethodCallConv(bool isVariadic) const {
+  CallingConv getDefaultMethodCallConv(bool isVariadic) const override {
     if (!isVariadic &&
         Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
       return CC_X86ThisCall;
     return CC_C;
   }
 
-  bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+  bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
     // FIXME: Audit the corners
     if (!RD->isDynamicClass())
       return false;
@@ -69,7 +69,7 @@
       Layout.getNonVirtualSize() == PointerSize * 2;
   }    
 
-  MangleNumberingContext *createMangleNumberingContext() const {
+  MangleNumberingContext *createMangleNumberingContext() const override {
     return new MicrosoftNumberingContext();
   }
 };
@@ -92,26 +92,27 @@
   return false;
 }
 
-static MSInheritanceModel MSInheritanceAttrToModel(attr::Kind Kind) {
-  switch (Kind) {
-  default: llvm_unreachable("expected MS inheritance attribute");
-  case attr::SingleInheritance:      return MSIM_Single;
-  case attr::MultipleInheritance:    return MSIM_Multiple;
-  case attr::VirtualInheritance:     return MSIM_Virtual;
-  case attr::UnspecifiedInheritance: return MSIM_Unspecified;
-  }
+MSInheritanceAttr::Spelling CXXRecordDecl::calculateInheritanceModel() const {
+  if (!hasDefinition())
+    return MSInheritanceAttr::Keyword_unspecified_inheritance;
+  if (getNumVBases() > 0)
+    return MSInheritanceAttr::Keyword_virtual_inheritance;
+  if (usesMultipleInheritanceModel(this))
+    return MSInheritanceAttr::Keyword_multiple_inheritance;
+  return MSInheritanceAttr::Keyword_single_inheritance;
 }
 
-MSInheritanceModel CXXRecordDecl::getMSInheritanceModel() const {
-  if (Attr *IA = this->getAttr<MSInheritanceAttr>())
-    return MSInheritanceAttrToModel(IA->getKind());
-  // If there was no explicit attribute, the record must be defined already, and
-  // we can figure out the inheritance model from its other properties.
-  if (this->getNumVBases() > 0)
-    return MSIM_Virtual;
-  if (usesMultipleInheritanceModel(this))
-    return this->isPolymorphic() ? MSIM_MultiplePolymorphic : MSIM_Multiple;
-  return this->isPolymorphic() ? MSIM_SinglePolymorphic : MSIM_Single;
+MSInheritanceAttr::Spelling
+CXXRecordDecl::getMSInheritanceModel() const {
+  MSInheritanceAttr *IA = getAttr<MSInheritanceAttr>();
+  assert(IA && "Expected MSInheritanceAttr on the CXXRecordDecl!");
+  return IA->getSemanticSpelling();
+}
+
+MSVtorDispAttr::Mode CXXRecordDecl::getMSVtorDispMode() const {
+  if (MSVtorDispAttr *VDA = getAttr<MSVtorDispAttr>())
+    return VDA->getVtorDispMode();
+  return MSVtorDispAttr::Mode(getASTContext().getLangOpts().VtorDispMode);
 }
 
 // Returns the number of pointer and integer slots used to represent a member
@@ -133,49 +134,32 @@
 //     // offset.
 //     int NonVirtualBaseAdjustment;
 //
+//     // The offset of the vb-table pointer within the object.  Only needed for
+//     // incomplete types.
+//     int VBPtrOffset;
+//
 //     // An offset within the vb-table that selects the virtual base containing
 //     // the member.  Loading from this offset produces a new offset that is
 //     // added to the address of the vb-table pointer to produce the base.
 //     int VirtualBaseAdjustmentOffset;
-//
-//     // The offset of the vb-table pointer within the object.  Only needed for
-//     // incomplete types.
-//     int VBPtrOffset;
 //   };
 static std::pair<unsigned, unsigned>
 getMSMemberPointerSlots(const MemberPointerType *MPT) {
-  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
-  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
-  unsigned Ptrs;
+  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+  MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+  unsigned Ptrs = 0;
   unsigned Ints = 0;
-  if (MPT->isMemberFunctionPointer()) {
-    // Member function pointers are a struct of a function pointer followed by a
-    // variable number of ints depending on the inheritance model used.  The
-    // function pointer is a real function if it is non-virtual and a vftable
-    // slot thunk if it is virtual.  The ints select the object base passed for
-    // the 'this' pointer.
-    Ptrs = 1;  // First slot is always a function pointer.
-    switch (Inheritance) {
-    case MSIM_Unspecified: ++Ints;  // VBTableOffset
-    case MSIM_Virtual:     ++Ints;  // VirtualBaseAdjustmentOffset
-    case MSIM_MultiplePolymorphic:
-    case MSIM_Multiple:    ++Ints;  // NonVirtualBaseAdjustment
-    case MSIM_SinglePolymorphic:
-    case MSIM_Single:      break;   // Nothing
-    }
-  } else {
-    // Data pointers are an aggregate of ints.  The first int is an offset
-    // followed by vbtable-related offsets.
-    Ptrs = 0;
-    switch (Inheritance) {
-    case MSIM_Unspecified: ++Ints;  // VBTableOffset
-    case MSIM_Virtual:     ++Ints;  // VirtualBaseAdjustmentOffset
-    case MSIM_MultiplePolymorphic:
-    case MSIM_Multiple:             // Nothing
-    case MSIM_SinglePolymorphic:
-    case MSIM_Single:      ++Ints;  // Field offset
-    }
-  }
+  if (MPT->isMemberFunctionPointer())
+    Ptrs = 1;
+  else
+    Ints = 1;
+  if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
+                                          Inheritance))
+    Ints++;
+  if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
+    Ints++;
+  if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
+    Ints++;
   return std::make_pair(Ptrs, Ints);
 }
 
@@ -185,14 +169,26 @@
   assert(Target.getTriple().getArch() == llvm::Triple::x86 ||
          Target.getTriple().getArch() == llvm::Triple::x86_64);
   unsigned Ptrs, Ints;
-  llvm::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT);
+  std::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT);
   // The nominal struct is laid out with pointers followed by ints and aligned
   // to a pointer width if any are present and an int width otherwise.
   unsigned PtrSize = Target.getPointerWidth(0);
   unsigned IntSize = Target.getIntWidth();
   uint64_t Width = Ptrs * PtrSize + Ints * IntSize;
-  unsigned Align = Ptrs > 0 ? Target.getPointerAlign(0) : Target.getIntAlign();
-  Width = llvm::RoundUpToAlignment(Width, Align);
+  unsigned Align;
+
+  // When MSVC does x86_32 record layout, it aligns aggregate member pointers to
+  // 8 bytes.  However, __alignof usually returns 4 for data memptrs and 8 for
+  // function memptrs.
+  if (Ptrs + Ints > 1 && Target.getTriple().getArch() == llvm::Triple::x86)
+    Align = 8 * 8;
+  else if (Ptrs)
+    Align = Target.getPointerAlign(0);
+  else
+    Align = Target.getIntAlign();
+
+  if (Target.getTriple().getArch() == llvm::Triple::x86_64)
+    Width = llvm::RoundUpToAlignment(Width, Align);
   return std::make_pair(Width, Align);
 }
 
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 5256501..061fc13 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -14,17 +14,21 @@
 #include "clang/AST/Mangle.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/Attr.h"
-#include "clang/AST/CharUnits.h"
 #include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
 #include "clang/AST/Decl.h"
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
 #include "clang/AST/ExprCXX.h"
+#include "clang/AST/VTableBuilder.h"
 #include "clang/Basic/ABI.h"
 #include "clang/Basic/DiagnosticOptions.h"
 #include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringMap.h"
+#include "llvm/Support/MathExtras.h"
 
 using namespace clang;
 
@@ -71,10 +75,100 @@
   return fn;
 }
 
+static bool isLambda(const NamedDecl *ND) {
+  const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+  if (!Record)
+    return false;
+
+  return Record->isLambda();
+}
+
+/// MicrosoftMangleContextImpl - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
+  typedef std::pair<const DeclContext *, IdentifierInfo *> DiscriminatorKeyTy;
+  llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
+  llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+  llvm::DenseMap<const CXXRecordDecl *, unsigned> LambdaIds;
+
+public:
+  MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags)
+      : MicrosoftMangleContext(Context, Diags) {}
+  bool shouldMangleCXXName(const NamedDecl *D) override;
+  bool shouldMangleStringLiteral(const StringLiteral *SL) override;
+  void mangleCXXName(const NamedDecl *D, raw_ostream &Out) override;
+  void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD, raw_ostream &) override;
+  void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
+                   raw_ostream &) override;
+  void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+                          const ThisAdjustment &ThisAdjustment,
+                          raw_ostream &) override;
+  void mangleCXXVFTable(const CXXRecordDecl *Derived,
+                        ArrayRef<const CXXRecordDecl *> BasePath,
+                        raw_ostream &Out) override;
+  void mangleCXXVBTable(const CXXRecordDecl *Derived,
+                        ArrayRef<const CXXRecordDecl *> BasePath,
+                        raw_ostream &Out) override;
+  void mangleCXXRTTI(QualType T, raw_ostream &) override;
+  void mangleCXXRTTIName(QualType T, raw_ostream &) override;
+  void mangleTypeName(QualType T, raw_ostream &) override;
+  void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+                     raw_ostream &) override;
+  void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+                     raw_ostream &) override;
+  void mangleReferenceTemporary(const VarDecl *, raw_ostream &) override;
+  void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override;
+  void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
+  void mangleDynamicAtExitDestructor(const VarDecl *D,
+                                     raw_ostream &Out) override;
+  void mangleStringLiteral(const StringLiteral *SL, raw_ostream &Out) override;
+  bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+    // Lambda closure types are already numbered.
+    if (isLambda(ND))
+      return false;
+
+    const DeclContext *DC = getEffectiveDeclContext(ND);
+    if (!DC->isFunctionOrMethod())
+      return false;
+
+    // Use the canonical number for externally visible decls.
+    if (ND->isExternallyVisible()) {
+      disc = getASTContext().getManglingNumber(ND);
+      return true;
+    }
+
+    // Anonymous tags are already numbered.
+    if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
+      if (Tag->getName().empty() && !Tag->getTypedefNameForAnonDecl())
+        return false;
+    }
+
+    // Make up a reasonable number for internal decls.
+    unsigned &discriminator = Uniquifier[ND];
+    if (!discriminator)
+      discriminator = ++Discriminator[std::make_pair(DC, ND->getIdentifier())];
+    disc = discriminator;
+    return true;
+  }
+
+  unsigned getLambdaId(const CXXRecordDecl *RD) {
+    assert(RD->isLambda() && "RD must be a lambda!");
+    assert(!RD->isExternallyVisible() && "RD must not be visible!");
+    assert(RD->getLambdaManglingNumber() == 0 &&
+           "RD must not have a mangling number!");
+    std::pair<llvm::DenseMap<const CXXRecordDecl *, unsigned>::iterator, bool>
+        Result = LambdaIds.insert(std::make_pair(RD, LambdaIds.size()));
+    return Result.first->second;
+  }
+
+private:
+  void mangleInitFiniStub(const VarDecl *D, raw_ostream &Out, char CharCode);
+};
+
 /// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
 /// Microsoft Visual C++ ABI.
 class MicrosoftCXXNameMangler {
-  MangleContext &Context;
+  MicrosoftMangleContextImpl &Context;
   raw_ostream &Out;
 
   /// The "structor" is the top-level declaration being mangled, if
@@ -99,14 +193,14 @@
 public:
   enum QualifierMangleMode { QMM_Drop, QMM_Mangle, QMM_Escape, QMM_Result };
 
-  MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_)
+  MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_)
     : Context(C), Out(Out_),
       Structor(0), StructorType(-1),
       UseNameBackReferences(true),
       PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
                        64) { }
 
-  MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_,
+  MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
                           const CXXDestructorDecl *D, CXXDtorType Type)
     : Context(C), Out(Out_),
       Structor(getStructor(D)), StructorType(Type),
@@ -118,15 +212,20 @@
 
   void mangle(const NamedDecl *D, StringRef Prefix = "\01?");
   void mangleName(const NamedDecl *ND);
-  void mangleDeclaration(const NamedDecl *ND);
   void mangleFunctionEncoding(const FunctionDecl *FD);
   void mangleVariableEncoding(const VarDecl *VD);
+  void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD);
+  void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
+                                   const CXXMethodDecl *MD);
+  void mangleVirtualMemPtrThunk(
+      const CXXMethodDecl *MD,
+      const MicrosoftVTableContext::MethodVFTableLocation &ML);
   void mangleNumber(int64_t Number);
   void mangleType(QualType T, SourceRange Range,
                   QualifierMangleMode QMM = QMM_Mangle);
   void mangleFunctionType(const FunctionType *T, const FunctionDecl *D = 0,
                           bool ForceInstMethod = false);
-  void manglePostfix(const DeclContext *DC, bool NoFunction = false);
+  void mangleNestedName(const NamedDecl *ND);
 
 private:
   void disableBackReferences() { UseNameBackReferences = false; }
@@ -138,13 +237,13 @@
   void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
   void mangleCXXDtorType(CXXDtorType T);
   void mangleQualifiers(Qualifiers Quals, bool IsMember);
-  void manglePointerQualifiers(Qualifiers Quals);
+  void manglePointerCVQualifiers(Qualifiers Quals);
+  void manglePointerExtQualifiers(Qualifiers Quals, const Type *PointeeType);
 
   void mangleUnscopedTemplateName(const TemplateDecl *ND);
   void mangleTemplateInstantiationName(const TemplateDecl *TD,
                                       const TemplateArgumentList &TemplateArgs);
   void mangleObjCMethodName(const ObjCMethodDecl *MD);
-  void mangleLocalName(const FunctionDecl *FD);
 
   void mangleArgumentType(QualType T, SourceRange Range);
 
@@ -157,7 +256,7 @@
 #undef ABSTRACT_TYPE
 #undef NON_CANONICAL_TYPE
 #undef TYPE
-  
+
   void mangleType(const TagDecl *TD);
   void mangleDecayedArrayType(const ArrayType *T);
   void mangleArrayType(const ArrayType *T);
@@ -171,47 +270,6 @@
                           const TemplateArgumentList &TemplateArgs);
   void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA);
 };
-
-/// MicrosoftMangleContextImpl - Overrides the default MangleContext for the
-/// Microsoft Visual C++ ABI.
-class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
-public:
-  MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags)
-      : MicrosoftMangleContext(Context, Diags) {}
-  virtual bool shouldMangleCXXName(const NamedDecl *D);
-  virtual void mangleCXXName(const NamedDecl *D, raw_ostream &Out);
-  virtual void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
-                                        uint64_t OffsetInVFTable,
-                                        raw_ostream &);
-  virtual void mangleThunk(const CXXMethodDecl *MD,
-                           const ThunkInfo &Thunk,
-                           raw_ostream &);
-  virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
-                                  const ThisAdjustment &ThisAdjustment,
-                                  raw_ostream &);
-  virtual void mangleCXXVFTable(const CXXRecordDecl *Derived,
-                                ArrayRef<const CXXRecordDecl *> BasePath,
-                                raw_ostream &Out);
-  virtual void mangleCXXVBTable(const CXXRecordDecl *Derived,
-                                ArrayRef<const CXXRecordDecl *> BasePath,
-                                raw_ostream &Out);
-  virtual void mangleCXXRTTI(QualType T, raw_ostream &);
-  virtual void mangleCXXRTTIName(QualType T, raw_ostream &);
-  virtual void mangleTypeName(QualType T, raw_ostream &);
-  virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
-                             raw_ostream &);
-  virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
-                             raw_ostream &);
-  virtual void mangleReferenceTemporary(const VarDecl *, raw_ostream &);
-  virtual void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out);
-  virtual void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out);
-  virtual void mangleDynamicAtExitDestructor(const VarDecl *D,
-                                             raw_ostream &Out);
-
-private:
-  void mangleInitFiniStub(const VarDecl *D, raw_ostream &Out, char CharCode);
-};
-
 }
 
 bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
@@ -267,6 +325,13 @@
   return true;
 }
 
+bool
+MicrosoftMangleContextImpl::shouldMangleStringLiteral(const StringLiteral *SL) {
+  return SL->isAscii() || SL->isWide();
+  // TODO: This needs to be updated when MSVC gains support for Unicode
+  // literals.
+}
+
 void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
                                      StringRef Prefix) {
   // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
@@ -324,7 +389,7 @@
   //                 ::= 2  # public static member
   //                 ::= 3  # global
   //                 ::= 4  # static local
-  
+
   // The first character in the encoding (after the name) is the storage class.
   if (VD->isStaticDataMember()) {
     // If it's a static member, it also encodes the access level.
@@ -345,12 +410,12 @@
   // Pointers and references are odd. The type of 'int * const foo;' gets
   // mangled as 'QAHA' instead of 'PAHB', for example.
   TypeLoc TL = VD->getTypeSourceInfo()->getTypeLoc();
-  QualType Ty = TL.getType();
+  QualType Ty = VD->getType();
   if (Ty->isPointerType() || Ty->isReferenceType() ||
       Ty->isMemberPointerType()) {
     mangleType(Ty, TL.getSourceRange(), QMM_Drop);
-    if (PointersAre64Bit)
-      Out << 'E';
+    manglePointerExtQualifiers(
+        Ty.getDesugaredType(getASTContext()).getLocalQualifiers(), 0);
     if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()) {
       mangleQualifiers(MPT->getPointeeType().getQualifiers(), true);
       // Member pointers are suffixed with a back reference to the member
@@ -371,20 +436,130 @@
   }
 }
 
+void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
+                                                      const ValueDecl *VD) {
+  // <member-data-pointer> ::= <integer-literal>
+  //                       ::= $F <number> <number>
+  //                       ::= $G <number> <number> <number>
+
+  int64_t FieldOffset;
+  int64_t VBTableOffset;
+  MSInheritanceAttr::Spelling IM = RD->getMSInheritanceModel();
+  if (VD) {
+    FieldOffset = getASTContext().getFieldOffset(VD);
+    assert(FieldOffset % getASTContext().getCharWidth() == 0 &&
+           "cannot take address of bitfield");
+    FieldOffset /= getASTContext().getCharWidth();
+
+    VBTableOffset = 0;
+  } else {
+    FieldOffset = RD->nullFieldOffsetIsZero() ? 0 : -1;
+
+    VBTableOffset = -1;
+  }
+
+  char Code = '\0';
+  switch (IM) {
+  case MSInheritanceAttr::Keyword_single_inheritance:      Code = '0'; break;
+  case MSInheritanceAttr::Keyword_multiple_inheritance:    Code = '0'; break;
+  case MSInheritanceAttr::Keyword_virtual_inheritance:     Code = 'F'; break;
+  case MSInheritanceAttr::Keyword_unspecified_inheritance: Code = 'G'; break;
+  }
+
+  Out << '$' << Code;
+
+  mangleNumber(FieldOffset);
+
+  if (MSInheritanceAttr::hasVBPtrOffsetField(IM))
+    mangleNumber(0);
+  if (MSInheritanceAttr::hasVBTableOffsetField(IM))
+    mangleNumber(VBTableOffset);
+}
+
+void
+MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
+                                                     const CXXMethodDecl *MD) {
+  // <member-function-pointer> ::= $1? <name>
+  //                           ::= $H? <name> <number>
+  //                           ::= $I? <name> <number> <number>
+  //                           ::= $J? <name> <number> <number> <number>
+  //                           ::= $0A@
+
+  MSInheritanceAttr::Spelling IM = RD->getMSInheritanceModel();
+
+  // The null member function pointer is $0A@ in function templates and crashes
+  // MSVC when used in class templates, so we don't know what they really look
+  // like.
+  if (!MD) {
+    Out << "$0A@";
+    return;
+  }
+
+  char Code = '\0';
+  switch (IM) {
+  case MSInheritanceAttr::Keyword_single_inheritance:      Code = '1'; break;
+  case MSInheritanceAttr::Keyword_multiple_inheritance:    Code = 'H'; break;
+  case MSInheritanceAttr::Keyword_virtual_inheritance:     Code = 'I'; break;
+  case MSInheritanceAttr::Keyword_unspecified_inheritance: Code = 'J'; break;
+  }
+
+  Out << '$' << Code << '?';
+
+  // If non-virtual, mangle the name.  If virtual, mangle as a virtual memptr
+  // thunk.
+  uint64_t NVOffset = 0;
+  uint64_t VBTableOffset = 0;
+  if (MD->isVirtual()) {
+    MicrosoftVTableContext *VTContext =
+        cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
+    const MicrosoftVTableContext::MethodVFTableLocation &ML =
+        VTContext->getMethodVFTableLocation(GlobalDecl(MD));
+    mangleVirtualMemPtrThunk(MD, ML);
+    NVOffset = ML.VFPtrOffset.getQuantity();
+    VBTableOffset = ML.VBTableIndex * 4;
+    if (ML.VBase) {
+      DiagnosticsEngine &Diags = Context.getDiags();
+      unsigned DiagID = Diags.getCustomDiagID(
+          DiagnosticsEngine::Error,
+          "cannot mangle pointers to member functions from virtual bases");
+      Diags.Report(MD->getLocation(), DiagID);
+    }
+  } else {
+    mangleName(MD);
+    mangleFunctionEncoding(MD);
+  }
+
+  if (MSInheritanceAttr::hasNVOffsetField(/*IsMemberFunction=*/true, IM))
+    mangleNumber(NVOffset);
+  if (MSInheritanceAttr::hasVBPtrOffsetField(IM))
+    mangleNumber(0);
+  if (MSInheritanceAttr::hasVBTableOffsetField(IM))
+    mangleNumber(VBTableOffset);
+}
+
+void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
+    const CXXMethodDecl *MD,
+    const MicrosoftVTableContext::MethodVFTableLocation &ML) {
+  // Get the vftable offset.
+  CharUnits PointerWidth = getASTContext().toCharUnitsFromBits(
+      getASTContext().getTargetInfo().getPointerWidth(0));
+  uint64_t OffsetInVFTable = ML.Index * PointerWidth.getQuantity();
+
+  Out << "?_9";
+  mangleName(MD->getParent());
+  Out << "$B";
+  mangleNumber(OffsetInVFTable);
+  Out << 'A';
+  Out << (PointersAre64Bit ? 'A' : 'E');
+}
+
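Which extra <number> fields the two member-pointer manglers above emit is driven entirely by the class's MS inheritance model (the hasNVOffsetField/hasVBPtrOffsetField/hasVBTableOffsetField checks). A rough source-level sketch of the inputs involved, assuming an MSVC-compatible compiler such as clang-cl; the per-model breakdown in the comments follows those has*Field checks:

// Standalone sketch, not part of this file.
struct __single_inheritance      S;  // data memptr: field offset only
struct __multiple_inheritance    M;  // function memptr also carries an NV offset
struct __virtual_inheritance     V;  // adds a vbtable offset field
struct __unspecified_inheritance U;  // adds both a vbptr offset and a vbtable offset

int S::*DataPtr = nullptr;           // goes through mangleMemberDataPointer
void (U::*FuncPtr)() = nullptr;      // goes through mangleMemberFunctionPointer

int main() { return 0; }
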
 void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
   // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
-  const DeclContext *DC = ND->getDeclContext();
 
   // Always start with the unqualified name.
-  mangleUnqualifiedName(ND);    
+  mangleUnqualifiedName(ND);
 
-  // If this is an extern variable declared locally, the relevant DeclContext
-  // is that of the containing namespace, or the translation unit.
-  if (isa<FunctionDecl>(DC) && ND->hasLinkage())
-    while (!DC->isNamespace() && !DC->isTranslationUnit())
-      DC = DC->getParent();
-
-  manglePostfix(DC);
+  mangleNestedName(ND);
 
   // Terminate the whole name with an '@'.
   Out << '@';
@@ -438,6 +613,13 @@
     return Spec->getSpecializedTemplate();
   }
 
+  // Check if we have a variable template.
+  if (const VarTemplateSpecializationDecl *Spec =
+          dyn_cast<VarTemplateSpecializationDecl>(ND)) {
+    TemplateArgs = &Spec->getTemplateArgs();
+    return Spec->getSpecializedTemplate();
+  }
+
   return 0;
 }
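The new branch lets variable template specializations take the same path as class template specializations; a minimal C++14 input that reaches it looks like:

// Requires -std=c++14; each specialization of Zero is a
// VarTemplateSpecializationDecl whose arguments flow through the branch above.
template <typename T> T Zero = T();
template int Zero<int>;  // explicit instantiation of the int specialization
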
 
@@ -461,7 +643,6 @@
       return;
     }
 
-    // We have a class template.
     // Here comes the tricky thing: if we need to mangle something like
     //   void foo(A::X<Y>, B::X<Y>),
     // the X<Y> part is aliased. However, if you need to mangle
@@ -506,17 +687,31 @@
         mangleSourceName(II->getName());
         break;
       }
-      
+
       // Otherwise, an anonymous entity.  We must have a declaration.
       assert(ND && "mangling empty name without declaration");
-      
+
       if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
         if (NS->isAnonymousNamespace()) {
           Out << "?A@";
           break;
         }
       }
-      
+
+      if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+        // We must have an anonymous union or struct declaration.
+        const CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl();
+        assert(RD && "expected variable decl to have a record type");
+        // Anonymous types with no tag or typedef get the name of their
+        // declarator mangled in.  If they have no declarator, number them with
+        // a $S prefix.
+        llvm::SmallString<64> Name("$S");
+        // Get a unique id for the anonymous struct.
+        Name += llvm::utostr(Context.getAnonymousStructId(RD) + 1);
+        mangleSourceName(Name.str());
+        break;
+      }
+
       // We must have an anonymous struct.
       const TagDecl *TD = cast<TagDecl>(ND);
       if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
@@ -528,26 +723,43 @@
         break;
       }
 
+      if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
+        if (Record->isLambda()) {
+          llvm::SmallString<10> Name("<lambda_");
+          unsigned LambdaId;
+          if (Record->getLambdaManglingNumber())
+            LambdaId = Record->getLambdaManglingNumber();
+          else
+            LambdaId = Context.getLambdaId(Record);
+
+          Name += llvm::utostr(LambdaId);
+          Name += ">";
+
+          mangleSourceName(Name);
+          break;
+        }
+      }
+
+      llvm::SmallString<64> Name("<unnamed-type-");
       if (TD->hasDeclaratorForAnonDecl()) {
         // Anonymous types with no tag or typedef get the name of their
-        // declarator mangled in.
-        llvm::SmallString<64> Name("<unnamed-type-");
+        // declarator mangled in if they have one.
         Name += TD->getDeclaratorForAnonDecl()->getName();
-        Name += ">";
-        mangleSourceName(Name.str());
       } else {
-        // Anonymous types with no tag, no typedef, or declarator get
-        // '<unnamed-tag>'.
-        mangleSourceName("<unnamed-tag>");
+        // Otherwise, number the types using a $S prefix.
+        Name += "$S";
+        Name += llvm::utostr(Context.getAnonymousStructId(TD));
       }
+      Name += ">";
+      mangleSourceName(Name.str());
       break;
     }
-      
+
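Concretely, the unnamed-entity cases above map to source constructs like the following (a standalone sketch; the quoted fragments are the names built in the code above):

// Unnamed tag with a declarator        -> "<unnamed-type-GlobalPoint>"
struct { int X; } GlobalPoint;
// Unnamed tag with a typedef           -> mangled via the typedef name
typedef struct { int Y; } Point;
// Anonymous union object (a VarDecl)   -> numbered with a "$S" prefix
static union { int I; float F; };
// Lambda                               -> "<lambda_N>"
auto GlobalFn = [] { return 0; };

int main() { return 0; }
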
     case DeclarationName::ObjCZeroArgSelector:
     case DeclarationName::ObjCOneArgSelector:
     case DeclarationName::ObjCMultiArgSelector:
       llvm_unreachable("Can't mangle Objective-C selector names here!");
-      
+
     case DeclarationName::CXXConstructorName:
       if (ND == Structor) {
         assert(StructorType == Ctor_Complete &&
@@ -555,7 +767,7 @@
       }
       Out << "?0";
       break;
-      
+
     case DeclarationName::CXXDestructorName:
       if (ND == Structor)
         // If the named decl is the C++ destructor we're mangling,
@@ -566,17 +778,17 @@
         // class with a destructor is declared within a destructor.
         mangleCXXDtorType(Dtor_Base);
       break;
-      
+
     case DeclarationName::CXXConversionFunctionName:
       // <operator-name> ::= ?B # (cast)
       // The target type is encoded as the return type.
       Out << "?B";
       break;
-      
+
     case DeclarationName::CXXOperatorName:
       mangleOperatorName(Name.getCXXOverloadedOperator(), ND->getLocation());
       break;
-      
+
     case DeclarationName::CXXLiteralOperatorName: {
       // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
       DiagnosticsEngine Diags = Context.getDiags();
@@ -585,51 +797,53 @@
       Diags.Report(ND->getLocation(), DiagID);
       break;
     }
-      
+
     case DeclarationName::CXXUsingDirective:
       llvm_unreachable("Can't mangle a using directive name!");
   }
 }
 
-void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
-                                            bool NoFunction) {
+void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
   // <postfix> ::= <unqualified-name> [<postfix>]
   //           ::= <substitution> [<postfix>]
+  if (isLambda(ND))
+    return;
 
-  if (!DC) return;
+  const DeclContext *DC = ND->getDeclContext();
 
-  while (isa<LinkageSpecDecl>(DC))
+  while (!DC->isTranslationUnit()) {
+    if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
+      unsigned Disc;
+      if (Context.getNextDiscriminator(ND, Disc)) {
+        Out << '?';
+        mangleNumber(Disc);
+        Out << '?';
+      }
+    }
+
+    if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+      DiagnosticsEngine Diags = Context.getDiags();
+      unsigned DiagID =
+          Diags.getCustomDiagID(DiagnosticsEngine::Error,
+                                "cannot mangle a local inside this block yet");
+      Diags.Report(BD->getLocation(), DiagID);
+
+      // FIXME: This is completely, utterly, wrong; see ItaniumMangle
+      // for how this should be done.
+      Out << "__block_invoke" << Context.getBlockId(BD, false);
+      Out << '@';
+      continue;
+    } else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
+      mangleObjCMethodName(Method);
+    } else if (isa<NamedDecl>(DC)) {
+      ND = cast<NamedDecl>(DC);
+      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+        mangle(FD, "?");
+        break;
+      } else
+        mangleUnqualifiedName(ND);
+    }
     DC = DC->getParent();
-
-  if (DC->isTranslationUnit())
-    return;
-
-  if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
-    DiagnosticsEngine Diags = Context.getDiags();
-    unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
-      "cannot mangle a local inside this block yet");
-    Diags.Report(BD->getLocation(), DiagID);
-
-    // FIXME: This is completely, utterly, wrong; see ItaniumMangle
-    // for how this should be done.
-    Out << "__block_invoke" << Context.getBlockId(BD, false);
-    Out << '@';
-    return manglePostfix(DC->getParent(), NoFunction);
-  } else if (isa<CapturedDecl>(DC)) {
-    // Skip CapturedDecl context.
-    manglePostfix(DC->getParent(), NoFunction);
-    return;
-  }
-
-  if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
-    return;
-  else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
-    mangleObjCMethodName(Method);
-  else if (const FunctionDecl *Func = dyn_cast<FunctionDecl>(DC))
-    mangleLocalName(Func);
-  else {
-    mangleUnqualifiedName(cast<NamedDecl>(DC));
-    manglePostfix(DC->getParent(), NoFunction);
   }
 }
 
@@ -768,7 +982,7 @@
   case OO_Array_New: Out << "?_U"; break;
   // <operator-name> ::= ?_V # delete[]
   case OO_Array_Delete: Out << "?_V"; break;
-    
+
   case OO_Conditional: {
     DiagnosticsEngine &Diags = Context.getDiags();
     unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -776,7 +990,7 @@
     Diags.Report(Loc, DiagID);
     break;
   }
-    
+
   case OO_None:
   case NUM_OVERLOADED_OPERATORS:
     llvm_unreachable("Not an overloaded operator");
@@ -803,44 +1017,6 @@
   Context.mangleObjCMethodName(MD, Out);
 }
 
-// Find out how many function decls live above this one and return an integer
-// suitable for use as the number in a numbered anonymous scope.
-// TODO: Memoize.
-static unsigned getLocalNestingLevel(const FunctionDecl *FD) {
-  const DeclContext *DC = FD->getParent();
-  int level = 1;
-
-  while (DC && !DC->isTranslationUnit()) {
-    if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) level++;
-    DC = DC->getParent();
-  }
-
-  return 2*level;
-}
-
-void MicrosoftCXXNameMangler::mangleLocalName(const FunctionDecl *FD) {
-  // <nested-name> ::= <numbered-anonymous-scope> ? <mangled-name>
-  // <numbered-anonymous-scope> ::= ? <number>
-  // Even though the name is rendered in reverse order (e.g.
-  // A::B::C is rendered as C@B@A), VC numbers the scopes from outermost to
-  // innermost. So a method bar in class C local to function foo gets mangled
-  // as something like:
-  // ?bar@C@?1??foo@@YAXXZ@QAEXXZ
-  // This is more apparent when you have a type nested inside a method of a
-  // type nested inside a function. A method baz in class D local to method
-  // bar of class C local to function foo gets mangled as:
-  // ?baz@D@?3??bar@C@?1??foo@@YAXXZ@QAEXXZ@QAEXXZ
-  // This scheme is general enough to support GCC-style nested
-  // functions. You could have a method baz of class C inside a function bar
-  // inside a function foo, like so:
-  // ?baz@C@?3??bar@?1??foo@@YAXXZ@YAXXZ@QAEXXZ
-  unsigned NestLevel = getLocalNestingLevel(FD);
-  Out << '?';
-  mangleNumber(NestLevel);
-  Out << '?';
-  mangle(FD, "?");
-}
-
 void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
                                                          const TemplateDecl *TD,
                      const TemplateArgumentList &TemplateArgs) {
@@ -925,20 +1101,25 @@
     << E->getStmtClassName() << E->getSourceRange();
 }
 
-void
-MicrosoftCXXNameMangler::mangleTemplateArgs(const TemplateDecl *TD,
-                                     const TemplateArgumentList &TemplateArgs) {
-  // <template-args> ::= {<type> | <integer-literal>}+ @
-  unsigned NumTemplateArgs = TemplateArgs.size();
-  for (unsigned i = 0; i < NumTemplateArgs; ++i) {
-    const TemplateArgument &TA = TemplateArgs[i];
+void MicrosoftCXXNameMangler::mangleTemplateArgs(
+    const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
+  // <template-args> ::= <template-arg>+ @
+  for (const TemplateArgument &TA : TemplateArgs.asArray())
     mangleTemplateArg(TD, TA);
-  }
   Out << '@';
 }
 
 void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
                                                 const TemplateArgument &TA) {
+  // <template-arg> ::= <type>
+  //                ::= <integer-literal>
+  //                ::= <member-data-pointer>
+  //                ::= <member-function-pointer>
+  //                ::= $E? <name> <type-encoding>
+  //                ::= $1? <name> <type-encoding>
+  //                ::= $0A@
+  //                ::= <template-args>
+
   switch (TA.getKind()) {
   case TemplateArgument::Null:
     llvm_unreachable("Can't mangle null template arguments!");
@@ -951,16 +1132,38 @@
   }
   case TemplateArgument::Declaration: {
     const NamedDecl *ND = cast<NamedDecl>(TA.getAsDecl());
-    mangle(ND, TA.isDeclForReferenceParam() ? "$E?" : "$1?");
+    if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) {
+      mangleMemberDataPointer(
+          cast<CXXRecordDecl>(ND->getDeclContext())->getMostRecentDecl(),
+          cast<ValueDecl>(ND));
+    } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+      if (MD && MD->isInstance())
+        mangleMemberFunctionPointer(MD->getParent()->getMostRecentDecl(), MD);
+      else
+        mangle(FD, "$1?");
+    } else {
+      mangle(ND, TA.isDeclForReferenceParam() ? "$E?" : "$1?");
+    }
     break;
   }
   case TemplateArgument::Integral:
     mangleIntegerLiteral(TA.getAsIntegral(),
                          TA.getIntegralType()->isBooleanType());
     break;
-  case TemplateArgument::NullPtr:
-    Out << "$0A@";
+  case TemplateArgument::NullPtr: {
+    QualType T = TA.getNullPtrType();
+    if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
+      const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+      if (MPT->isMemberFunctionPointerType())
+        mangleMemberFunctionPointer(RD, 0);
+      else
+        mangleMemberDataPointer(RD, 0);
+    } else {
+      Out << "$0A@";
+    }
     break;
+  }
   case TemplateArgument::Expression:
     mangleExpression(TA.getAsExpr());
     break;
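Template arguments that exercise the new Declaration and NullPtr handling above are member pointers, for example (standalone sketch):

struct S {
  int I;
  void F() {}
};

template <int S::*P> struct DataArg {};       // Declaration -> mangleMemberDataPointer
template <void (S::*P)()> struct FuncArg {};  // Declaration -> mangleMemberFunctionPointer

DataArg<&S::I> A;
FuncArg<&S::F> B;
DataArg<nullptr> C;                           // NullPtr of member-pointer type

int main() { return 0; }
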
@@ -1059,13 +1262,25 @@
   // FIXME: For now, just drop all extension qualifiers on the floor.
 }
 
-void MicrosoftCXXNameMangler::manglePointerQualifiers(Qualifiers Quals) {
-  // <pointer-cvr-qualifiers> ::= P  # no qualifiers
-  //                          ::= Q  # const
-  //                          ::= R  # volatile
-  //                          ::= S  # const volatile
+void
+MicrosoftCXXNameMangler::manglePointerExtQualifiers(Qualifiers Quals,
+                                                    const Type *PointeeType) {
+  bool HasRestrict = Quals.hasRestrict();
+  if (PointersAre64Bit && (!PointeeType || !PointeeType->isFunctionType()))
+    Out << 'E';
+
+  if (HasRestrict)
+    Out << 'I';
+}
+
+void MicrosoftCXXNameMangler::manglePointerCVQualifiers(Qualifiers Quals) {
+  // <pointer-cv-qualifiers> ::= P  # no qualifiers
+  //                         ::= Q  # const
+  //                         ::= R  # volatile
+  //                         ::= S  # const volatile
   bool HasConst = Quals.hasConst(),
        HasVolatile = Quals.hasVolatile();
+
   if (HasConst && HasVolatile) {
     Out << 'S';
   } else if (HasVolatile) {
@@ -1165,8 +1380,10 @@
   }
 
   // We have to mangle these now, while we still have enough information.
-  if (IsPointer)
-    manglePointerQualifiers(Quals);
+  if (IsPointer) {
+    manglePointerCVQualifiers(Quals);
+    manglePointerExtQualifiers(Quals, T->getPointeeType().getTypePtr());
+  }
   const Type *ty = T.getTypePtr();
 
   switch (ty->getTypeClass()) {
@@ -1254,7 +1471,7 @@
   case BuiltinType::OCLImage3d: Out << "PAUocl_image3d@@"; break;
   case BuiltinType::OCLSampler: Out << "PAUocl_sampler@@"; break;
   case BuiltinType::OCLEvent: Out << "PAUocl_event@@"; break;
- 
+
   case BuiltinType::NullPtr: Out << "$$T"; break;
 
   case BuiltinType::Char16:
@@ -1306,9 +1523,9 @@
   // If this is a C++ instance method, mangle the CVR qualifiers for the
   // this pointer.
   if (IsInstMethod) {
-    if (PointersAre64Bit)
-      Out << 'E';
-    mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
+    Qualifiers Quals = Qualifiers::fromCVRMask(Proto->getTypeQuals());
+    manglePointerExtQualifiers(Quals, 0);
+    mangleQualifiers(Quals, false);
   }
 
   mangleCallingConvention(T);
@@ -1327,23 +1544,30 @@
     }
     Out << '@';
   } else {
-    QualType ResultType = Proto->getResultType();
-    if (ResultType->isVoidType())
-      ResultType = ResultType.getUnqualifiedType();
-    mangleType(ResultType, Range, QMM_Result);
+    QualType ResultType = Proto->getReturnType();
+    if (const auto *AT =
+            dyn_cast_or_null<AutoType>(ResultType->getContainedAutoType())) {
+      Out << '?';
+      mangleQualifiers(ResultType.getLocalQualifiers(), /*IsMember=*/false);
+      Out << '?';
+      mangleSourceName(AT->isDecltypeAuto() ? "<decltype-auto>" : "<auto>");
+      Out << '@';
+    } else {
+      if (ResultType->isVoidType())
+        ResultType = ResultType.getUnqualifiedType();
+      mangleType(ResultType, Range, QMM_Result);
+    }
   }
 
   // <argument-list> ::= X # void
   //                 ::= <type>+ @
   //                 ::= <type>* Z # varargs
-  if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+  if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
     Out << 'X';
   } else {
     // Happens for function pointer type arguments for example.
-    for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
-         ArgEnd = Proto->arg_type_end();
-         Arg != ArgEnd; ++Arg)
-      mangleArgumentType(*Arg, Range);
+    for (const auto &Arg : Proto->param_types())
+      mangleArgumentType(Arg, Range);
     // <builtin-type>      ::= Z  # ellipsis
     if (Proto->isVariadic())
       Out << 'Z';
@@ -1465,7 +1689,7 @@
 // <union-type>  ::= T <name>
 // <struct-type> ::= U <name>
 // <class-type>  ::= V <name>
-// <enum-type>   ::= W <size> <name>
+// <enum-type>   ::= W4 <name>
 void MicrosoftCXXNameMangler::mangleType(const EnumType *T, SourceRange) {
   mangleType(cast<TagType>(T)->getDecl());
 }
@@ -1485,9 +1709,7 @@
       Out << 'V';
       break;
     case TTK_Enum:
-      Out << 'W';
-      Out << getASTContext().getTypeSizeInChars(
-                cast<EnumDecl>(TD)->getIntegerType()).getQuantity();
+      Out << "W4";
       break;
   }
   mangleName(TD);
@@ -1503,7 +1725,7 @@
 void MicrosoftCXXNameMangler::mangleDecayedArrayType(const ArrayType *T) {
   // This isn't a recursive mangling, so now we have to do it all in this
   // one call.
-  manglePointerQualifiers(T->getElementType().getQualifiers());
+  manglePointerCVQualifiers(T->getElementType().getQualifiers());
   mangleType(T->getElementType(), SourceRange());
 }
 void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T,
@@ -1575,8 +1797,6 @@
     mangleName(T->getClass()->castAs<RecordType>()->getDecl());
     mangleFunctionType(FPT, 0, true);
   } else {
-    if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
-      Out << 'E';
     mangleQualifiers(PointeeType.getQualifiers(), true);
     mangleName(T->getClass()->castAs<RecordType>()->getDecl());
     mangleType(PointeeType, Range, QMM_Drop);
@@ -1604,42 +1824,37 @@
 
 // <type> ::= <pointer-type>
 // <pointer-type> ::= E? <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
-//                       # the E is required for 64-bit non static pointers
+//                       # the E is required for 64-bit non-static pointers
 void MicrosoftCXXNameMangler::mangleType(const PointerType *T,
                                          SourceRange Range) {
   QualType PointeeTy = T->getPointeeType();
-  if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
-    Out << 'E';
   mangleType(PointeeTy, Range);
 }
 void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
                                          SourceRange Range) {
   // Object pointers never have qualifiers.
   Out << 'A';
-  if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
-    Out << 'E';
+  manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr());
   mangleType(T->getPointeeType(), Range);
 }
 
 // <type> ::= <reference-type>
 // <reference-type> ::= A E? <cvr-qualifiers> <type>
-//                 # the E is required for 64-bit non static lvalue references
+//                 # the E is required for 64-bit non-static lvalue references
 void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
                                          SourceRange Range) {
   Out << 'A';
-  if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
-    Out << 'E';
+  manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr());
   mangleType(T->getPointeeType(), Range);
 }
 
 // <type> ::= <r-value-reference-type>
 // <r-value-reference-type> ::= $$Q E? <cvr-qualifiers> <type>
-//                 # the E is required for 64-bit non static rvalue references
+//                 # the E is required for 64-bit non-static rvalue references
 void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T,
                                          SourceRange Range) {
   Out << "$$Q";
-  if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
-    Out << 'E';
+  manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr());
   mangleType(T->getPointeeType(), Range);
 }
 
@@ -1804,6 +2019,8 @@
 }
 
 void MicrosoftCXXNameMangler::mangleType(const AutoType *T, SourceRange Range) {
+  assert(T->getDeducedType().isNull() && "expecting a dependent type!");
+
   DiagnosticsEngine &Diags = Context.getDiags();
   unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
     "cannot mangle this 'auto' type yet");
@@ -1923,17 +2140,17 @@
   }
 }
 
-void MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(
-    const CXXMethodDecl *MD, uint64_t OffsetInVFTable, raw_ostream &Out) {
-  bool Is64Bit = getASTContext().getTargetInfo().getPointerWidth(0) == 64;
+void
+MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
+                                                     raw_ostream &Out) {
+  MicrosoftVTableContext *VTContext =
+      cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
+  const MicrosoftVTableContext::MethodVFTableLocation &ML =
+      VTContext->getMethodVFTableLocation(GlobalDecl(MD));
 
   MicrosoftCXXNameMangler Mangler(*this, Out);
-  Mangler.getStream() << "\01??_9";
-  Mangler.mangleName(MD->getParent());
-  Mangler.getStream() << "$B";
-  Mangler.mangleNumber(OffsetInVFTable);
-  Mangler.getStream() << "A";
-  Mangler.getStream() << (Is64Bit ? "A" : "E");
+  Mangler.getStream() << "\01?";
+  Mangler.mangleVirtualMemPtrThunk(MD, ML);
 }
 
 void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
@@ -2050,7 +2267,16 @@
 
 void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD,
                                                            raw_ostream &Out) {
-  // <guard-name> ::= ?_B <postfix> @51
+  // TODO: This is not correct, especially with respect to MSVC2013.  MSVC2013
+  // utilizes thread-local variables to implement thread-safe, re-entrant
+  // initialization for statics.  They no longer differentiate between an
+  // externally visible and non-externally visible static with respect to
+  // mangling; they all get $TSS <number>.
+  //
+  // N.B. This means that they can get more than 32 static variable guards in a
+  // scope.  It also means that they broke compatibility with their own ABI.
+
+  // <guard-name> ::= ?_B <postfix> @5 <scope-depth>
   //              ::= ?$S <guard-num> @ <postfix> @4IA
 
   // The first mangling is what MSVC uses to guard static locals in inline
@@ -2064,8 +2290,17 @@
   bool Visible = VD->isExternallyVisible();
   // <operator-name> ::= ?_B # local static guard
   Mangler.getStream() << (Visible ? "\01??_B" : "\01?$S1@");
-  Mangler.manglePostfix(VD->getDeclContext());
-  Mangler.getStream() << (Visible ? "@51" : "@4IA");
+  unsigned ScopeDepth = 0;
+  if (Visible && !getNextDiscriminator(VD, ScopeDepth))
+    // If we do not have a discriminator and are emitting a guard variable for
+    // use at global scope, then mangling the nested name will not be enough to
+    // remove ambiguities.
+    Mangler.mangle(VD, "");
+  else
+    Mangler.mangleNestedName(VD);
+  Mangler.getStream() << (Visible ? "@5" : "@4IA");
+  if (ScopeDepth)
+    Mangler.mangleNumber(ScopeDepth);
 }
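The two guard spellings track whether the guarded static is externally visible; a sketch of the source patterns behind each form (compute() is a hypothetical helper, only there to force dynamic initialization):

int compute();

inline int VisibleCounter() {   // externally visible guard: "??_B" ... "@5" [<scope-depth>]
  static int Guarded = compute();
  return Guarded;
}

static int HiddenCounter() {    // internal-linkage guard: "?$S1@" ... "@4IA"
  static int Guarded = compute();
  return Guarded;
}

int main() { return 0; }
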
 
 void MicrosoftMangleContextImpl::mangleInitFiniStub(const VarDecl *D,
@@ -2074,6 +2309,10 @@
   MicrosoftCXXNameMangler Mangler(*this, Out);
   Mangler.getStream() << "\01??__" << CharCode;
   Mangler.mangleName(D);
+  if (D->isStaticDataMember()) {
+    Mangler.mangleVariableEncoding(D);
+    Mangler.getStream() << '@';
+  }
   // This is the function class mangling.  These stubs are global, non-variadic,
   // cdecl functions that return void and take no args.
   Mangler.getStream() << "YAXXZ";
@@ -2092,6 +2331,172 @@
   mangleInitFiniStub(D, Out, 'F');
 }
 
+void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
+                                                     raw_ostream &Out) {
+  // <char-type> ::= 0   # char
+  //             ::= 1   # wchar_t
+  //             ::= ??? # char16_t/char32_t will need a mangling too...
+  //
+  // <literal-length> ::= <non-negative integer>  # the length of the literal
+  //
+  // <encoded-crc>    ::= <hex digit>+ @          # crc of the literal including
+  //                                              # null-terminator
+  //
+  // <encoded-string> ::= <simple character>           # uninteresting character
+  //                  ::= '?$' <hex digit> <hex digit> # these two nibbles
+  //                                                   # encode the byte for the
+  //                                                   # character
+  //                  ::= '?' [a-z]                    # \xe1 - \xfa
+  //                  ::= '?' [A-Z]                    # \xc1 - \xda
+  //                  ::= '?' [0-9]                    # [,/\:. \n\t'-]
+  //
+  // <literal> ::= '??_C@_' <char-type> <literal-length> <encoded-crc>
+  //               <encoded-string> '@'
+  MicrosoftCXXNameMangler Mangler(*this, Out);
+  Mangler.getStream() << "\01??_C@_";
+
+  // <char-type>: The "kind" of string literal is encoded into the mangled name.
+  // TODO: This needs to be updated when MSVC gains support for unicode
+  // literals.
+  if (SL->isAscii())
+    Mangler.getStream() << '0';
+  else if (SL->isWide())
+    Mangler.getStream() << '1';
+  else
+    llvm_unreachable("unexpected string literal kind!");
+
+  // <literal-length>: The next part of the mangled name consists of the length
+  // of the string.
+  // The StringLiteral does not consider the NUL terminator byte(s) but the
+  // mangling does.
+  // N.B. The length is in terms of bytes, not characters.
+  Mangler.mangleNumber(SL->getByteLength() + SL->getCharByteWidth());
+
+  // We will use the "Rocksoft^tm Model CRC Algorithm" to describe the
+  // properties of our CRC:
+  //   Width  : 32
+  //   Poly   : 04C11DB7
+  //   Init   : FFFFFFFF
+  //   RefIn  : True
+  //   RefOut : True
+  //   XorOut : 00000000
+  //   Check  : 340BC6D9
+  uint32_t CRC = 0xFFFFFFFFU;
+
+  auto UpdateCRC = [&CRC](char Byte) {
+    for (unsigned i = 0; i < 8; ++i) {
+      bool Bit = CRC & 0x80000000U;
+      if (Byte & (1U << i))
+        Bit = !Bit;
+      CRC <<= 1;
+      if (Bit)
+        CRC ^= 0x04C11DB7U;
+    }
+  };
+
+  auto GetLittleEndianByte = [&Mangler, &SL](unsigned Index) {
+    unsigned CharByteWidth = SL->getCharByteWidth();
+    uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
+    unsigned OffsetInCodeUnit = Index % CharByteWidth;
+    return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
+  };
+
+  auto GetBigEndianByte = [&Mangler, &SL](unsigned Index) {
+    unsigned CharByteWidth = SL->getCharByteWidth();
+    uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
+    unsigned OffsetInCodeUnit = (CharByteWidth - 1) - (Index % CharByteWidth);
+    return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
+  };
+
+  // CRC all the bytes of the StringLiteral.
+  for (unsigned I = 0, E = SL->getByteLength(); I != E; ++I)
+    UpdateCRC(GetLittleEndianByte(I));
+
+  // The NUL terminator byte(s) were not present earlier, so
+  // we need to manually process those bytes into the CRC.
+  for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
+       ++NullTerminator)
+    UpdateCRC('\x00');
+
+  // The literature refers to the process of reversing the bits in the final CRC
+  // output as "reflection".
+  CRC = llvm::reverseBits(CRC);
+
+  // <encoded-crc>: The CRC is encoded utilizing the standard number mangling
+  // scheme.
+  Mangler.mangleNumber(CRC);
+
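The Rocksoft parameters above pin the CRC down completely; a minimal standalone replay of the same bit-by-bit update on the model's reference input "123456789" should land exactly on the documented Check value:

#include <cassert>
#include <cstdint>
#include <cstring>

static std::uint32_t reflect32(std::uint32_t V) {
  std::uint32_t R = 0;
  for (unsigned i = 0; i != 32; ++i)
    if (V & (1U << i))
      R |= 1U << (31 - i);
  return R;
}

int main() {
  const char *Input = "123456789";
  std::uint32_t CRC = 0xFFFFFFFFU;               // Init
  for (std::size_t b = 0, e = std::strlen(Input); b != e; ++b) {
    // Same update as UpdateCRC above: input bits are consumed LSB-first
    // (RefIn) against an MSB-first shift register.
    for (unsigned i = 0; i != 8; ++i) {
      bool Bit = CRC & 0x80000000U;
      if (Input[b] & (1U << i))
        Bit = !Bit;
      CRC <<= 1;
      if (Bit)
        CRC ^= 0x04C11DB7U;                      // Poly
    }
  }
  CRC = reflect32(CRC);                          // RefOut; XorOut is zero
  assert(CRC == 0x340BC6D9U);                    // Check
  return 0;
}
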
+  // <encoded-string>: The mangled name also contains the first 32 _characters_
+  // (including null-terminator bytes) of the StringLiteral.
+  // Each character is encoded by splitting them into bytes and then encoding
+  // the constituent bytes.
+  auto MangleByte = [&Mangler](char Byte) {
+    // There are five different manglings for characters:
+    // - [a-zA-Z0-9_$]: A one-to-one mapping.
+    // - ?[a-z]: The range from \xe1 to \xfa.
+    // - ?[A-Z]: The range from \xc1 to \xda.
+    // - ?[0-9]: The set of [,/\:. \n\t'-].
+    // - ?$XX: A fallback which maps nibbles.
+    if (isIdentifierBody(Byte, /*AllowDollar=*/true)) {
+      Mangler.getStream() << Byte;
+    } else if (isLetter(Byte & 0x7f)) {
+      Mangler.getStream() << '?' << static_cast<char>(Byte & 0x7f);
+    } else {
+      switch (Byte) {
+        case ',':
+          Mangler.getStream() << "?0";
+          break;
+        case '/':
+          Mangler.getStream() << "?1";
+          break;
+        case '\\':
+          Mangler.getStream() << "?2";
+          break;
+        case ':':
+          Mangler.getStream() << "?3";
+          break;
+        case '.':
+          Mangler.getStream() << "?4";
+          break;
+        case ' ':
+          Mangler.getStream() << "?5";
+          break;
+        case '\n':
+          Mangler.getStream() << "?6";
+          break;
+        case '\t':
+          Mangler.getStream() << "?7";
+          break;
+        case '\'':
+          Mangler.getStream() << "?8";
+          break;
+        case '-':
+          Mangler.getStream() << "?9";
+          break;
+        default:
+          Mangler.getStream() << "?$";
+          Mangler.getStream() << static_cast<char>('A' + ((Byte >> 4) & 0xf));
+          Mangler.getStream() << static_cast<char>('A' + (Byte & 0xf));
+          break;
+      }
+    }
+  };
+
+  // Enforce our 32 character max.
+  unsigned NumCharsToMangle = std::min(32U, SL->getLength());
+  for (unsigned I = 0, E = NumCharsToMangle * SL->getCharByteWidth(); I != E;
+       ++I)
+    MangleByte(GetBigEndianByte(I));
+
+  // Encode the NUL terminator if there is room.
+  if (NumCharsToMangle < 32)
+    for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
+         ++NullTerminator)
+      MangleByte(0);
+
+  Mangler.getStream() << '@';
+}
+
 MicrosoftMangleContext *
 MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
   return new MicrosoftMangleContextImpl(Context, Diags);
diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp
index 1fa7cea..24b129a 100644
--- a/lib/AST/RawCommentList.cpp
+++ b/lib/AST/RawCommentList.cpp
@@ -95,10 +95,9 @@
   unsigned BeginOffset;
   unsigned EndOffset;
 
-  llvm::tie(BeginFileID, BeginOffset) =
+  std::tie(BeginFileID, BeginOffset) =
       SourceMgr.getDecomposedLoc(Range.getBegin());
-  llvm::tie(EndFileID, EndOffset) =
-      SourceMgr.getDecomposedLoc(Range.getEnd());
+  std::tie(EndFileID, EndOffset) = SourceMgr.getDecomposedLoc(Range.getEnd());
 
   const unsigned Length = EndOffset - BeginOffset;
   if (Length < 2)
@@ -252,3 +251,15 @@
     Comments.push_back(new (Allocator) RawComment(RC));
   }
 }
+
+void RawCommentList::addDeserializedComments(ArrayRef<RawComment *> DeserializedComments) {
+  std::vector<RawComment *> MergedComments;
+  MergedComments.reserve(Comments.size() + DeserializedComments.size());
+
+  std::merge(Comments.begin(), Comments.end(),
+             DeserializedComments.begin(), DeserializedComments.end(),
+             std::back_inserter(MergedComments),
+             BeforeThanCompare<RawComment>(SourceMgr));
+  std::swap(Comments, MergedComments);
+}
+
diff --git a/lib/AST/RecordLayout.cpp b/lib/AST/RecordLayout.cpp
index 71e44ec..9d46cc6 100644
--- a/lib/AST/RecordLayout.cpp
+++ b/lib/AST/RecordLayout.cpp
@@ -29,10 +29,13 @@
 }
 
 ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
-                                 CharUnits alignment, CharUnits datasize,
+                                 CharUnits alignment, 
+                                 CharUnits requiredAlignment,
+                                 CharUnits datasize,
                                  const uint64_t *fieldoffsets,
                                  unsigned fieldcount)
-  : Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0),
+  : Size(size), DataSize(datasize), Alignment(alignment),
+    RequiredAlignment(requiredAlignment), FieldOffsets(0),
     FieldCount(fieldcount), CXXInfo(0) {
   if (FieldCount > 0)  {
     FieldOffsets = new (Ctx) uint64_t[FieldCount];
@@ -43,21 +46,24 @@
 // Constructor for C++ records.
 ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
                                  CharUnits size, CharUnits alignment,
+                                 CharUnits requiredAlignment,
                                  bool hasOwnVFPtr, bool hasExtendableVFPtr,
                                  CharUnits vbptroffset,
                                  CharUnits datasize,
                                  const uint64_t *fieldoffsets,
                                  unsigned fieldcount,
                                  CharUnits nonvirtualsize,
-                                 CharUnits nonvirtualalign,
+                                 CharUnits nonvirtualalignment,
                                  CharUnits SizeOfLargestEmptySubobject,
                                  const CXXRecordDecl *PrimaryBase,
                                  bool IsPrimaryBaseVirtual,
                                  const CXXRecordDecl *BaseSharingVBPtr,
-                                 bool AlignAfterVBases,
+                                 bool HasZeroSizedSubObject,
+                                 bool LeadsWithZeroSizedBase,
                                  const BaseOffsetsMapTy& BaseOffsets,
                                  const VBaseOffsetsMapTy& VBaseOffsets)
-  : Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0),
+  : Size(size), DataSize(datasize), Alignment(alignment),
+    RequiredAlignment(requiredAlignment), FieldOffsets(0),
     FieldCount(fieldcount), CXXInfo(new (Ctx) CXXRecordLayoutInfo)
 {
   if (FieldCount > 0)  {
@@ -68,7 +74,7 @@
   CXXInfo->PrimaryBase.setPointer(PrimaryBase);
   CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual);
   CXXInfo->NonVirtualSize = nonvirtualsize;
-  CXXInfo->NonVirtualAlign = nonvirtualalign;
+  CXXInfo->NonVirtualAlignment = nonvirtualalignment;
   CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
   CXXInfo->BaseOffsets = BaseOffsets;
   CXXInfo->VBaseOffsets = VBaseOffsets;
@@ -76,7 +82,8 @@
   CXXInfo->VBPtrOffset = vbptroffset;
   CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr;
   CXXInfo->BaseSharingVBPtr = BaseSharingVBPtr;
-  CXXInfo->AlignAfterVBases = AlignAfterVBases;
+  CXXInfo->HasZeroSizedSubObject = HasZeroSizedSubObject;
+  CXXInfo->LeadsWithZeroSizedBase = LeadsWithZeroSizedBase;
 
 
 #ifndef NDEBUG
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 4390e66..ce55d2e 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -140,10 +140,9 @@
 
 void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
   // Check the bases.
-  for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
-       E = Class->bases_end(); I != E; ++I) {
+  for (const auto &I : Class->bases()) {
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     CharUnits EmptySize;
     const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
@@ -160,9 +159,7 @@
   }
 
   // Check the fields.
-  for (CXXRecordDecl::field_iterator I = Class->field_begin(),
-       E = Class->field_end(); I != E; ++I) {
-
+  for (const auto *I : Class->fields()) {
     const RecordType *RT =
       Context.getBaseElementType(I->getType())->getAs<RecordType>();
 
@@ -348,13 +345,12 @@
   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
 
   // Traverse all non-virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    if (I->isVirtual())
+  for (const auto &I : RD->bases()) {
+    if (I.isVirtual())
       continue;
 
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
     if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
@@ -363,10 +359,9 @@
 
   if (RD == Class) {
     // This is the most derived class, traverse virtual bases as well.
-    for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
-         E = RD->vbases_end(); I != E; ++I) {
+    for (const auto &I : RD->vbases()) {
       const CXXRecordDecl *VBaseDecl =
-        cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+        cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
       
       CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
       if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
@@ -460,13 +455,12 @@
   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
 
   // Traverse all non-virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    if (I->isVirtual())
+  for (const auto &I : RD->bases()) {
+    if (I.isVirtual())
       continue;
 
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
     UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
@@ -474,10 +468,9 @@
 
   if (RD == Class) {
     // This is the most derived class, traverse virtual bases as well.
-    for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
-         E = RD->vbases_end(); I != E; ++I) {
+    for (const auto &I : RD->vbases()) {
       const CXXRecordDecl *VBaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
       
       CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
       UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
@@ -783,16 +776,15 @@
 
 void
 RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-         E = RD->bases_end(); I != E; ++I) {
-    assert(!I->getType()->isDependentType() &&
+  for (const auto &I : RD->bases()) {
+    assert(!I.getType()->isDependentType() &&
            "Cannot layout class with dependent bases.");
 
     const CXXRecordDecl *Base =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     // Check if this is a nearly empty virtual base.
-    if (I->isVirtual() && Context.isNearlyEmpty(Base)) {
+    if (I.isVirtual() && Context.isNearlyEmpty(Base)) {
       // If it's not an indirect primary base, then we've found our primary
       // base.
       if (!IndirectPrimaryBases.count(Base)) {
@@ -825,14 +817,13 @@
   // If the record has a dynamic base class, attempt to choose a primary base
   // class. It is the first (in direct base class order) non-virtual dynamic
   // base class, if one exists.
-  for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
-         e = RD->bases_end(); i != e; ++i) {
+  for (const auto &I : RD->bases()) {
     // Ignore virtual bases.
-    if (i->isVirtual())
+    if (I.isVirtual())
       continue;
 
     const CXXRecordDecl *Base =
-      cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     if (Base->isDynamicClass()) {
       // We found it.
@@ -918,12 +909,11 @@
   }
 
   // Now go through all direct bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    bool IsVirtual = I->isVirtual();
+  for (const auto &I : RD->bases()) {
+    bool IsVirtual = I.isVirtual();
     
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
     
     Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
   }
@@ -944,12 +934,11 @@
 }
 
 void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    bool IsVirtual = I->isVirtual();
+  for (const auto &I : RD->bases()) {
+    bool IsVirtual = I.isVirtual();
 
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
     
     // Compute the base subobject info for this base.
     BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, 0);
@@ -1033,15 +1022,13 @@
   }
 
   // Now lay out the non-virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-         E = RD->bases_end(); I != E; ++I) {
+  for (const auto &I : RD->bases()) {
 
     // Ignore virtual bases.
-    if (I->isVirtual())
+    if (I.isVirtual())
       continue;
 
-    const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+    const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
 
     // Skip the primary base, because we've already laid it out.  The
     // !PrimaryBaseIsVirtual check is required because we might have a
@@ -1118,15 +1105,13 @@
     PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
   }
 
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-         E = RD->bases_end(); I != E; ++I) {
-    assert(!I->getType()->isDependentType() &&
+  for (const auto &I : RD->bases()) {
+    assert(!I.getType()->isDependentType() &&
            "Cannot layout class with dependent bases.");
 
-    const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+    const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
 
-    if (I->isVirtual()) {
+    if (I.isVirtual()) {
       if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
         bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl);
 
@@ -1191,7 +1176,7 @@
     }
   }
   
-  CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlign();
+  CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlignment();
   CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
  
   // If we have an empty base class, try to place it at offset 0.
@@ -1326,22 +1311,20 @@
 
 #ifndef NDEBUG
   // Check that we have base offsets for all bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    if (I->isVirtual())
+  for (const auto &I : RD->bases()) {
+    if (I.isVirtual())
       continue;
 
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     assert(Bases.count(BaseDecl) && "Did not find base offset!");
   }
 
   // And all virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
-       E = RD->vbases_end(); I != E; ++I) {
+  for (const auto &I : RD->vbases()) {
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     assert(VBases.count(BaseDecl) && "Did not find base offset!");
   }
@@ -1374,9 +1357,8 @@
 void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
   // Layout each field, for now, just sequentially, respecting alignment.  In
   // the future, this will need to be tweakable by targets.
-  for (RecordDecl::field_iterator Field = D->field_begin(),
-       FieldEnd = D->field_end(); Field != FieldEnd; ++Field)
-    LayoutField(*Field);
+  for (const auto *Field : D->fields())
+    LayoutField(Field);
 }
 
 void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
@@ -1452,127 +1434,224 @@
   uint64_t TypeSize = FieldInfo.first;
   unsigned FieldAlign = FieldInfo.second;
 
+  // UnfilledBitsInLastUnit is the difference between the end of the
+  // last allocated bitfield (i.e. the first bit offset available for
+  // bitfields) and the end of the current data size in bits (i.e. the
+  // first bit offset available for non-bitfields).  The current data
+  // size in bits is always a multiple of the char size; additionally,
+  // for ms_struct records it's also a multiple of the
+  // LastBitfieldTypeSize (if set).
+
+  // The struct-layout algorithm is dictated by the platform ABI,
+  // which in principle could use almost any rules it likes.  In
+  // practice, UNIXy targets tend to inherit the algorithm described
+  // in the System V generic ABI.  The basic bitfield layout rule in
+  // System V is to place bitfields at the next available bit offset
+  // where the entire bitfield would fit in an aligned storage unit of
+  // the declared type; it's okay if an earlier or later non-bitfield
+  // is allocated in the same storage unit.  However, some targets
+  // (those that !useBitFieldTypeAlignment(), e.g. ARM APCS) don't
+  // require this storage unit to be aligned, and therefore always put
+  // the bitfield at the next available bit offset.
+
+  // ms_struct basically requests a complete replacement of the
+  // platform ABI's struct-layout algorithm, with the high-level goal
+  // of duplicating MSVC's layout.  For non-bitfields, this follows
+  // the standard algorithm.  The basic bitfield layout rule is to
+  // allocate an entire unit of the bitfield's declared type
+  // (e.g. 'unsigned long'), then parcel it up among successive
+  // bitfields whose declared types have the same size, making a new
+  // unit as soon as the last can no longer store the whole value.
+  // Since it completely replaces the platform ABI's algorithm,
+  // settings like !useBitFieldTypeAlignment() do not apply.
+
+  // A zero-width bitfield forces the use of a new storage unit for
+  // later bitfields.  In general, this occurs by rounding up the
+  // current size of the struct as if the algorithm were about to
+  // place a non-bitfield of the field's formal type.  Usually this
+  // does not change the alignment of the struct itself, but it does
+  // on some targets (those that useZeroLengthBitfieldAlignment(),
+  // e.g. ARM).  In ms_struct layout, zero-width bitfields are
+  // ignored unless they follow a non-zero-width bitfield.
+
+  // A field alignment restriction (e.g. from #pragma pack) or
+  // specification (e.g. from __attribute__((aligned))) changes the
+  // formal alignment of the field.  For System V, this alters the
+  // required alignment of the notional storage unit that must contain
+  // the bitfield.  For ms_struct, this only affects the placement of
+  // new storage units.  In both cases, the effect of #pragma pack is
+  // ignored on zero-width bitfields.
+
+  // On System V, a packed field (e.g. from #pragma pack or
+  // __attribute__((packed))) always uses the next available bit
+  // offset.
+
+  // In an ms_struct struct, the alignment of a fundamental type is
+  // always equal to its size.  This is necessary in order to mimic
+  // the i386 alignment rules on targets which might not fully align
+  // all types (e.g. Darwin PPC32, where alignof(long long) == 4).
+
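As a small illustration of the two regimes just described (a sketch assuming an x86-64 SysV host and a compiler supporting the GCC/Clang ms_struct attribute; the sizes are the ones the rules above predict):

#include <cstdio>

struct SysVLayout { char A; int B : 10; };
// System V: B lands at bit offset 8 and still fits in the aligned int
// storage unit covering bits 0-31, so no padding is inserted: sizeof == 4.

struct __attribute__((ms_struct)) MsLayout { char A; int B : 10; };
// ms_struct: B allocates a whole aligned 'int' storage unit of its own,
// starting at byte 4: sizeof == 8.

int main() {
  std::printf("%zu %zu\n", sizeof(SysVLayout), sizeof(MsLayout));
  return 0;
}
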
+  // First, some simple bookkeeping to perform for ms_struct structs.
   if (IsMsStruct) {
-    // The field alignment for integer types in ms_struct structs is
-    // always the size.
+    // The field alignment for integer types is always the size.
     FieldAlign = TypeSize;
-    // Ignore zero-length bitfields after non-bitfields in ms_struct structs.
-    if (!FieldSize && !LastBitfieldTypeSize)
-      FieldAlign = 1;
-    // If a bitfield is followed by a bitfield of a different size, don't
-    // pack the bits together in ms_struct structs.
+
+    // If the previous field was not a bitfield, or was a bitfield
+    // with a different storage unit size, we're done with that
+    // storage unit.
     if (LastBitfieldTypeSize != TypeSize) {
+      // Also, ignore zero-length bitfields after non-bitfields.
+      if (!LastBitfieldTypeSize && !FieldSize)
+        FieldAlign = 1;
+
       UnfilledBitsInLastUnit = 0;
       LastBitfieldTypeSize = 0;
     }
   }
 
-  uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
-  uint64_t FieldOffset = IsUnion ? 0 : UnpaddedFieldOffset;
-
-  bool ZeroLengthBitfield = false;
-  if (!Context.getTargetInfo().useBitFieldTypeAlignment() &&
-      Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
-      FieldSize == 0) {
-    // The alignment of a zero-length bitfield affects the alignment
-    // of the next member.  The alignment is the max of the zero 
-    // length bitfield's alignment and a target specific fixed value.
-    ZeroLengthBitfield = true;
-    unsigned ZeroLengthBitfieldBoundary =
-      Context.getTargetInfo().getZeroLengthBitfieldBoundary();
-    if (ZeroLengthBitfieldBoundary > FieldAlign)
-      FieldAlign = ZeroLengthBitfieldBoundary;
-  }
-
+  // If the field is wider than its declared type, it follows
+  // different rules in all cases.
   if (FieldSize > TypeSize) {
     LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D);
     return;
   }
 
-  // The align if the field is not packed. This is to check if the attribute
-  // was unnecessary (-Wpacked).
+  // Compute the next available bit offset.
+  uint64_t FieldOffset =
+    IsUnion ? 0 : (getDataSizeInBits() - UnfilledBitsInLastUnit);
+
+  // Handle targets that don't honor bitfield type alignment.
+  if (!IsMsStruct && !Context.getTargetInfo().useBitFieldTypeAlignment()) {
+    // Some such targets do honor it on zero-width bitfields.
+    if (FieldSize == 0 &&
+        Context.getTargetInfo().useZeroLengthBitfieldAlignment()) {
+      // The alignment to round up to is the max of the field's natural
+      // alignment and a target-specific fixed value (sometimes zero).
+      unsigned ZeroLengthBitfieldBoundary =
+        Context.getTargetInfo().getZeroLengthBitfieldBoundary();
+      FieldAlign = std::max(FieldAlign, ZeroLengthBitfieldBoundary);
+
+    // If that doesn't apply, just ignore the field alignment.
+    } else {
+      FieldAlign = 1;
+    }
+  }
+
+  // Remember the alignment we would have used if the field were not packed.
   unsigned UnpackedFieldAlign = FieldAlign;
-  uint64_t UnpackedFieldOffset = FieldOffset;
-  if (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield)
-    UnpackedFieldAlign = 1;
 
-  if (FieldPacked || 
-      (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield))
+  // Ignore the field alignment if the field is packed unless it has zero size.
+  if (!IsMsStruct && FieldPacked && FieldSize != 0)
     FieldAlign = 1;
-  FieldAlign = std::max(FieldAlign, D->getMaxAlignment());
-  UnpackedFieldAlign = std::max(UnpackedFieldAlign, D->getMaxAlignment());
 
-  // The maximum field alignment overrides the aligned attribute.
-  if (!MaxFieldAlignment.isZero() && FieldSize != 0) {
+  // But, if there's an 'aligned' attribute on the field, honor that.
+  if (unsigned ExplicitFieldAlign = D->getMaxAlignment()) {
+    FieldAlign = std::max(FieldAlign, ExplicitFieldAlign);
+    UnpackedFieldAlign = std::max(UnpackedFieldAlign, ExplicitFieldAlign);
+  }
+
+  // But, if there's a #pragma pack in play, that takes precedence over
+  // even the 'aligned' attribute, for non-zero-width bitfields.
+  if (!MaxFieldAlignment.isZero() && FieldSize) {
     unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment);
     FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
     UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
   }
 
-  // ms_struct bitfields always have to start at a round alignment.
-  if (IsMsStruct && !LastBitfieldTypeSize) {
-    FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
-    UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
-                                                   UnpackedFieldAlign);
+  // For purposes of diagnostics, we're going to simultaneously
+  // compute the field offsets that we would have used if we weren't
+  // adding any alignment padding or if the field weren't packed.
+  uint64_t UnpaddedFieldOffset = FieldOffset;
+  uint64_t UnpackedFieldOffset = FieldOffset;
+
+  // Check if we need to add padding to fit the bitfield within an
+  // allocation unit with the right size and alignment.  The rules are
+  // somewhat different here for ms_struct structs.
+  if (IsMsStruct) {
+    // If it's not a zero-width bitfield, and we can fit the bitfield
+    // into the active storage unit (and we haven't already decided to
+    // start a new storage unit), just do so, regardless of any other
+    // consideration.  Otherwise, round up to the right alignment.
+    if (FieldSize == 0 || FieldSize > UnfilledBitsInLastUnit) {
+      FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+      UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+                                                     UnpackedFieldAlign);
+      UnfilledBitsInLastUnit = 0;
+    }
+
+  } else {
+    // #pragma pack, with any value, suppresses the insertion of padding.
+    bool AllowPadding = MaxFieldAlignment.isZero();
+
+    // Compute the real offset.
+    if (FieldSize == 0 || 
+        (AllowPadding &&
+         (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)) {
+      FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+    }
+
+    // Repeat the computation for diagnostic purposes.
+    if (FieldSize == 0 ||
+        (AllowPadding &&
+         (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
+      UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+                                                     UnpackedFieldAlign);
   }
 
-  // Check if we need to add padding to give the field the correct alignment.
-  if (FieldSize == 0 || 
-      (MaxFieldAlignment.isZero() &&
-       (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize))
-    FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
-
-  if (FieldSize == 0 ||
-      (MaxFieldAlignment.isZero() &&
-       (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
-    UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
-                                                   UnpackedFieldAlign);
-
-  // Padding members don't affect overall alignment, unless zero length bitfield
-  // alignment is enabled.
-  if (!D->getIdentifier() &&
-      !Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
-      !IsMsStruct)
-    FieldAlign = UnpackedFieldAlign = 1;
-
+  // If we're using external layout, give the external layout a chance
+  // to override this information.
   if (ExternalLayout)
     FieldOffset = updateExternalFieldOffset(D, FieldOffset);
 
-  // Place this field at the current location.
+  // Okay, place the bitfield at the calculated offset.
   FieldOffsets.push_back(FieldOffset);
 
+  // Bookkeeping:
+
+  // Anonymous members don't affect the overall record alignment,
+  // except on targets where they do.
+  if (!IsMsStruct &&
+      !Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+      !D->getIdentifier())
+    FieldAlign = UnpackedFieldAlign = 1;
+
+  // Diagnose differences in layout due to padding or packing.
   if (!ExternalLayout)
     CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
                       UnpackedFieldAlign, FieldPacked, D);
 
   // Update DataSize to include the last byte containing (part of) the bitfield.
+
+  // For unions, this is just a max operation, as usual.
   if (IsUnion) {
     // FIXME: I think FieldSize should be TypeSize here.
     setDataSize(std::max(getDataSizeInBits(), FieldSize));
-  } else {
-    if (IsMsStruct && FieldSize) {
-      // Under ms_struct, a bitfield always takes up space equal to the size
-      // of the type.  We can't just change the alignment computation on the
-      // other codepath because of the way this interacts with #pragma pack:
-      // in a packed struct, we need to allocate misaligned space in the
-      // struct to hold the bitfield.
-      if (!UnfilledBitsInLastUnit) {
-        setDataSize(FieldOffset + TypeSize);
-        UnfilledBitsInLastUnit = TypeSize - FieldSize;
-      } else if (UnfilledBitsInLastUnit < FieldSize) {
-        setDataSize(getDataSizeInBits() + TypeSize);
-        UnfilledBitsInLastUnit = TypeSize - FieldSize;
-      } else {
-        UnfilledBitsInLastUnit -= FieldSize;
-      }
-      LastBitfieldTypeSize = TypeSize;
-    } else {
-      uint64_t NewSizeInBits = FieldOffset + FieldSize;
-      uint64_t BitfieldAlignment = Context.getTargetInfo().getCharAlign();
-      setDataSize(llvm::RoundUpToAlignment(NewSizeInBits, BitfieldAlignment));
-      UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits;
-      LastBitfieldTypeSize = 0;
+
+  // For non-zero-width bitfields in ms_struct structs, allocate a new
+  // storage unit if necessary.
+  } else if (IsMsStruct && FieldSize) {
+    // We should have cleared UnfilledBitsInLastUnit in every case
+    // where we changed storage units.
+    if (!UnfilledBitsInLastUnit) {
+      setDataSize(FieldOffset + TypeSize);
+      UnfilledBitsInLastUnit = TypeSize;
     }
+    UnfilledBitsInLastUnit -= FieldSize;
+    LastBitfieldTypeSize = TypeSize;
+
+  // Otherwise, bump the data size up to include the bitfield,
+  // including padding up to char alignment, and then remember how many
+  // bits we didn't use.
+  } else {
+    uint64_t NewSizeInBits = FieldOffset + FieldSize;
+    uint64_t CharAlignment = Context.getTargetInfo().getCharAlign();
+    setDataSize(llvm::RoundUpToAlignment(NewSizeInBits, CharAlignment));
+    UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits;
+
+    // The only time we can get here for an ms_struct is if this is a
+    // zero-width bitfield, which doesn't count as anything for the
+    // purposes of unfilled bits.
+    LastBitfieldTypeSize = 0;
   }
 
   // Update the size.
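As a rough illustration of the ms_struct storage-unit handling above (not taken from the patch; assumes a 32-bit int), two bitfields of the same declared type are expected to end up sharing one unit:

    struct __attribute__((ms_struct)) S {
      int a : 3;   // opens a full 32-bit storage unit; 29 bits remain unfilled
      int b : 3;   // same declared type and it fits: reuses the unit, 26 bits remain
    };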
@@ -1878,20 +1957,18 @@
   if (!RD->isExternallyVisible())
     return 0;
 
-  // Template instantiations don't have key functions,see Itanium C++ ABI 5.2.6.
+  // Template instantiations don't have key functions per Itanium C++ ABI 5.2.6.
   // Same behavior as GCC.
   TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
   if (TSK == TSK_ImplicitInstantiation ||
+      TSK == TSK_ExplicitInstantiationDeclaration ||
       TSK == TSK_ExplicitInstantiationDefinition)
     return 0;
 
   bool allowInlineFunctions =
     Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline();
 
-  for (CXXRecordDecl::method_iterator I = RD->method_begin(),
-         E = RD->method_end(); I != E; ++I) {
-    const CXXMethodDecl *MD = *I;
-
+  for (const auto *MD : RD->methods()) {
     if (!MD->isVirtual())
       continue;
 
@@ -1985,7 +2062,7 @@
 //
 // * The alignment of bitfields in unions is ignored when computing the
 //   alignment of the union.
-// * The existance of zero-width bitfield that occurs after anything other than
+// * The existence of a zero-width bitfield that occurs after anything other than
 //   a non-zero length bitfield is ignored.
 // * The Itanium equivalent vtable pointers are split into a vfptr (virtual
 //   function pointer) and a vbptr (virtual base pointer).  They can each be
@@ -2014,15 +2091,32 @@
 //   one.
 // * The last zero size virtual base may be placed at the end of the struct.
 //   and can potentially alias a zero sized type in the next struct.
-// * If the last field is a non-zero length bitfield and we have any virtual
-//   bases then some extra padding is added before the virtual bases for no
-//   obvious reason.
+// * If the last field is a non-zero length bitfield, all virtual bases will
+//   have extra padding added before them for no obvious reason.  The padding
+//   has the same number of bits as the type of the bitfield.
 // * When laying out empty non-virtual bases, an extra byte of padding is added
 //   if the non-virtual base before the empty non-virtual base has a vbptr.
-
+// * The ABI attempts to avoid aliasing of zero sized bases by adding padding
+//   between bases or vbases with specific properties.  The criterion for
+//   additional padding between two bases is that the first base is zero sized
+//   or has a zero sized subobject and the second base is zero sized or leads
+//   with a zero sized base (sharing of vfptrs can reorder the layout of the
+//   bases, so the leading base is not always the first one declared).  The padding
+//   added for bases is 1 byte.  The padding added for vbases depends on the
+//   alignment of the object but is at least 4 bytes (in both 32 and 64 bit
+//   modes).
+// * There is no concept of non-virtual alignment or any distinction between
+//   data size and non-virtual size.
+// * __declspec(align) on bitfields has the effect of changing the bitfield's
+//   alignment instead of its required alignment.  This has implications on how
+//   it interacts with pragma pack.
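The quirks listed above are easiest to observe with Clang's record-layout dumper.  A minimal probe might look like this (type names invented, and assuming an MSVC target triple is available):

    // layout-probe.cpp
    struct Empty  {};                            // zero sized
    struct Empty2 {};                            // zero sized
    struct LeadsWithEmpty : Empty { int x; };    // leads with a zero sized base
    struct Both : Empty2, LeadsWithEmpty {};     // candidate for inter-base padding
    int Probe = sizeof(Both);                    // forces the layout to be computed

Something like

    clang -target i686-pc-win32 -Xclang -fdump-record-layouts -fsyntax-only layout-probe.cpp

should print the computed offsets, including any padding byte inserted between the two bases.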
 
 namespace {
 struct MicrosoftRecordLayoutBuilder {
+  struct ElementInfo {
+    CharUnits Size;
+    CharUnits Alignment;
+  };
   typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
   MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {}
 private:
@@ -2030,19 +2124,20 @@
   LLVM_DELETED_FUNCTION;
   void operator=(const MicrosoftRecordLayoutBuilder &) LLVM_DELETED_FUNCTION;
 public:
-
   void layout(const RecordDecl *RD);
   void cxxLayout(const CXXRecordDecl *RD);
   /// \brief Initializes size and alignment and honors some flags.
   void initializeLayout(const RecordDecl *RD);
   /// \brief Initialized C++ layout, compute alignment and virtual alignment and
-  /// existance of vfptrs and vbptrs.  Alignment is needed before the vfptr is
+  /// existence of vfptrs and vbptrs.  Alignment is needed before the vfptr is
   /// laid out.
   void initializeCXXLayout(const CXXRecordDecl *RD);
-  void layoutVFPtr(const CXXRecordDecl *RD);
   void layoutNonVirtualBases(const CXXRecordDecl *RD);
-  void layoutNonVirtualBase(const CXXRecordDecl *RD);
-  void layoutVBPtr(const CXXRecordDecl *RD);
+  void layoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+                            const ASTRecordLayout &BaseLayout,
+                            const ASTRecordLayout *&PreviousBaseLayout);
+  void injectVFPtr(const CXXRecordDecl *RD);
+  void injectVBPtr(const CXXRecordDecl *RD);
   /// \brief Lays out the fields of the record.  Also rounds size up to
   /// alignment.
   void layoutFields(const RecordDecl *RD);
@@ -2052,22 +2147,15 @@
   /// special cases associated with zero-width bit-fields.
   void layoutZeroWidthBitField(const FieldDecl *FD);
   void layoutVirtualBases(const CXXRecordDecl *RD);
-  void layoutVirtualBase(const CXXRecordDecl *RD, bool HasVtordisp);
-  /// \brief Flushes the lazy virtual base and conditionally rounds up to
-  /// alignment.
-  void finalizeCXXLayout(const CXXRecordDecl *RD);
-  void honorDeclspecAlign(const RecordDecl *RD);
-
-  /// \brief Updates the alignment of the type.  This function doesn't take any
-  /// properties (such as packedness) into account.  getAdjustedFieldInfo()
-  /// adjustes for packedness.
-  void updateAlignment(CharUnits NewAlignment) {
-    Alignment = std::max(Alignment, NewAlignment);
-  }
-  /// \brief Gets the size and alignment taking attributes into account.
-  std::pair<CharUnits, CharUnits> getAdjustedFieldInfo(const FieldDecl *FD);
-  /// \brief Places a field at offset 0.
-  void placeFieldAtZero() { FieldOffsets.push_back(0); }
+  void finalizeLayout(const RecordDecl *RD);
+  /// \brief Gets the size and alignment of a base taking pragma pack and
+  /// __declspec(align) into account.
+  ElementInfo getAdjustedElementInfo(const ASTRecordLayout &Layout,
+                                     bool AsBase = true);
+  /// \brief Gets the size and alignment of a field taking pragma pack and
+  /// __declspec(align) into account.  It also updates RequiredAlignment as a
+  /// side effect because it is most convenient to do so here.
+  ElementInfo getAdjustedElementInfo(const FieldDecl *FD);
   /// \brief Places a field at an offset in CharUnits.
   void placeFieldAtOffset(CharUnits FieldOffset) {
     FieldOffsets.push_back(Context.toBits(FieldOffset));
@@ -2079,334 +2167,289 @@
   /// \brief Compute the set of virtual bases for which vtordisps are required.
   llvm::SmallPtrSet<const CXXRecordDecl *, 2>
   computeVtorDispSet(const CXXRecordDecl *RD);
-
   const ASTContext &Context;
   /// \brief The size of the record being laid out.
   CharUnits Size;
+  /// \brief The non-virtual size of the record layout.
+  CharUnits NonVirtualSize;
+  /// \brief The data size of the record layout.
+  CharUnits DataSize;
   /// \brief The current alignment of the record layout.
   CharUnits Alignment;
-  /// \brief The collection of field offsets.
-  SmallVector<uint64_t, 16> FieldOffsets;
   /// \brief The maximum allowed field alignment. This is set by #pragma pack.
   CharUnits MaxFieldAlignment;
-  /// \brief Alignment does not occur for virtual bases unless something
-  /// forces it to by explicitly using __declspec(align())
-  bool AlignAfterVBases : 1;
-  bool IsUnion : 1;
-  /// \brief True if the last field laid out was a bitfield and was not 0
-  /// width.
-  bool LastFieldIsNonZeroWidthBitfield : 1;
+  /// \brief The alignment that this record must obey.  This is imposed by
+  /// __declspec(align()) on the record itself or one of its fields or bases.
+  CharUnits RequiredAlignment;
   /// \brief The size of the allocation of the currently active bitfield.
   /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield
   /// is true.
   CharUnits CurrentBitfieldSize;
-  /// \brief The number of remaining bits in our last bitfield allocation.
-  /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is
-  /// true.
-  unsigned RemainingBitsInField;
-
-  /// \brief The data alignment of the record layout.
-  CharUnits DataSize;
-  /// \brief The alignment of the non-virtual portion of the record layout
-  /// without the impact of the virtual pointers.
-  /// Only used for C++ layouts.
-  CharUnits BasesAndFieldsAlignment;
-  /// \brief The alignment of the non-virtual portion of the record layout
-  /// Only used for C++ layouts.
-  CharUnits NonVirtualAlignment;
-  /// \brief The additional alignment imposed by the virtual bases.
-  CharUnits VirtualAlignment;
+  /// \brief Offset to the virtual base table pointer (if one exists).
+  CharUnits VBPtrOffset;
+  /// \brief The size and alignment info of a pointer.
+  ElementInfo PointerInfo;
   /// \brief The primary base class (if one exists).
   const CXXRecordDecl *PrimaryBase;
   /// \brief The class we share our vb-pointer with.
   const CXXRecordDecl *SharedVBPtrBase;
-  /// \brief True if the class has a vftable pointer that can be extended
-  /// by this class or classes derived from it.  Such a vfptr will always occur
-  /// at offset 0.
-  bool HasExtendableVFPtr : 1;
-  /// \brief True if the class has a (not necessarily its own) vbtable pointer.
-  bool HasVBPtr : 1;
-  /// \brief Offset to the virtual base table pointer (if one exists).
-  CharUnits VBPtrOffset;
+  /// \brief The collection of field offsets.
+  SmallVector<uint64_t, 16> FieldOffsets;
   /// \brief Base classes and their offsets in the record.
   BaseOffsetsMapTy Bases;
   /// \brief virtual base classes and their offsets in the record.
   ASTRecordLayout::VBaseOffsetsMapTy VBases;
-  /// \brief The size of a pointer.
-  CharUnits PointerSize;
-  /// \brief The alignment of a pointer.
-  CharUnits PointerAlignment;
-  /// \brief Holds an empty base we haven't yet laid out.
-  const CXXRecordDecl *LazyEmptyBase;
-  /// \brief Lets us know if the last base we laid out was empty.  Only used
-  /// when adjusting the placement of a last zero-sized base in 64 bit mode.
-  bool LastBaseWasEmpty;
+  /// \brief The number of remaining bits in our last bitfield allocation.
+  /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is
+  /// true.
+  unsigned RemainingBitsInField;
+  bool IsUnion : 1;
+  /// \brief True if the last field laid out was a bitfield and was not 0
+  /// width.
+  bool LastFieldIsNonZeroWidthBitfield : 1;
+  /// \brief True if the class has its own vftable pointer.
+  bool HasOwnVFPtr : 1;
+  /// \brief True if the class has a vbtable pointer.
+  bool HasVBPtr : 1;
   /// \brief Lets us know if we're in 64-bit mode
-  bool Is64BitMode;
-  /// \brief True if the last non-virtual base has a vbptr.
-  bool LastNonVirtualBaseHasVBPtr;
+  bool Is64BitMode : 1;
+  /// \brief True if this class contains a zero sized member or base or a base
+  /// with a zero sized member or base.  Only used for MS-ABI.
+  bool HasZeroSizedSubObject : 1;
+  /// \brief True if this class is zero sized or its first base is zero sized
+  /// or has this property.  Only used for MS-ABI.
+  bool LeadsWithZeroSizedBase : 1;
 };
 } // namespace
 
-std::pair<CharUnits, CharUnits>
-MicrosoftRecordLayoutBuilder::getAdjustedFieldInfo(const FieldDecl *FD) {
-  std::pair<CharUnits, CharUnits> FieldInfo =
-      Context.getTypeInfoInChars(FD->getType());
-
-  // If we're not on win32 and using ms_struct the field alignment will be wrong
-  // for 64 bit types, so we fix that here.
-  if (FD->getASTContext().getTargetInfo().getTriple().getOS() !=
-      llvm::Triple::Win32) {
-    QualType T = Context.getBaseElementType(FD->getType());
-    if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
-      CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
-      if (TypeSize > FieldInfo.second)
-        FieldInfo.second = TypeSize;
-    }
-  }
-
-  // Respect packed attribute.
-  if (FD->hasAttr<PackedAttr>())
-    FieldInfo.second = CharUnits::One();
-  // Respect pack pragma.
-  else if (!MaxFieldAlignment.isZero())
-    FieldInfo.second = std::min(FieldInfo.second, MaxFieldAlignment);
-  // Respect alignment attributes.
-  if (unsigned fieldAlign = FD->getMaxAlignment()) {
-    CharUnits FieldAlign = Context.toCharUnitsFromBits(fieldAlign);
-    AlignAfterVBases = true;
-    FieldInfo.second = std::max(FieldInfo.second, FieldAlign);
-  }
-  return FieldInfo;
+MicrosoftRecordLayoutBuilder::ElementInfo
+MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
+    const ASTRecordLayout &Layout, bool AsBase) {
+  ElementInfo Info;
+  Info.Alignment = Layout.getAlignment();
+  // Respect pragma pack.
+  if (!MaxFieldAlignment.isZero())
+    Info.Alignment = std::min(Info.Alignment, MaxFieldAlignment);
+  // Track zero-sized subobjects here where it's already available.
+  if (Layout.hasZeroSizedSubObject())
+    HasZeroSizedSubObject = true;
+  // Respect required alignment; this is necessary because we may have adjusted
+  // the alignment in the case of pragma pack.  Note that the required alignment
+  // doesn't actually apply to the struct alignment at this point.
+  Alignment = std::max(Alignment, Info.Alignment);
+  Info.Alignment = std::max(Info.Alignment, Layout.getRequiredAlignment());
+  Info.Size = AsBase ? Layout.getNonVirtualSize() : Layout.getSize();
+  return Info;
 }
 
-void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
-  IsUnion = RD->isUnion();
-  Is64BitMode = Context.getTargetInfo().getPointerWidth(0) == 64;
-
-  Size = CharUnits::Zero();
-  Alignment = CharUnits::One();
-  AlignAfterVBases = false;
-
-  // Compute the maximum field alignment.
-  MaxFieldAlignment = CharUnits::Zero();
-  // Honor the default struct packing maximum alignment flag.
-  if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct)
-    MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
-  // Honor the packing attribute.
-  if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>())
-    MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
-  // Packed attribute forces max field alignment to be 1.
-  if (RD->hasAttr<PackedAttr>())
-    MaxFieldAlignment = CharUnits::One();
+MicrosoftRecordLayoutBuilder::ElementInfo
+MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
+    const FieldDecl *FD) {
+  ElementInfo Info;
+  std::tie(Info.Size, Info.Alignment) =
+      Context.getTypeInfoInChars(FD->getType());
+  // Respect align attributes.
+  CharUnits FieldRequiredAlignment = 
+      Context.toCharUnitsFromBits(FD->getMaxAlignment());
+  // Respect attributes applied to subobjects of the field.
+  if (const RecordType *RT =
+      FD->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RT->getDecl());
+    // Get the element info for a layout, respecting pack.
+    Info.Alignment = getAdjustedElementInfo(Layout, false).Alignment;
+    // Capture required alignment as a side-effect.
+    RequiredAlignment = std::max(RequiredAlignment,
+                                 Layout.getRequiredAlignment());
+  } else {
+    if (FD->isBitField() && FD->getMaxAlignment() != 0)
+      Info.Alignment = std::max(Info.Alignment, FieldRequiredAlignment);
+    // Respect pragma pack.
+    if (!MaxFieldAlignment.isZero())
+      Info.Alignment = std::min(Info.Alignment, MaxFieldAlignment);
+  }
+  // Respect packed field attribute.
+  if (FD->hasAttr<PackedAttr>())
+    Info.Alignment = CharUnits::One();
+  // Take required alignment into account.  __declspec(align) on bitfields
+  // impacts the alignment rather than the required alignment.
+  if (!FD->isBitField()) {
+    Info.Alignment = std::max(Info.Alignment, FieldRequiredAlignment);
+    // Capture required alignment as a side-effect.
+    RequiredAlignment = std::max(RequiredAlignment, FieldRequiredAlignment);
+  }
+  // TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
+  if (!(FD->isBitField() && IsUnion)) {
+    Alignment = std::max(Alignment, Info.Alignment);
+    if (!MaxFieldAlignment.isZero())
+      Alignment = std::min(Alignment, MaxFieldAlignment);
+  }
+  return Info;
 }
 
 void MicrosoftRecordLayoutBuilder::layout(const RecordDecl *RD) {
   initializeLayout(RD);
   layoutFields(RD);
-  honorDeclspecAlign(RD);
+  DataSize = Size = Size.RoundUpToAlignment(Alignment);
+  RequiredAlignment = std::max(
+      RequiredAlignment, Context.toCharUnitsFromBits(RD->getMaxAlignment()));
+  finalizeLayout(RD);
 }
 
 void MicrosoftRecordLayoutBuilder::cxxLayout(const CXXRecordDecl *RD) {
   initializeLayout(RD);
   initializeCXXLayout(RD);
-  layoutVFPtr(RD);
   layoutNonVirtualBases(RD);
-  layoutVBPtr(RD);
   layoutFields(RD);
-  DataSize = Size;
-  NonVirtualAlignment = Alignment;
+  injectVBPtr(RD);
+  injectVFPtr(RD);
+  if (HasOwnVFPtr || (HasVBPtr && !SharedVBPtrBase))
+    Alignment = std::max(Alignment, PointerInfo.Alignment);
+  NonVirtualSize = Size = Size.RoundUpToAlignment(Alignment);
+  RequiredAlignment = std::max(
+      RequiredAlignment, Context.toCharUnitsFromBits(RD->getMaxAlignment()));
   layoutVirtualBases(RD);
-  finalizeCXXLayout(RD);
-  honorDeclspecAlign(RD);
+  finalizeLayout(RD);
+}
+
+void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
+  IsUnion = RD->isUnion();
+  Is64BitMode = Context.getTargetInfo().getPointerWidth(0) == 64;
+  Size = CharUnits::Zero();
+  Alignment = CharUnits::One();
+  // In 64-bit mode we always perform an alignment step after laying out vbases.
+  // In 32-bit mode we do not.  The check to see if we need to perform alignment
+  // checks the RequiredAlignment field and performs alignment if it isn't 0.
+  RequiredAlignment = Is64BitMode ? CharUnits::One() : CharUnits::Zero();
+  // Compute the maximum field alignment.
+  MaxFieldAlignment = CharUnits::Zero();
+  // Honor the default struct packing maximum alignment flag.
+  if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct)
+    MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
+  // Honor the packing attribute.  The MS-ABI ignores pragma pack if it's larger
+  // than the pointer size.
+  if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>()){
+    unsigned PackedAlignment = MFAA->getAlignment();
+    if (PackedAlignment <= Context.getTargetInfo().getPointerWidth(0))
+      MaxFieldAlignment = Context.toCharUnitsFromBits(PackedAlignment);
+  }
+  // Packed attribute forces max field alignment to be 1.
+  if (RD->hasAttr<PackedAttr>())
+    MaxFieldAlignment = CharUnits::One();
 }
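For example (illustrative source, assuming a target with a 32-bit pointer width), the first pragma below exceeds the pointer size and is ignored, while the second is honored:

    #pragma pack(16)                  // 128 bits > pointer width: MaxFieldAlignment stays 0
    struct Wide   { double D; char C; };
    #pragma pack(2)                   // 16 bits <= pointer width: MaxFieldAlignment becomes 2
    struct Narrow { double D; char C; };
    #pragma pack()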
 
 void
 MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) {
-  // Calculate pointer size and alignment.
-  PointerSize =
-      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
-  PointerAlignment = PointerSize;
-  if (!MaxFieldAlignment.isZero())
-    PointerAlignment = std::min(PointerAlignment, MaxFieldAlignment);
-
-  // Initialize information about the bases.
+  HasZeroSizedSubObject = false;
+  LeadsWithZeroSizedBase = false;
+  HasOwnVFPtr = false;
   HasVBPtr = false;
-  HasExtendableVFPtr = false;
-  SharedVBPtrBase = 0;
   PrimaryBase = 0;
-  VirtualAlignment = CharUnits::One();
-  AlignAfterVBases = Is64BitMode;
-
-  // If the record has a dynamic base class, attempt to choose a primary base
-  // class. It is the first (in direct base class order) non-virtual dynamic
-  // base class, if one exists.
-  for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
-                                                e = RD->bases_end();
-       i != e; ++i) {
-    const CXXRecordDecl *BaseDecl =
-        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
-    const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
-    // Handle forced alignment.
-    if (Layout.getAlignAfterVBases())
-      AlignAfterVBases = true;
-    // Handle virtual bases.
-    if (i->isVirtual()) {
-      VirtualAlignment = std::max(VirtualAlignment, Layout.getAlignment());
-      HasVBPtr = true;
-      continue;
-    }
-    // We located a primary base class!
-    if (!PrimaryBase && Layout.hasExtendableVFPtr()) {
-      PrimaryBase = BaseDecl;
-      HasExtendableVFPtr = true;
-    }
-    // We located a base to share a VBPtr with!
-    if (!SharedVBPtrBase && Layout.hasVBPtr()) {
-      SharedVBPtrBase = BaseDecl;
-      HasVBPtr = true;
-    }
-    updateAlignment(Layout.getAlignment());
-  }
-
-  // Use LayoutFields to compute the alignment of the fields.  The layout
-  // is discarded.  This is the simplest way to get all of the bit-field
-  // behavior correct and is not actually very expensive.
-  layoutFields(RD);
-  Size = CharUnits::Zero();
-  BasesAndFieldsAlignment = Alignment;
-  FieldOffsets.clear();
-}
-
-void MicrosoftRecordLayoutBuilder::layoutVFPtr(const CXXRecordDecl *RD) {
-  // If we have a primary base then our VFPtr was already laid out
-  if (PrimaryBase)
-    return;
-
-  // Look at all of our methods to determine if we need a VFPtr.  We need a
-  // vfptr if we define a new virtual function.
-  if (!HasExtendableVFPtr && RD->isDynamicClass())
-    for (CXXRecordDecl::method_iterator i = RD->method_begin(),
-                                        e = RD->method_end();
-         !HasExtendableVFPtr && i != e; ++i)
-      HasExtendableVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
-  if (!HasExtendableVFPtr)
-    return;
-
-  // MSVC 32 (but not 64) potentially over-aligns the vf-table pointer by giving
-  // it the max alignment of all the non-virtual data in the class.  The
-  // resulting layout is essentially { vftbl, { nvdata } }.  This is completely
-  // unnecessary, but we're not here to pass judgment.
-  updateAlignment(PointerAlignment);
-  if (Is64BitMode)
-    Size = Size.RoundUpToAlignment(PointerAlignment) + PointerSize;
-  else
-    Size = Size.RoundUpToAlignment(PointerAlignment) + Alignment;
+  SharedVBPtrBase = 0;
+  // Calculate pointer size and alignment.  These are used for vfptr and vbprt
+  // injection.
+  PointerInfo.Size =
+      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+  PointerInfo.Alignment = PointerInfo.Size;
+  // Respect pragma pack.
+  if (!MaxFieldAlignment.isZero())
+    PointerInfo.Alignment = std::min(PointerInfo.Alignment, MaxFieldAlignment);
 }
 
 void
 MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
-  LazyEmptyBase = 0;
-  LastBaseWasEmpty = false;
-  LastNonVirtualBaseHasVBPtr = false;
-
-  // Lay out the primary base first.
-  if (PrimaryBase)
-    layoutNonVirtualBase(PrimaryBase);
-
+  // The MS-ABI lays out all bases that contain leading vfptrs before it lays
+  // out any bases that do not contain vfptrs.  We implement this as two passes
+  // over the bases.  This approach guarantees that the primary base is laid out
+  // first.  We use these passes to calculate some additional aggregated
+  // information about the bases, such as required alignment and the presence of
+  // zero sized members.
+  const ASTRecordLayout* PreviousBaseLayout = 0;
   // Iterate through the bases and lay out the non-virtual ones.
-  for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
-                                                e = RD->bases_end();
-       i != e; ++i) {
-    if (i->isVirtual())
+  for (const auto &I : RD->bases()) {
+    const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+    const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+    // Mark and skip virtual bases.
+    if (I.isVirtual()) {
+      HasVBPtr = true;
       continue;
-    const CXXRecordDecl *BaseDecl =
-        cast<CXXRecordDecl>(i->getType()->castAs<RecordType>()->getDecl());
-    if (BaseDecl != PrimaryBase)
-      layoutNonVirtualBase(BaseDecl);
-  }
-}
-
-void
-MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(const CXXRecordDecl *RD) {
-  const ASTRecordLayout *Layout = RD ? &Context.getASTRecordLayout(RD) : 0;
-
-  // If we have a lazy empty base we haven't laid out yet, do that now.
-  if (LazyEmptyBase) {
-    const ASTRecordLayout &LazyLayout =
-        Context.getASTRecordLayout(LazyEmptyBase);
-    Size = Size.RoundUpToAlignment(LazyLayout.getAlignment());
-    // If the last non-virtual base has a vbptr we add a byte of padding for no
-    // obvious reason.
-    if (LastNonVirtualBaseHasVBPtr)
-      Size++;
-    Bases.insert(std::make_pair(LazyEmptyBase, Size));
-    // Empty bases only consume space when followed by another empty base.
-    if (RD && Layout->getNonVirtualSize().isZero()) {
-      LastBaseWasEmpty = true;
-      Size++;
     }
-    LazyEmptyBase = 0;
-    LastNonVirtualBaseHasVBPtr = false;
+    // Track RequiredAlignment for all bases in this pass.
+    RequiredAlignment = std::max(RequiredAlignment,
+                                 BaseLayout.getRequiredAlignment());
+    // Check for a base to share a VBPtr with.
+    if (!SharedVBPtrBase && BaseLayout.hasVBPtr()) {
+      SharedVBPtrBase = BaseDecl;
+      HasVBPtr = true;
+    }
+    // Only lay out bases with extendable VFPtrs on the first pass.
+    if (!BaseLayout.hasExtendableVFPtr())
+      continue;
+    // If we don't have a primary base, this one qualifies.
+    if (!PrimaryBase) {
+      PrimaryBase = BaseDecl;
+      LeadsWithZeroSizedBase = BaseLayout.leadsWithZeroSizedBase();
+    }
+    // Lay out the base.
+    layoutNonVirtualBase(BaseDecl, BaseLayout, PreviousBaseLayout);
   }
-
-  // RD is null when flushing the final lazy base.
-  if (!RD)
-    return;
-
-  if (Layout->getNonVirtualSize().isZero()) {
-    LazyEmptyBase = RD;
-    return;
+  // Figure out if we need a fresh VFPtr for this class.
+  if (!PrimaryBase && RD->isDynamicClass())
+    for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+                                        e = RD->method_end();
+         !HasOwnVFPtr && i != e; ++i)
+      HasOwnVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
+  // If we don't have a primary base then we have a leading object that could
+  // itself lead with a zero-sized object, something we track.
+  bool CheckLeadingLayout = !PrimaryBase;
+  // Iterate through the bases and lay out the non-virtual ones.
+  for (const auto &I : RD->bases()) {
+    if (I.isVirtual())
+      continue;
+    const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+    const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+    // Only lay out bases without extendable VFPtrs on the second pass.
+    if (BaseLayout.hasExtendableVFPtr())
+      continue;
+    // If this is the first layout, check to see if it leads with a zero sized
+    // object.  If it does, so do we.
+    if (CheckLeadingLayout) {
+      CheckLeadingLayout = false;
+      LeadsWithZeroSizedBase = BaseLayout.leadsWithZeroSizedBase();
+    }
+    // Lay out the base.
+    layoutNonVirtualBase(BaseDecl, BaseLayout, PreviousBaseLayout);
   }
-
-  // Insert the base here.
-  CharUnits BaseOffset = Size.RoundUpToAlignment(Layout->getAlignment());
-  Bases.insert(std::make_pair(RD, BaseOffset));
-  Size = BaseOffset + Layout->getDataSize();
-  // Note: we don't update alignment here because it was accounted
-  // for during initalization.
-  LastBaseWasEmpty = false;
-  LastNonVirtualBaseHasVBPtr = Layout->hasVBPtr();
-}
-
-void MicrosoftRecordLayoutBuilder::layoutVBPtr(const CXXRecordDecl *RD) {
+  // Set our VBPtrOffset if we know it at this point.
   if (!HasVBPtr)
     VBPtrOffset = CharUnits::fromQuantity(-1);
   else if (SharedVBPtrBase) {
     const ASTRecordLayout &Layout = Context.getASTRecordLayout(SharedVBPtrBase);
     VBPtrOffset = Bases[SharedVBPtrBase] + Layout.getVBPtrOffset();
-  } else {
-    VBPtrOffset = Size.RoundUpToAlignment(PointerAlignment);
-    CharUnits OldSize = Size;
-    Size = VBPtrOffset + PointerSize;
-    if (BasesAndFieldsAlignment <= PointerAlignment) {
-      // Handle strange padding rules for the lazily placed base.  I have no
-      // explanation for why the last virtual base is padded in such an odd way.
-      // Two things to note about this padding are that the rules are different
-      // if the alignment of the bases+fields is <= to the alignemnt of a
-      // pointer and that the rule in 64-bit mode behaves differently depending
-      // on if the second to last base was also zero sized.
-      Size += OldSize % BasesAndFieldsAlignment.getQuantity();
-    } else {
-      if (Is64BitMode)
-        Size += LastBaseWasEmpty ? CharUnits::One() : CharUnits::Zero();
-      else
-        Size = OldSize + BasesAndFieldsAlignment;
-    }
-    updateAlignment(PointerAlignment);
   }
+}
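A minimal sketch of the two-pass ordering above (type names invented): the base that carries a vfptr is laid out first even though it is declared second.

    struct Plain { int X; };
    struct Dyn   { virtual void F(); };
    struct D : Plain, Dyn {};  // pass one places Dyn (extendable vfptr), pass two places Plain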
 
-  // Flush the lazy empty base.
-  layoutNonVirtualBase(0);
+void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
+    const CXXRecordDecl *BaseDecl,
+    const ASTRecordLayout &BaseLayout,
+    const ASTRecordLayout *&PreviousBaseLayout) {
+  // Insert padding between two bases if the first one is zero sized or
+  // contains a zero sized subobject and the second one is zero sized or
+  // leads with a zero sized base.
+  if (PreviousBaseLayout && PreviousBaseLayout->hasZeroSizedSubObject() &&
+      BaseLayout.leadsWithZeroSizedBase())
+    Size++;
+  ElementInfo Info = getAdjustedElementInfo(BaseLayout);
+  CharUnits BaseOffset = Size.RoundUpToAlignment(Info.Alignment);
+  Bases.insert(std::make_pair(BaseDecl, BaseOffset));
+  Size = BaseOffset + BaseLayout.getNonVirtualSize();
+  PreviousBaseLayout = &BaseLayout;
+  VBPtrOffset = Size;
 }
 
 void MicrosoftRecordLayoutBuilder::layoutFields(const RecordDecl *RD) {
   LastFieldIsNonZeroWidthBitfield = false;
-  for (RecordDecl::field_iterator Field = RD->field_begin(),
-                                  FieldEnd = RD->field_end();
-       Field != FieldEnd; ++Field)
-    layoutField(*Field);
-  Size = Size.RoundUpToAlignment(Alignment);
+  for (const auto *Field : RD->fields())
+    layoutField(Field);
 }
 
 void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) {
@@ -2415,20 +2458,14 @@
     return;
   }
   LastFieldIsNonZeroWidthBitfield = false;
-
-  std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
-  CharUnits FieldSize = FieldInfo.first;
-  CharUnits FieldAlign = FieldInfo.second;
-
-  updateAlignment(FieldAlign);
+  ElementInfo Info = getAdjustedElementInfo(FD);
   if (IsUnion) {
-    placeFieldAtZero();
-    Size = std::max(Size, FieldSize);
+    placeFieldAtOffset(CharUnits::Zero());
+    Size = std::max(Size, Info.Size);
   } else {
-    // Round up the current record size to the field's alignment boundary.
-    CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
+    CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
     placeFieldAtOffset(FieldOffset);
-    Size = FieldOffset + FieldSize;
+    Size = FieldOffset + Info.Size;
   }
 }
 
@@ -2438,39 +2475,31 @@
     layoutZeroWidthBitField(FD);
     return;
   }
-
-  std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
-  CharUnits FieldSize = FieldInfo.first;
-  CharUnits FieldAlign = FieldInfo.second;
-
+  ElementInfo Info = getAdjustedElementInfo(FD);
   // Clamp the bitfield to a containable size for the sake of being able
   // to lay them out.  Sema will throw an error.
-  if (Width > Context.toBits(FieldSize))
-    Width = Context.toBits(FieldSize);
-
+  if (Width > Context.toBits(Info.Size))
+    Width = Context.toBits(Info.Size);
   // Check to see if this bitfield fits into an existing allocation.  Note:
   // MSVC refuses to pack bitfields of formal types with different sizes
   // into the same allocation.
   if (!IsUnion && LastFieldIsNonZeroWidthBitfield &&
-      CurrentBitfieldSize == FieldSize && Width <= RemainingBitsInField) {
+      CurrentBitfieldSize == Info.Size && Width <= RemainingBitsInField) {
     placeFieldAtBitOffset(Context.toBits(Size) - RemainingBitsInField);
     RemainingBitsInField -= Width;
     return;
   }
-
   LastFieldIsNonZeroWidthBitfield = true;
-  CurrentBitfieldSize = FieldSize;
+  CurrentBitfieldSize = Info.Size;
   if (IsUnion) {
-    placeFieldAtZero();
-    Size = std::max(Size, FieldSize);
-    // TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
+    placeFieldAtOffset(CharUnits::Zero());
+    Size = std::max(Size, Info.Size);
   } else {
     // Allocate a new block of memory and place the bitfield in it.
-    CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
+    CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
     placeFieldAtOffset(FieldOffset);
-    Size = FieldOffset + FieldSize;
-    updateAlignment(FieldAlign);
-    RemainingBitsInField = Context.toBits(FieldSize) - Width;
+    Size = FieldOffset + Info.Size;
+    RemainingBitsInField = Context.toBits(Info.Size) - Width;
   }
 }
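A small illustration of the rule noted above (member names invented): changing the declared type of a bitfield starts a new allocation even when bits remain.

    struct S {
      int   A : 3;  // opens a 4-byte allocation
      int   B : 3;  // same formal type and it fits: packed into the same allocation
      short C : 3;  // different formal type size: starts a new 2-byte allocation
    };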
 
@@ -2478,120 +2507,139 @@
 MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) {
   // Zero-width bitfields are ignored unless they follow a non-zero-width
   // bitfield.
-  std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
-  CharUnits FieldSize = FieldInfo.first;
-  CharUnits FieldAlign = FieldInfo.second;
-
   if (!LastFieldIsNonZeroWidthBitfield) {
     placeFieldAtOffset(IsUnion ? CharUnits::Zero() : Size);
     // TODO: Add a Sema warning that MS ignores alignment for zero
-    // sized bitfields that occur after zero-size bitfields or non bitfields.
+    // sized bitfields that occur after zero-size bitfields or non-bitfields.
     return;
   }
-
   LastFieldIsNonZeroWidthBitfield = false;
+  ElementInfo Info = getAdjustedElementInfo(FD);
   if (IsUnion) {
-    placeFieldAtZero();
-    Size = std::max(Size, FieldSize);
+    placeFieldAtOffset(CharUnits::Zero());
+    Size = std::max(Size, Info.Size);
   } else {
     // Round up the current record size to the field's alignment boundary.
-    CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
+    CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
     placeFieldAtOffset(FieldOffset);
     Size = FieldOffset;
-    updateAlignment(FieldAlign);
   }
 }
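Illustrative only (member names invented): a zero-width bitfield matters here only when it follows a non-zero-width one, in which case it closes the open allocation.

    struct S {
      int A : 3;
      int   : 0;  // follows a non-zero-width bitfield: ends the current allocation
      int B : 3;  // starts a fresh allocation instead of sharing A's unit
    };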
 
+void MicrosoftRecordLayoutBuilder::injectVBPtr(const CXXRecordDecl *RD) {
+  if (!HasVBPtr || SharedVBPtrBase)
+    return;
+  // Inject the VBPointer at the injection site.
+  CharUnits InjectionSite = VBPtrOffset;
+  // But before we do, make sure it's properly aligned.
+  VBPtrOffset = VBPtrOffset.RoundUpToAlignment(PointerInfo.Alignment);
+  // Determine where the first field should be laid out after the vbptr.
+  CharUnits FieldStart = VBPtrOffset + PointerInfo.Size;
+  // Make sure that the amount we push the fields back by is a multiple of the
+  // alignment.
+  CharUnits Offset = (FieldStart - InjectionSite).RoundUpToAlignment(
+      std::max(RequiredAlignment, Alignment));
+  // Increase the size of the object and push back all fields by the offset
+  // amount.
+  Size += Offset;
+  for (SmallVector<uint64_t, 16>::iterator i = FieldOffsets.begin(),
+                                           e = FieldOffsets.end();
+       i != e; ++i)
+    *i += Context.toBits(Offset);
+  for (BaseOffsetsMapTy::iterator i = Bases.begin(), e = Bases.end();
+       i != e; ++i)
+       if (i->second >= InjectionSite)
+         i->second += Offset;
+}
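A worked instance of the arithmetic above, treating CharUnits as plain byte counts and assuming a 4-byte, 4-byte-aligned pointer, max(RequiredAlignment, Alignment) == 4, and an injection site at offset 2:

    VBPtrOffset = roundUp(2, 4)     = 4
    FieldStart  = 4 + 4             = 8
    Offset      = roundUp(8 - 2, 4) = 8

so the record grows by 8 bytes, every field is pushed back by 8, and every base at or beyond offset 2 is pushed back by 8 as well.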
+
+void MicrosoftRecordLayoutBuilder::injectVFPtr(const CXXRecordDecl *RD) {
+  if (!HasOwnVFPtr)
+    return;
+  // Make sure that the amount we push the struct back by is a multiple of the
+  // alignment.
+  CharUnits Offset = PointerInfo.Size.RoundUpToAlignment(
+      std::max(RequiredAlignment, Alignment));
+  // Increase the size of the object and push back all fields, the vbptr and all
+  // bases by the offset amount.
+  Size += Offset;
+  for (SmallVectorImpl<uint64_t>::iterator i = FieldOffsets.begin(),
+                                           e = FieldOffsets.end();
+       i != e; ++i)
+    *i += Context.toBits(Offset);
+  if (HasVBPtr)
+    VBPtrOffset += Offset;
+  for (BaseOffsetsMapTy::iterator i = Bases.begin(), e = Bases.end();
+       i != e; ++i)
+    i->second += Offset;
+}
+
 void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
   if (!HasVBPtr)
     return;
-
-  updateAlignment(VirtualAlignment);
-
-  // Zero-sized v-bases obey the alignment attribute so apply it here.  The
-  // alignment attribute is normally accounted for in FinalizeLayout.
-  if (unsigned MaxAlign = RD->getMaxAlignment())
-    updateAlignment(Context.toCharUnitsFromBits(MaxAlign));
-
-  llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordisp =
+  // Vtordisps are always 4 bytes (even in 64-bit mode)
+  CharUnits VtorDispSize = CharUnits::fromQuantity(4);
+  CharUnits VtorDispAlignment = VtorDispSize;
+  // vtordisps respect pragma pack.
+  if (!MaxFieldAlignment.isZero())
+    VtorDispAlignment = std::min(VtorDispAlignment, MaxFieldAlignment);
+  // The alignment of the vtordisp is at least the required alignment of the
+  // entire record.  This requirement may be present to support vtordisp
+  // injection.
+  for (const auto &I : RD->vbases()) {
+    const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+    const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+    RequiredAlignment =
+        std::max(RequiredAlignment, BaseLayout.getRequiredAlignment());
+  }
+  VtorDispAlignment = std::max(VtorDispAlignment, RequiredAlignment);
+  // Compute the vtordisp set.
+  llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordispSet =
       computeVtorDispSet(RD);
-
-  // If the last field we laid out was a non-zero length bitfield then add some
-  // extra padding for no obvious reason.
-  if (LastFieldIsNonZeroWidthBitfield)
-    Size += CurrentBitfieldSize;
-
   // Iterate through the virtual bases and lay them out.
-  for (CXXRecordDecl::base_class_const_iterator i = RD->vbases_begin(),
-                                                e = RD->vbases_end();
-       i != e; ++i) {
-    const CXXRecordDecl *BaseDecl =
-        cast<CXXRecordDecl>(i->getType()->castAs<RecordType>()->getDecl());
-    layoutVirtualBase(BaseDecl, HasVtordisp.count(BaseDecl));
+  const ASTRecordLayout* PreviousBaseLayout = 0;
+  for (const auto &I : RD->vbases()) {
+    const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+    const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+    bool HasVtordisp = HasVtordispSet.count(BaseDecl);
+    // If the last field we laid out was a non-zero length bitfield then add
+    // some extra padding for no obvious reason.
+    if (LastFieldIsNonZeroWidthBitfield)
+      Size += CurrentBitfieldSize;
+    // Insert padding between two bases if the first one is zero sized or
+    // contains a zero sized subobject and the second one is zero sized or
+    // leads with a zero sized base.  The padding between virtual bases is 4
+    // bytes (in both 32- and 64-bit modes) and always involves rounding up
+    // to the required alignment; we don't know why.
+    if (PreviousBaseLayout && PreviousBaseLayout->hasZeroSizedSubObject() &&
+        BaseLayout.leadsWithZeroSizedBase())
+      Size = Size.RoundUpToAlignment(VtorDispAlignment) + VtorDispSize;
+    // Insert the vtordisp.
+    if (HasVtordisp)
+      Size = Size.RoundUpToAlignment(VtorDispAlignment) + VtorDispSize;
+    // Insert the virtual base.
+    HasZeroSizedSubObject = false;
+    ElementInfo Info = getAdjustedElementInfo(BaseLayout);
+    CharUnits BaseOffset = Size.RoundUpToAlignment(Info.Alignment);
+    VBases.insert(std::make_pair(BaseDecl,
+        ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
+    Size = BaseOffset + BaseLayout.getNonVirtualSize();
+    PreviousBaseLayout = &BaseLayout;
   }
 }
 
-void MicrosoftRecordLayoutBuilder::layoutVirtualBase(const CXXRecordDecl *RD,
-                                                     bool HasVtordisp) {
-  if (LazyEmptyBase) {
-    const ASTRecordLayout &LazyLayout =
-        Context.getASTRecordLayout(LazyEmptyBase);
-    Size = Size.RoundUpToAlignment(LazyLayout.getAlignment());
-    VBases.insert(
-        std::make_pair(LazyEmptyBase, ASTRecordLayout::VBaseInfo(Size, false)));
-    // Empty bases only consume space when followed by another empty base.
-    // The space consumed is in an Alignment sized/aligned block and the v-base
-    // is placed at its alignment offset into the chunk, unless its alignment
-    // is less than 4 bytes, at which it is placed at 4 byte offset in the
-    // chunk.  We have no idea why.
-    if (RD && Context.getASTRecordLayout(RD).getNonVirtualSize().isZero())
-      Size = Size.RoundUpToAlignment(Alignment) + CharUnits::fromQuantity(4);
-    LazyEmptyBase = 0;
-  }
-
-  // RD is null when flushing the final lazy virtual base.
-  if (!RD)
-    return;
-
-  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-  if (Layout.getNonVirtualSize().isZero() && !HasVtordisp) {
-    LazyEmptyBase = RD;
-    return;
-  }
-
-  CharUnits BaseNVSize = Layout.getNonVirtualSize();
-  CharUnits BaseAlign = Layout.getAlignment();
-
-  // vtordisps are always 4 bytes (even in 64-bit mode)
-  if (HasVtordisp)
-    Size = Size.RoundUpToAlignment(Alignment) + CharUnits::fromQuantity(4);
-  Size = Size.RoundUpToAlignment(BaseAlign);
-
-  // Insert the base here.
-  CharUnits BaseOffset = Size.RoundUpToAlignment(BaseAlign);
-  VBases.insert(
-      std::make_pair(RD, ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
-  Size = BaseOffset + BaseNVSize;
-  // Note: we don't update alignment here because it was accounted for in
-  // InitializeLayout.
-}
-
-void MicrosoftRecordLayoutBuilder::finalizeCXXLayout(const CXXRecordDecl *RD) {
-  // Flush the lazy virtual base.
-  layoutVirtualBase(0, false);
-
-  if (RD->vbases_begin() == RD->vbases_end() || AlignAfterVBases)
+void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) {
+  // Respect required alignment.  Note that in 32-bit mode RequiredAlignment
+  // may be 0 and cause the size not to be updated.
+  DataSize = Size;
+  if (!RequiredAlignment.isZero()) {
+    Alignment = std::max(Alignment, RequiredAlignment);
     Size = Size.RoundUpToAlignment(Alignment);
-
-  if (Size.isZero())
+  }
+  // Zero-sized structures have size equal to their alignment.
+  if (Size.isZero()) {
+    HasZeroSizedSubObject = true;
+    LeadsWithZeroSizedBase = true;
     Size = Alignment;
-}
-
-void MicrosoftRecordLayoutBuilder::honorDeclspecAlign(const RecordDecl *RD) {
-  if (unsigned MaxAlign = RD->getMaxAlignment()) {
-    AlignAfterVBases = true;
-    updateAlignment(Context.toCharUnitsFromBits(MaxAlign));
-    Size = Size.RoundUpToAlignment(Alignment);
   }
 }
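Per the zero-size rule just above, an empty record is expected to take its alignment as its size, for example (MSVC extension syntax, type name invented):

    struct __declspec(align(8)) E {};  // Size == 0, so finalizeLayout sets Size = Alignment = 8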
 
@@ -2602,37 +2650,53 @@
     return true;
   // If any of a virtual bases non-virtual bases (recursively) requires a
   // vtordisp than so does this virtual base.
-  for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
-                                                e = RD->bases_end();
-       i != e; ++i)
-    if (!i->isVirtual() &&
+  for (const auto &I : RD->bases())
+    if (!I.isVirtual() &&
         RequiresVtordisp(
             HasVtordisp,
-            cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl())))
+            cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl())))
       return true;
   return false;
 }
 
 llvm::SmallPtrSet<const CXXRecordDecl *, 2>
 MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) {
-  llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordisp;
+  llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordispSet;
+
+  // /vd0 or #pragma vtordisp(0): Never use vtordisps when used as a vbase.
+  if (RD->getMSVtorDispMode() == MSVtorDispAttr::Never)
+    return HasVtordispSet;
+
+  // /vd2 or #pragma vtordisp(2): Always use vtordisps for virtual bases with
+  // vftables.
+  if (RD->getMSVtorDispMode() == MSVtorDispAttr::ForVFTable) {
+    for (const auto &I : RD->vbases()) {
+      const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+      const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
+      if (Layout.hasExtendableVFPtr())
+        HasVtordispSet.insert(BaseDecl);
+    }
+    return HasVtordispSet;
+  }
+
+  // /vd1 or #pragma vtordisp(1): Try to guess based on whether we think it's
+  // possible for a partially constructed object with virtual base overrides to
+  // escape a non-trivial constructor.
+  assert(RD->getMSVtorDispMode() == MSVtorDispAttr::ForVBaseOverride);
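For reference, a sketch of the source-level knobs that select these modes (MSVC pragma syntax, type names invented, assuming MSVC-compatibility mode):

    struct Base { virtual void F(); };
    #pragma vtordisp(push, 2)                    // MSVtorDispAttr::ForVFTable
    struct ForVft : virtual Base { void F(); };  // every vbase with a vftable gets a vtordisp
    #pragma vtordisp(pop)
    #pragma vtordisp(0)                          // MSVtorDispAttr::Never
    struct NoDisp : virtual Base { NoDisp(); void F(); };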
 
   // If any of our bases need a vtordisp for this type, so do we.  Check our
   // direct bases for vtordisp requirements.
-  for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
-                                                e = RD->bases_end();
-       i != e; ++i) {
+  for (const auto &I : RD->bases()) {
     const CXXRecordDecl *BaseDecl =
-        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+        cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
     const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
     for (ASTRecordLayout::VBaseOffsetsMapTy::const_iterator
              bi = Layout.getVBaseOffsetsMap().begin(),
              be = Layout.getVBaseOffsetsMap().end();
          bi != be; ++bi)
       if (bi->second.hasVtorDisp())
-        HasVtordisp.insert(bi->first);
+        HasVtordispSet.insert(bi->first);
   }
-
   // If we define a constructor or destructor and override a function that is
   // defined in a virtual base's vtable, that virtual bases need a vtordisp.
   // Here we collect a list of classes with vtables for which our virtual bases
@@ -2643,36 +2707,31 @@
   if (RD->hasUserDeclaredConstructor() || RD->hasUserDeclaredDestructor()) {
     llvm::SmallPtrSet<const CXXMethodDecl *, 8> Work;
     // Seed the working set with our non-destructor virtual methods.
-    for (CXXRecordDecl::method_iterator i = RD->method_begin(),
-                                        e = RD->method_end();
-         i != e; ++i)
-      if ((*i)->isVirtual() && !isa<CXXDestructorDecl>(*i))
-        Work.insert(*i);
+    for (const auto *I : RD->methods())
+      if (I->isVirtual() && !isa<CXXDestructorDecl>(I))
+        Work.insert(I);
     while (!Work.empty()) {
       const CXXMethodDecl *MD = *Work.begin();
       CXXMethodDecl::method_iterator i = MD->begin_overridden_methods(),
                                      e = MD->end_overridden_methods();
       if (i == e)
         // If a virtual method has no-overrides it lives in its parent's vtable.
-        HasVtordisp.insert(MD->getParent());
+        HasVtordispSet.insert(MD->getParent());
       else
         Work.insert(i, e);
       // We've finished processing this element, remove it from the working set.
       Work.erase(MD);
     }
   }
-
   // Re-check all of our vbases for vtordisp requirements (in case their
   // non-virtual bases have vtordisp requirements).
-  for (CXXRecordDecl::base_class_const_iterator i = RD->vbases_begin(),
-                                                e = RD->vbases_end();
-       i != e; ++i) {
-    const CXXRecordDecl *BaseDecl =  i->getType()->getAsCXXRecordDecl();
-    if (!HasVtordisp.count(BaseDecl) && RequiresVtordisp(HasVtordisp, BaseDecl))
-      HasVtordisp.insert(BaseDecl);
+  for (const auto &I : RD->vbases()) {
+    const CXXRecordDecl *BaseDecl =  I.getType()->getAsCXXRecordDecl();
+    if (!HasVtordispSet.count(BaseDecl) &&
+        RequiresVtordisp(HasVtordispSet, BaseDecl))
+      HasVtordispSet.insert(BaseDecl);
   }
-
-  return HasVtordisp;
+  return HasVtordispSet;
 }
 
 /// \brief Get or compute information about the layout of the specified record
@@ -2684,19 +2743,20 @@
   if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
     Builder.cxxLayout(RD);
     return new (*this) ASTRecordLayout(
-        *this, Builder.Size, Builder.Alignment,
-        Builder.HasExtendableVFPtr && !Builder.PrimaryBase,
-        Builder.HasExtendableVFPtr,
-        Builder.VBPtrOffset, Builder.DataSize, Builder.FieldOffsets.data(),
-        Builder.FieldOffsets.size(), Builder.DataSize,
-        Builder.NonVirtualAlignment, CharUnits::Zero(), Builder.PrimaryBase,
-        false, Builder.SharedVBPtrBase, Builder.AlignAfterVBases, Builder.Bases,
-        Builder.VBases);
+        *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
+        Builder.HasOwnVFPtr,
+        Builder.HasOwnVFPtr || Builder.PrimaryBase,
+        Builder.VBPtrOffset, Builder.NonVirtualSize, Builder.FieldOffsets.data(),
+        Builder.FieldOffsets.size(), Builder.NonVirtualSize,
+        Builder.Alignment, CharUnits::Zero(), Builder.PrimaryBase,
+        false, Builder.SharedVBPtrBase,
+        Builder.HasZeroSizedSubObject, Builder.LeadsWithZeroSizedBase,
+        Builder.Bases, Builder.VBases);
   } else {
     Builder.layout(D);
     return new (*this) ASTRecordLayout(
-        *this, Builder.Size, Builder.Alignment, Builder.Size,
-        Builder.FieldOffsets.data(), Builder.FieldOffsets.size());
+        *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
+        Builder.Size, Builder.FieldOffsets.data(), Builder.FieldOffsets.size());
   }
 }
 
@@ -2747,6 +2807,8 @@
     NewEntry =
       new (*this) ASTRecordLayout(*this, Builder.getSize(), 
                                   Builder.Alignment,
+                                  /*RequiredAlignment: used by MS-ABI*/
+                                  Builder.Alignment,
                                   Builder.HasOwnVFPtr,
                                   RD->isDynamicClass(),
                                   CharUnits::fromQuantity(-1),
@@ -2758,7 +2820,7 @@
                                   EmptySubobjects.SizeOfLargestEmptySubobject,
                                   Builder.PrimaryBase,
                                   Builder.PrimaryBaseIsVirtual,
-                                  0, true,
+                                  0, false, false,
                                   Builder.Bases, Builder.VBases);
   } else {
     RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
@@ -2767,6 +2829,8 @@
     NewEntry =
       new (*this) ASTRecordLayout(*this, Builder.getSize(), 
                                   Builder.Alignment,
+                                  /*RequiredAlignment: used by MS-ABI*/
+                                  Builder.Alignment,
                                   Builder.getSize(),
                                   Builder.FieldOffsets.data(),
                                   Builder.FieldOffsets.size());
@@ -2830,10 +2894,8 @@
     const IndirectFieldDecl *IFD = cast<IndirectFieldDecl>(VD);
 
     OffsetInBits = 0;
-    for (IndirectFieldDecl::chain_iterator CI = IFD->chain_begin(),
-                                           CE = IFD->chain_end();
-         CI != CE; ++CI)
-      OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(*CI));
+    for (const auto *CI : IFD->chain())
+      OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(CI));
   }
 
   return OffsetInBits;
@@ -2876,6 +2938,8 @@
   const ASTRecordLayout *NewEntry =
     new (*this) ASTRecordLayout(*this, Builder.getSize(), 
                                 Builder.Alignment,
+                                /*RequiredAlignment: used by MS-ABI*/
+                                Builder.Alignment,
                                 Builder.getDataSize(),
                                 Builder.FieldOffsets.data(),
                                 Builder.FieldOffsets.size());
@@ -2928,19 +2992,27 @@
     OS << '(' << *RD << " vftable pointer)\n";
   }
 
-  // Dump (non-virtual) bases
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-         E = RD->bases_end(); I != E; ++I) {
-    assert(!I->getType()->isDependentType() &&
+  // Collect nvbases.
+  SmallVector<const CXXRecordDecl *, 4> Bases;
+  for (const auto &I : RD->bases()) {
+    assert(!I.getType()->isDependentType() &&
            "Cannot layout class with dependent bases.");
-    if (I->isVirtual())
-      continue;
+    if (!I.isVirtual())
+      Bases.push_back(I.getType()->getAsCXXRecordDecl());
+  }
 
-    const CXXRecordDecl *Base =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+  // Sort nvbases by offset.
+  std::stable_sort(Bases.begin(), Bases.end(),
+                   [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
+    return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
+  });
 
+  // Dump (non-virtual) bases
+  for (SmallVectorImpl<const CXXRecordDecl *>::iterator I = Bases.begin(),
+                                                        E = Bases.end();
+       I != E; ++I) {
+    const CXXRecordDecl *Base = *I;
     CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
-
     DumpCXXRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
                         Base == PrimaryBase ? "(primary base)" : "(base)",
                         /*IncludeVirtualBases=*/false);
@@ -2979,11 +3051,10 @@
   // Dump virtual bases.
   const ASTRecordLayout::VBaseOffsetsMapTy &vtordisps = 
     Layout.getVBaseOffsetsMap();
-  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
-         E = RD->vbases_end(); I != E; ++I) {
-    assert(I->isVirtual() && "Found non-virtual class!");
+  for (const auto &I : RD->vbases()) {
+    assert(I.isVirtual() && "Found non-virtual class!");
     const CXXRecordDecl *VBase =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
 
@@ -3006,7 +3077,7 @@
 
   PrintIndentNoOffset(OS, IndentLevel - 1);
   OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
-  OS << ", nvalign=" << Layout.getNonVirtualAlign().getQuantity() << "]\n";
+  OS << ", nvalign=" << Layout.getNonVirtualAlignment().getQuantity() << "]\n";
   OS << '\n';
 }
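
The dump change above first collects the non-virtual bases and then orders them by their layout offset with std::stable_sort and a lambda, instead of walking them in declaration order. A minimal, self-contained sketch of that sort-by-computed-key pattern (the Base struct and offsetOf helper are stand-ins, not the Clang types):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Base { const char *Name; unsigned Offset; };

// Stand-in for ASTRecordLayout::getBaseClassOffset in the real code.
static unsigned offsetOf(const Base &B) { return B.Offset; }

int main() {
  std::vector<Base> Bases = {{"C", 8}, {"A", 0}, {"B", 8}};
  // stable_sort keeps declaration order ("C" before "B") for bases that
  // happen to share an offset, so the dump stays deterministic.
  std::stable_sort(Bases.begin(), Bases.end(),
                   [](const Base &L, const Base &R) {
                     return offsetOf(L) < offsetOf(R);
                   });
  for (const Base &B : Bases)
    std::printf("%s at offset %u\n", B.Name, B.Offset);
}
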
 
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index de85161..a03ef00 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -803,7 +803,7 @@
 Expr *CXXForRangeStmt::getRangeInit() {
   DeclStmt *RangeStmt = getRangeStmt();
   VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl());
-  assert(RangeDecl &&& "for-range should have a single var decl");
+  assert(RangeDecl && "for-range should have a single var decl");
   return RangeDecl->getInit();
 }
 
@@ -1100,15 +1100,14 @@
 }
 
 bool CapturedStmt::capturesVariable(const VarDecl *Var) const {
-  for (const_capture_iterator I = capture_begin(),
-                              E = capture_end(); I != E; ++I) {
-    if (!I->capturesVariable())
+  for (const auto &I : captures()) {
+    if (!I.capturesVariable())
       continue;
 
     // This does not handle variable redeclarations. This should be
     // extended to capture variables with redeclarations, for example
     // a thread-private variable in OpenMP.
-    if (I->getCapturedVar() == Var)
+    if (I.getCapturedVar() == Var)
       return true;
   }
 
@@ -1130,8 +1129,9 @@
                                            SourceLocation LParenLoc,
                                            SourceLocation EndLoc,
                                            ArrayRef<Expr *> VL) {
-  void *Mem = C.Allocate(sizeof(OMPPrivateClause) + sizeof(Expr *) * VL.size(),
-                         llvm::alignOf<OMPPrivateClause>());
+  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
+                                                  llvm::alignOf<Expr *>()) +
+                         sizeof(Expr *) * VL.size());
   OMPPrivateClause *Clause = new (Mem) OMPPrivateClause(StartLoc, LParenLoc,
                                                         EndLoc, VL.size());
   Clause->setVarRefs(VL);
@@ -1140,8 +1140,9 @@
 
 OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
                                                 unsigned N) {
-  void *Mem = C.Allocate(sizeof(OMPPrivateClause) + sizeof(Expr *) * N,
-                         llvm::alignOf<OMPPrivateClause>());
+  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
+                                                  llvm::alignOf<Expr *>()) +
+                         sizeof(Expr *) * N);
   return new (Mem) OMPPrivateClause(N);
 }
 
@@ -1150,9 +1151,9 @@
                                                      SourceLocation LParenLoc,
                                                      SourceLocation EndLoc,
                                                      ArrayRef<Expr *> VL) {
-  void *Mem = C.Allocate(sizeof(OMPFirstprivateClause) +
-                         sizeof(Expr *) * VL.size(),
-                         llvm::alignOf<OMPFirstprivateClause>());
+  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
+                                                  llvm::alignOf<Expr *>()) +
+                         sizeof(Expr *) * VL.size());
   OMPFirstprivateClause *Clause = new (Mem) OMPFirstprivateClause(StartLoc,
                                                                   LParenLoc,
                                                                   EndLoc,
@@ -1163,8 +1164,9 @@
 
 OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
                                                           unsigned N) {
-  void *Mem = C.Allocate(sizeof(OMPFirstprivateClause) + sizeof(Expr *) * N,
-                         llvm::alignOf<OMPFirstprivateClause>());
+  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
+                                                  llvm::alignOf<Expr *>()) +
+                         sizeof(Expr *) * N);
   return new (Mem) OMPFirstprivateClause(N);
 }
 
@@ -1173,8 +1175,9 @@
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc,
                                          ArrayRef<Expr *> VL) {
-  void *Mem = C.Allocate(sizeof(OMPSharedClause) + sizeof(Expr *) * VL.size(),
-                         llvm::alignOf<OMPSharedClause>());
+  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
+                                                  llvm::alignOf<Expr *>()) +
+                         sizeof(Expr *) * VL.size());
   OMPSharedClause *Clause = new (Mem) OMPSharedClause(StartLoc, LParenLoc,
                                                       EndLoc, VL.size());
   Clause->setVarRefs(VL);
@@ -1183,15 +1186,38 @@
 
 OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C,
                                               unsigned N) {
-  void *Mem = C.Allocate(sizeof(OMPSharedClause) + sizeof(Expr *) * N,
-                         llvm::alignOf<OMPSharedClause>());
+  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
+                                                  llvm::alignOf<Expr *>()) +
+                         sizeof(Expr *) * N);
   return new (Mem) OMPSharedClause(N);
 }
 
+OMPCopyinClause *OMPCopyinClause::Create(const ASTContext &C,
+                                         SourceLocation StartLoc,
+                                         SourceLocation LParenLoc,
+                                         SourceLocation EndLoc,
+                                         ArrayRef<Expr *> VL) {
+  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
+                                                  llvm::alignOf<Expr *>()) +
+                         sizeof(Expr *) * VL.size());
+  OMPCopyinClause *Clause = new (Mem) OMPCopyinClause(StartLoc, LParenLoc,
+                                                      EndLoc, VL.size());
+  Clause->setVarRefs(VL);
+  return Clause;
+}
+
+OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C,
+                                              unsigned N) {
+  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
+                                                  llvm::alignOf<Expr *>()) +
+                         sizeof(Expr *) * N);
+  return new (Mem) OMPCopyinClause(N);
+}
+
 void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
-  assert(Clauses.size() == this->Clauses.size() &&
+  assert(Clauses.size() == getNumClauses() &&
          "Number of clauses is not the same as the preallocated buffer");
-  std::copy(Clauses.begin(), Clauses.end(), this->Clauses.begin());
+  std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
 }
 
 OMPParallelDirective *OMPParallelDirective::Create(
@@ -1200,9 +1226,10 @@
                                               SourceLocation EndLoc,
                                               ArrayRef<OMPClause *> Clauses,
                                               Stmt *AssociatedStmt) {
-  void *Mem = C.Allocate(sizeof(OMPParallelDirective) +
-                         sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *),
-                         llvm::alignOf<OMPParallelDirective>());
+  unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
+                                           llvm::alignOf<OMPClause *>());
+  void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+                         sizeof(Stmt *));
   OMPParallelDirective *Dir = new (Mem) OMPParallelDirective(StartLoc, EndLoc,
                                                              Clauses.size());
   Dir->setClauses(Clauses);
@@ -1211,10 +1238,39 @@
 }
 
 OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
-                                                        unsigned N,
+                                                        unsigned NumClauses,
                                                         EmptyShell) {
-  void *Mem = C.Allocate(sizeof(OMPParallelDirective) +
-                         sizeof(OMPClause *) * N + sizeof(Stmt *),
-                         llvm::alignOf<OMPParallelDirective>());
-  return new (Mem) OMPParallelDirective(N);
+  unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
+                                           llvm::alignOf<OMPClause *>());
+  void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+                         sizeof(Stmt *));
+  return new (Mem) OMPParallelDirective(NumClauses);
 }
+
+OMPSimdDirective *OMPSimdDirective::Create(const ASTContext &C,
+                                           SourceLocation StartLoc,
+                                           SourceLocation EndLoc,
+                                           ArrayRef<OMPClause *> Clauses,
+                                           Stmt *AssociatedStmt) {
+  unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
+                                           llvm::alignOf<OMPClause *>());
+  void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+                         sizeof(Stmt *));
+  OMPSimdDirective *Dir = new (Mem) OMPSimdDirective(StartLoc, EndLoc,
+                                                     1, Clauses.size());
+  Dir->setClauses(Clauses);
+  Dir->setAssociatedStmt(AssociatedStmt);
+  return Dir;
+}
+
+OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
+                                                unsigned NumClauses,
+                                                unsigned CollapsedNum,
+                                                EmptyShell) {
+  unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
+                                           llvm::alignOf<OMPClause *>());
+  void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+                         sizeof(Stmt *));
+  return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
+}
+
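
The clause Create/CreateEmpty rewrites above change the allocation from "sizeof(Clause) plus N * sizeof(Expr *), aligned to the clause type" to "sizeof(Clause) rounded up to alignof(Expr *), plus the array", so the Expr * array placed immediately after the node starts on a properly aligned boundary. A rough sketch of that trailing-array pattern, using plain operator new and invented stand-in types rather than ASTContext::Allocate:

#include <cstddef>
#include <new>

struct Header { unsigned NumElems; }; // stand-in for an OMP clause node
using Elem = void *;                  // stand-in for Expr *

// Same idea as llvm::RoundUpToAlignment(Size, Align).
constexpr std::size_t roundUpToAlignment(std::size_t Size, std::size_t Align) {
  return (Size + Align - 1) / Align * Align;
}

Header *createWithTrailingArray(unsigned N) {
  // Rounding sizeof(Header) up to alignof(Elem) guarantees that the trailing
  // Elem array, which starts right after the header, is correctly aligned.
  std::size_t HeaderSize = roundUpToAlignment(sizeof(Header), alignof(Elem));
  void *Mem = ::operator new(HeaderSize + sizeof(Elem) * N);
  Header *H = new (Mem) Header{N};
  Elem *Trailing =
      reinterpret_cast<Elem *>(static_cast<char *>(Mem) + HeaderSize);
  for (unsigned i = 0; i != N; ++i)
    Trailing[i] = nullptr; // the real setVarRefs would copy the list in here
  return H;                // cleanup omitted; the AST allocator never frees
}
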
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 0ecb5b5..7ad5491 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -70,6 +70,7 @@
     void PrintCallArgs(CallExpr *E);
     void PrintRawSEHExceptHandler(SEHExceptStmt *S);
     void PrintRawSEHFinallyStmt(SEHFinallyStmt *S);
+    void PrintOMPExecutableDirective(OMPExecutableDirective *S);
 
     void PrintExpr(Expr *E) {
       if (E)
@@ -89,7 +90,7 @@
           return;
       else StmtVisitor<StmtPrinter>::Visit(S);
     }
-    
+
     void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
       Indent() << "<<unknown stmt type>>\n";
     }
@@ -113,9 +114,8 @@
 /// with no newline after the }.
 void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
   OS << "{\n";
-  for (CompoundStmt::body_iterator I = Node->body_begin(), E = Node->body_end();
-       I != E; ++I)
-    PrintStmt(*I);
+  for (auto *I : Node->body())
+    PrintStmt(I);
 
   Indent() << "}";
 }
@@ -125,11 +125,7 @@
 }
 
 void StmtPrinter::PrintRawDeclStmt(const DeclStmt *S) {
-  DeclStmt::const_decl_iterator Begin = S->decl_begin(), End = S->decl_end();
-  SmallVector<Decl*, 2> Decls;
-  for ( ; Begin != End; ++Begin)
-    Decls.push_back(*Begin);
-
+  SmallVector<Decl*, 2> Decls(S->decls());
   Decl::printGroup(Decls.data(), Decls.size(), OS, Policy, IndentLevel);
 }
 
@@ -330,7 +326,8 @@
   PrintExpr(Node->getRangeInit());
   OS << ") {\n";
   PrintStmt(Node->getBody());
-  Indent() << "}\n";
+  Indent() << "}";
+  if (Policy.IncludeNewlines) OS << "\n";
 }
 
 void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
@@ -350,21 +347,25 @@
 }
 
 void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
-  Indent() << "goto " << Node->getLabel()->getName() << ";\n";
+  Indent() << "goto " << Node->getLabel()->getName() << ";";
+  if (Policy.IncludeNewlines) OS << "\n";
 }
 
 void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) {
   Indent() << "goto *";
   PrintExpr(Node->getTarget());
-  OS << ";\n";
+  OS << ";";
+  if (Policy.IncludeNewlines) OS << "\n";
 }
 
 void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) {
-  Indent() << "continue;\n";
+  Indent() << "continue;";
+  if (Policy.IncludeNewlines) OS << "\n";
 }
 
 void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
-  Indent() << "break;\n";
+  Indent() << "break;";
+  if (Policy.IncludeNewlines) OS << "\n";
 }
 
 
@@ -374,7 +375,8 @@
     OS << " ";
     PrintExpr(Node->getRetValue());
   }
-  OS << ";\n";
+  OS << ";";
+  if (Policy.IncludeNewlines) OS << "\n";
 }
 
 
@@ -437,7 +439,8 @@
     VisitStringLiteral(Node->getClobberStringLiteral(i));
   }
 
-  OS << ");\n";
+  OS << ");";
+  if (Policy.IncludeNewlines) OS << "\n";
 }
 
 void StmtPrinter::VisitMSAsmStmt(MSAsmStmt *Node) {
@@ -586,16 +589,36 @@
 namespace {
 class OMPClausePrinter : public OMPClauseVisitor<OMPClausePrinter> {
   raw_ostream &OS;
+  const PrintingPolicy &Policy;
   /// \brief Process clauses with list of variables.
   template <typename T>
   void VisitOMPClauseList(T *Node, char StartSym);
 public:
-  OMPClausePrinter(raw_ostream &OS) : OS(OS) { }
+  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
+    : OS(OS), Policy(Policy) { }
 #define OPENMP_CLAUSE(Name, Class)                              \
   void Visit##Class(Class *S);
 #include "clang/Basic/OpenMPKinds.def"
 };
 
+void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
+  OS << "if(";
+  Node->getCondition()->printPretty(OS, 0, Policy, 0);
+  OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
+  OS << "num_threads(";
+  Node->getNumThreads()->printPretty(OS, 0, Policy, 0);
+  OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
+  OS << "safelen(";
+  Node->getSafelen()->printPretty(OS, 0, Policy, 0);
+  OS << ")";
+}
+
 void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
   OS << "default("
      << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
@@ -606,9 +629,15 @@
 void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
   for (typename T::varlist_iterator I = Node->varlist_begin(),
                                     E = Node->varlist_end();
-         I != E; ++I)
-    OS << (I == Node->varlist_begin() ? StartSym : ',')
-       << *cast<NamedDecl>(cast<DeclRefExpr>(*I)->getDecl());
+         I != E; ++I) {
+    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*I)) {
+      OS << (I == Node->varlist_begin() ? StartSym : ',');
+      cast<NamedDecl>(DRE->getDecl())->printQualifiedName(OS);
+    } else {
+      OS << (I == Node->varlist_begin() ? StartSym : ',');
+      (*I)->printPretty(OS, 0, Policy, 0);
+    }
+  }
 }
 
 void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
@@ -635,17 +664,23 @@
   }
 }
 
+void OMPClausePrinter::VisitOMPCopyinClause(OMPCopyinClause *Node) {
+  if (!Node->varlist_empty()) {
+    OS << "copyin";
+    VisitOMPClauseList(Node, '(');
+    OS << ")";
+  }
+}
+
 }
 
 //===----------------------------------------------------------------------===//
 //  OpenMP directives printing methods
 //===----------------------------------------------------------------------===//
 
-void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
-  Indent() << "#pragma omp parallel ";
-
-  OMPClausePrinter Printer(OS);
-  ArrayRef<OMPClause *> Clauses = Node->clauses();
+void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S) {
+  OMPClausePrinter Printer(OS, Policy);
+  ArrayRef<OMPClause *> Clauses = S->clauses();
   for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end();
        I != E; ++I)
     if (*I && !(*I)->isImplicit()) {
@@ -653,13 +688,24 @@
       OS << ' ';
     }
   OS << "\n";
-  if (Node->getAssociatedStmt()) {
-    assert(isa<CapturedStmt>(Node->getAssociatedStmt()) &&
+  if (S->getAssociatedStmt()) {
+    assert(isa<CapturedStmt>(S->getAssociatedStmt()) &&
            "Expected captured statement!");
-    Stmt *CS = cast<CapturedStmt>(Node->getAssociatedStmt())->getCapturedStmt();
+    Stmt *CS = cast<CapturedStmt>(S->getAssociatedStmt())->getCapturedStmt();
     PrintStmt(CS);
   }
 }
+
+void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
+  Indent() << "#pragma omp parallel ";
+  PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPSimdDirective(OMPSimdDirective *Node) {
+  Indent() << "#pragma omp simd ";
+  PrintOMPExecutableDirective(Node);
+}
+
 //===----------------------------------------------------------------------===//
 //  Expr printing methods.
 //===----------------------------------------------------------------------===//
@@ -709,13 +755,15 @@
 void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
   if (Node->isSuperReceiver())
     OS << "super.";
-  else if (Node->getBase()) {
+  else if (Node->isObjectReceiver() && Node->getBase()) {
     PrintExpr(Node->getBase());
     OS << ".";
+  } else if (Node->isClassReceiver() && Node->getClassReceiver()) {
+    OS << Node->getClassReceiver()->getName() << ".";
   }
 
   if (Node->isImplicitProperty())
-    OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+    Node->getImplicitPropertyGetter()->getSelector().print(OS);
   else
     OS << Node->getExplicitProperty()->getName();
 }
@@ -1041,7 +1089,7 @@
   PrintExpr(Node->getInitializer());
 }
 void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
-  // No need to print anything, simply forward to the sub expression.
+  // No need to print anything, simply forward to the subexpression.
   PrintExpr(Node->getSubExpr());
 }
 void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) {
@@ -1279,6 +1327,12 @@
 }
 
 void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
+  // If we have a conversion operator call only print the argument.
+  CXXMethodDecl *MD = Node->getMethodDecl();
+  if (MD && isa<CXXConversionDecl>(MD)) {
+    PrintExpr(Node->getImplicitObjectArgument());
+    return;
+  }
   VisitCallExpr(cast<CallExpr>(Node));
 }
 
@@ -1490,16 +1544,14 @@
     OS << " (";
     CXXMethodDecl *Method = Node->getCallOperator();
     NeedComma = false;
-    for (CXXMethodDecl::param_iterator P = Method->param_begin(),
-                                    PEnd = Method->param_end();
-         P != PEnd; ++P) {
+    for (auto P : Method->params()) {
       if (NeedComma) {
         OS << ", ";
       } else {
         NeedComma = true;
       }
-      std::string ParamStr = (*P)->getNameAsString();
-      (*P)->getOriginalType().print(OS, Policy, ParamStr);
+      std::string ParamStr = P->getNameAsString();
+      P->getOriginalType().print(OS, Policy, ParamStr);
     }
     if (Method->isVariadic()) {
       if (NeedComma)
@@ -1520,7 +1572,7 @@
     // Print the trailing return type if it was specified in the source.
     if (Node->hasExplicitResultType()) {
       OS << " -> ";
-      Proto->getResultType().print(OS, Policy);
+      Proto->getReturnType().print(OS, Policy);
     }
   }
 
@@ -1625,7 +1677,7 @@
 }
 
 void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
-  // Just forward to the sub expression.
+  // Just forward to the subexpression.
   PrintExpr(E->getSubExpr());
 }
 
@@ -1675,74 +1727,15 @@
         OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
 }
 
-static const char *getTypeTraitName(UnaryTypeTrait UTT) {
-  switch (UTT) {
-  case UTT_HasNothrowAssign:      return "__has_nothrow_assign";
-  case UTT_HasNothrowMoveAssign:  return "__has_nothrow_move_assign";
-  case UTT_HasNothrowConstructor: return "__has_nothrow_constructor";
-  case UTT_HasNothrowCopy:          return "__has_nothrow_copy";
-  case UTT_HasTrivialAssign:      return "__has_trivial_assign";
-  case UTT_HasTrivialMoveAssign:      return "__has_trivial_move_assign";
-  case UTT_HasTrivialMoveConstructor: return "__has_trivial_move_constructor";
-  case UTT_HasTrivialDefaultConstructor: return "__has_trivial_constructor";
-  case UTT_HasTrivialCopy:          return "__has_trivial_copy";
-  case UTT_HasTrivialDestructor:  return "__has_trivial_destructor";
-  case UTT_HasVirtualDestructor:  return "__has_virtual_destructor";
-  case UTT_IsAbstract:            return "__is_abstract";
-  case UTT_IsArithmetic:            return "__is_arithmetic";
-  case UTT_IsArray:                 return "__is_array";
-  case UTT_IsClass:               return "__is_class";
-  case UTT_IsCompleteType:          return "__is_complete_type";
-  case UTT_IsCompound:              return "__is_compound";
-  case UTT_IsConst:                 return "__is_const";
-  case UTT_IsEmpty:               return "__is_empty";
-  case UTT_IsEnum:                return "__is_enum";
-  case UTT_IsFinal:                 return "__is_final";
-  case UTT_IsFloatingPoint:         return "__is_floating_point";
-  case UTT_IsFunction:              return "__is_function";
-  case UTT_IsFundamental:           return "__is_fundamental";
-  case UTT_IsIntegral:              return "__is_integral";
-  case UTT_IsInterfaceClass:        return "__is_interface_class";
-  case UTT_IsLiteral:               return "__is_literal";
-  case UTT_IsLvalueReference:       return "__is_lvalue_reference";
-  case UTT_IsMemberFunctionPointer: return "__is_member_function_pointer";
-  case UTT_IsMemberObjectPointer:   return "__is_member_object_pointer";
-  case UTT_IsMemberPointer:         return "__is_member_pointer";
-  case UTT_IsObject:                return "__is_object";
-  case UTT_IsPOD:                 return "__is_pod";
-  case UTT_IsPointer:               return "__is_pointer";
-  case UTT_IsPolymorphic:         return "__is_polymorphic";
-  case UTT_IsReference:             return "__is_reference";
-  case UTT_IsRvalueReference:       return "__is_rvalue_reference";
-  case UTT_IsScalar:                return "__is_scalar";
-  case UTT_IsSealed:                return "__is_sealed";
-  case UTT_IsSigned:                return "__is_signed";
-  case UTT_IsStandardLayout:        return "__is_standard_layout";
-  case UTT_IsTrivial:               return "__is_trivial";
-  case UTT_IsTriviallyCopyable:     return "__is_trivially_copyable";
-  case UTT_IsUnion:               return "__is_union";
-  case UTT_IsUnsigned:              return "__is_unsigned";
-  case UTT_IsVoid:                  return "__is_void";
-  case UTT_IsVolatile:              return "__is_volatile";
-  }
-  llvm_unreachable("Type trait not covered by switch statement");
-}
-
-static const char *getTypeTraitName(BinaryTypeTrait BTT) {
-  switch (BTT) {
-  case BTT_IsBaseOf:              return "__is_base_of";
-  case BTT_IsConvertible:         return "__is_convertible";
-  case BTT_IsSame:                return "__is_same";
-  case BTT_TypeCompatible:        return "__builtin_types_compatible_p";
-  case BTT_IsConvertibleTo:       return "__is_convertible_to";
-  case BTT_IsTriviallyAssignable: return "__is_trivially_assignable";
-  }
-  llvm_unreachable("Binary type trait not covered by switch");
-}
-
 static const char *getTypeTraitName(TypeTrait TT) {
   switch (TT) {
-  case clang::TT_IsTriviallyConstructible:return "__is_trivially_constructible";
+#define TYPE_TRAIT_1(Spelling, Name, Key) \
+case clang::UTT_##Name: return #Spelling;
+#define TYPE_TRAIT_2(Spelling, Name, Key) \
+case clang::BTT_##Name: return #Spelling;
+#define TYPE_TRAIT_N(Spelling, Name, Key) \
+  case clang::TT_##Name: return #Spelling;
+#include "clang/Basic/TokenKinds.def"
   }
   llvm_unreachable("Type trait not covered by switch");
 }
@@ -1763,20 +1756,6 @@
   llvm_unreachable("Expression type trait not covered by switch");
 }
 
-void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
-  OS << getTypeTraitName(E->getTrait()) << '(';
-  E->getQueriedType().print(OS, Policy);
-  OS << ')';
-}
-
-void StmtPrinter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
-  OS << getTypeTraitName(E->getTrait()) << '(';
-  E->getLhsType().print(OS, Policy);
-  OS << ',';
-  E->getRhsType().print(OS, Policy);
-  OS << ')';
-}
-
 void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
   OS << getTypeTraitName(E->getTrait()) << "(";
   for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
@@ -1881,7 +1860,9 @@
 }
 
 void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
-  OS << "@selector(" << Node->getSelector().getAsString() << ')';
+  OS << "@selector(";
+  Node->getSelector().print(OS);
+  OS << ')';
 }
 
 void StmtPrinter::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
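
The two hand-written getTypeTraitName overloads deleted above are folded into a single function whose switch cases are expanded from the TYPE_TRAIT_1/2/N entries in clang/Basic/TokenKinds.def, so adding a trait no longer requires touching this printer. A standalone sketch of that .def-include (X-macro) idiom, with the table inlined and the trait names invented so the example compiles on its own:

#include <cstdio>

// Normally this table lives in a shared .def file and is #included wherever
// the full list of entries is needed.
#define EXAMPLE_TRAITS(X) \
  X(__is_example_one, ExampleOne) \
  X(__is_example_two, ExampleTwo)

enum ExampleTrait {
#define MAKE_ENUM(Spelling, Name) ET_##Name,
  EXAMPLE_TRAITS(MAKE_ENUM)
#undef MAKE_ENUM
};

static const char *getExampleTraitName(ExampleTrait T) {
  switch (T) {
#define MAKE_CASE(Spelling, Name) case ET_##Name: return #Spelling;
  EXAMPLE_TRAITS(MAKE_CASE)
#undef MAKE_CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", getExampleTraitName(ET_ExampleTwo)); }
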
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index 6805e62..07ed86a 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -79,9 +79,8 @@
 
 void StmtProfiler::VisitDeclStmt(const DeclStmt *S) {
   VisitStmt(S);
-  for (DeclStmt::const_decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
-       D != DEnd; ++D)
-    VisitDecl(*D);
+  for (const auto *D : S->decls())
+    VisitDecl(D);
 }
 
 void StmtProfiler::VisitNullStmt(const NullStmt *S) {
@@ -265,14 +264,27 @@
 #include "clang/Basic/OpenMPKinds.def"
 };
 
+void OMPClauseProfiler::VisitOMPIfClause(const OMPIfClause *C) {
+  if (C->getCondition())
+    Profiler->VisitStmt(C->getCondition());
+}
+
+void OMPClauseProfiler::VisitOMPNumThreadsClause(const OMPNumThreadsClause *C) {
+  if (C->getNumThreads())
+    Profiler->VisitStmt(C->getNumThreads());
+}
+
+void OMPClauseProfiler::VisitOMPSafelenClause(const OMPSafelenClause *C) {
+  if (C->getSafelen())
+    Profiler->VisitStmt(C->getSafelen());
+}
+
 void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
 
 template<typename T>
 void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
-  for (typename T::varlist_const_iterator I = Node->varlist_begin(),
-                                          E = Node->varlist_end();
-         I != E; ++I)
-    Profiler->VisitStmt(*I);
+  for (auto *I : Node->varlists())
+    Profiler->VisitStmt(I);
 }
 
 void OMPClauseProfiler::VisitOMPPrivateClause(const OMPPrivateClause *C) {
@@ -285,10 +297,13 @@
 void OMPClauseProfiler::VisitOMPSharedClause(const OMPSharedClause *C) {
   VisitOMPClauseList(C);
 }
+void OMPClauseProfiler::VisitOMPCopyinClause(const OMPCopyinClause *C) {
+  VisitOMPClauseList(C);
+}
 }
 
 void
-StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
+StmtProfiler::VisitOMPExecutableDirective(const OMPExecutableDirective *S) {
   VisitStmt(S);
   OMPClauseProfiler P(this);
   ArrayRef<OMPClause *> Clauses = S->clauses();
@@ -298,6 +313,14 @@
       P.Visit(*I);
 }
 
+void StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
+  VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPSimdDirective(const OMPSimdDirective *S) {
+  VisitOMPExecutableDirective(S);
+}
+
 void StmtProfiler::VisitExpr(const Expr *S) {
   VisitStmt(S);
 }
@@ -948,19 +971,6 @@
   VisitOverloadExpr(S);
 }
 
-void StmtProfiler::VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *S) {
-  VisitExpr(S);
-  ID.AddInteger(S->getTrait());
-  VisitType(S->getQueriedType());
-}
-
-void StmtProfiler::VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *S) {
-  VisitExpr(S);
-  ID.AddInteger(S->getTrait());
-  VisitType(S->getLhsType());
-  VisitType(S->getRhsType());
-}
-
 void StmtProfiler::VisitTypeTraitExpr(const TypeTraitExpr *S) {
   VisitExpr(S);
   ID.AddInteger(S->getTrait());
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index 16efb79..52f95bf 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -345,7 +345,7 @@
                              raw_ostream &Out) const {
   switch (getKind()) {
   case Null:
-    Out << "<no value>";
+    Out << "(no value)";
     break;
     
   case Type: {
@@ -362,7 +362,7 @@
       // FIXME: distinguish between pointer and reference args?
       ND->printQualifiedName(Out);
     } else {
-      Out << "<anonymous>";
+      Out << "(anonymous)";
     }
     break;
   }
@@ -511,6 +511,8 @@
 const ASTTemplateArgumentListInfo *
 ASTTemplateArgumentListInfo::Create(ASTContext &C,
                                     const TemplateArgumentListInfo &List) {
+  assert(llvm::alignOf<ASTTemplateArgumentListInfo>() >=
+         llvm::alignOf<TemplateArgumentLoc>());
   std::size_t size = ASTTemplateArgumentListInfo::sizeFor(List.size());
   void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
   ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo();
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 7421bae..dc3fcc8 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -422,6 +422,10 @@
     return BPT->getPointeeType();
   if (const ReferenceType *RT = getAs<ReferenceType>())
     return RT->getPointeeType();
+  if (const MemberPointerType *MPT = getAs<MemberPointerType>())
+    return MPT->getPointeeType();
+  if (const DecayedType *DT = getAs<DecayedType>())
+    return DT->getPointeeType();
   return QualType();
 }
 
@@ -585,7 +589,7 @@
       return Visit(T->getElementType());
     }
     AutoType *VisitFunctionType(const FunctionType *T) {
-      return Visit(T->getResultType());
+      return Visit(T->getReturnType());
     }
     AutoType *VisitParenType(const ParenType *T) {
       return Visit(T->getInnerType());
@@ -593,6 +597,9 @@
     AutoType *VisitAttributedType(const AttributedType *T) {
       return Visit(T->getModifiedType());
     }
+    AutoType *VisitAdjustedType(const AdjustedType *T) {
+      return Visit(T->getOriginalType());
+    }
   };
 }
 
@@ -1516,7 +1523,7 @@
   case ULong:             return "unsigned long";
   case ULongLong:         return "unsigned long long";
   case UInt128:           return "unsigned __int128";
-  case Half:              return "half";
+  case Half:              return Policy.Half ? "half" : "__fp16";
   case Float:             return "float";
   case Double:            return "double";
   case LongDouble:        return "long double";
@@ -1582,41 +1589,37 @@
   llvm_unreachable("Invalid calling convention.");
 }
 
-FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> args,
+FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
                                      QualType canonical,
                                      const ExtProtoInfo &epi)
-  : FunctionType(FunctionProto, result, epi.TypeQuals,
-                 canonical,
-                 result->isDependentType(),
-                 result->isInstantiationDependentType(),
-                 result->isVariablyModifiedType(),
-                 result->containsUnexpandedParameterPack(),
-                 epi.ExtInfo),
-    NumArgs(args.size()), NumExceptions(epi.NumExceptions),
-    ExceptionSpecType(epi.ExceptionSpecType),
-    HasAnyConsumedArgs(epi.ConsumedArguments != 0),
-    Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn),
-    RefQualifier(epi.RefQualifier)
-{
-  assert(NumArgs == args.size() && "function has too many parameters");
+    : FunctionType(FunctionProto, result, epi.TypeQuals, canonical,
+                   result->isDependentType(),
+                   result->isInstantiationDependentType(),
+                   result->isVariablyModifiedType(),
+                   result->containsUnexpandedParameterPack(), epi.ExtInfo),
+      NumParams(params.size()), NumExceptions(epi.NumExceptions),
+      ExceptionSpecType(epi.ExceptionSpecType),
+      HasAnyConsumedParams(epi.ConsumedParameters != 0), Variadic(epi.Variadic),
+      HasTrailingReturn(epi.HasTrailingReturn), RefQualifier(epi.RefQualifier) {
+  assert(NumParams == params.size() && "function has too many parameters");
 
   // Fill in the trailing argument array.
   QualType *argSlot = reinterpret_cast<QualType*>(this+1);
-  for (unsigned i = 0; i != NumArgs; ++i) {
-    if (args[i]->isDependentType())
+  for (unsigned i = 0; i != NumParams; ++i) {
+    if (params[i]->isDependentType())
       setDependent();
-    else if (args[i]->isInstantiationDependentType())
+    else if (params[i]->isInstantiationDependentType())
       setInstantiationDependent();
-    
-    if (args[i]->containsUnexpandedParameterPack())
+
+    if (params[i]->containsUnexpandedParameterPack())
       setContainsUnexpandedParameterPack();
 
-    argSlot[i] = args[i];
+    argSlot[i] = params[i];
   }
 
   if (getExceptionSpecType() == EST_Dynamic) {
     // Fill in the exception array.
-    QualType *exnSlot = argSlot + NumArgs;
+    QualType *exnSlot = argSlot + NumParams;
     for (unsigned i = 0, e = epi.NumExceptions; i != e; ++i) {
       if (epi.Exceptions[i]->isDependentType())
         setDependent();
@@ -1630,7 +1633,7 @@
     }
   } else if (getExceptionSpecType() == EST_ComputedNoexcept) {
     // Store the noexcept expression and context.
-    Expr **noexSlot = reinterpret_cast<Expr**>(argSlot + NumArgs);
+    Expr **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
     *noexSlot = epi.NoexceptExpr;
     
     if (epi.NoexceptExpr) {
@@ -1643,7 +1646,8 @@
   } else if (getExceptionSpecType() == EST_Uninstantiated) {
     // Store the function decl from which we will resolve our
     // exception specification.
-    FunctionDecl **slot = reinterpret_cast<FunctionDecl**>(argSlot + NumArgs);
+    FunctionDecl **slot =
+        reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
     slot[0] = epi.ExceptionSpecDecl;
     slot[1] = epi.ExceptionSpecTemplate;
     // This exception specification doesn't make the type dependent, because
@@ -1651,14 +1655,15 @@
   } else if (getExceptionSpecType() == EST_Unevaluated) {
     // Store the function decl from which we will resolve our
     // exception specification.
-    FunctionDecl **slot = reinterpret_cast<FunctionDecl**>(argSlot + NumArgs);
+    FunctionDecl **slot =
+        reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
     slot[0] = epi.ExceptionSpecDecl;
   }
 
-  if (epi.ConsumedArguments) {
-    bool *consumedArgs = const_cast<bool*>(getConsumedArgsBuffer());
-    for (unsigned i = 0; i != NumArgs; ++i)
-      consumedArgs[i] = epi.ConsumedArguments[i];
+  if (epi.ConsumedParameters) {
+    bool *consumedParams = const_cast<bool *>(getConsumedParamsBuffer());
+    for (unsigned i = 0; i != NumParams; ++i)
+      consumedParams[i] = epi.ConsumedParameters[i];
   }
 }
 
@@ -1686,16 +1691,41 @@
   return value.getBoolValue() ? NR_Nothrow : NR_Throw;
 }
 
+bool FunctionProtoType::isNothrow(const ASTContext &Ctx,
+                                  bool ResultIfDependent) const {
+  ExceptionSpecificationType EST = getExceptionSpecType();
+  assert(EST != EST_Unevaluated && EST != EST_Uninstantiated);
+  if (EST == EST_DynamicNone || EST == EST_BasicNoexcept)
+    return true;
+
+  if (EST == EST_Dynamic && ResultIfDependent == true) {
+    // A dynamic exception specification is throwing unless every exception
+    // type is an (unexpanded) pack expansion type.
+    for (unsigned I = 0, N = NumExceptions; I != N; ++I)
+      if (!getExceptionType(I)->getAs<PackExpansionType>())
+        return false;
+    return ResultIfDependent;
+  }
+
+  if (EST != EST_ComputedNoexcept)
+    return false;
+
+  NoexceptResult NR = getNoexceptSpec(Ctx);
+  if (NR == NR_Dependent)
+    return ResultIfDependent;
+  return NR == NR_Nothrow;
+}
+
 bool FunctionProtoType::isTemplateVariadic() const {
-  for (unsigned ArgIdx = getNumArgs(); ArgIdx; --ArgIdx)
-    if (isa<PackExpansionType>(getArgType(ArgIdx - 1)))
+  for (unsigned ArgIdx = getNumParams(); ArgIdx; --ArgIdx)
+    if (isa<PackExpansionType>(getParamType(ArgIdx - 1)))
       return true;
   
   return false;
 }
 
 void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
-                                const QualType *ArgTys, unsigned NumArgs,
+                                const QualType *ArgTys, unsigned NumParams,
                                 const ExtProtoInfo &epi,
                                 const ASTContext &Context) {
 
@@ -1717,7 +1747,7 @@
   // whether the following bool is the EH spec or part of the arguments.
 
   ID.AddPointer(Result.getAsOpaquePtr());
-  for (unsigned i = 0; i != NumArgs; ++i)
+  for (unsigned i = 0; i != NumParams; ++i)
     ID.AddPointer(ArgTys[i].getAsOpaquePtr());
   // This method is relatively performance sensitive, so as a performance
   // shortcut, use one AddInteger call instead of four for the next four
@@ -1740,9 +1770,9 @@
              epi.ExceptionSpecType == EST_Unevaluated) {
     ID.AddPointer(epi.ExceptionSpecDecl->getCanonicalDecl());
   }
-  if (epi.ConsumedArguments) {
-    for (unsigned i = 0; i != NumArgs; ++i)
-      ID.AddBoolean(epi.ConsumedArguments[i]);
+  if (epi.ConsumedParameters) {
+    for (unsigned i = 0; i != NumParams; ++i)
+      ID.AddBoolean(epi.ConsumedParameters[i]);
   }
   epi.ExtInfo.Profile(ID);
   ID.AddBoolean(epi.HasTrailingReturn);
@@ -1750,7 +1780,7 @@
 
 void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
                                 const ASTContext &Ctx) {
-  Profile(ID, getResultType(), arg_type_begin(), NumArgs, getExtProtoInfo(),
+  Profile(ID, getReturnType(), param_type_begin(), NumParams, getExtProtoInfo(),
           Ctx);
 }
 
@@ -1819,11 +1849,9 @@
     decl(const_cast<TagDecl*>(D)) {}
 
 static TagDecl *getInterestingTagDecl(TagDecl *decl) {
-  for (TagDecl::redecl_iterator I = decl->redecls_begin(),
-                                E = decl->redecls_end();
-       I != E; ++I) {
+  for (auto I : decl->redecls()) {
     if (I->isCompleteDefinition() || I->isBeingDefined())
-      return *I;
+      return I;
   }
   // If there's no definition (not even in progress), return what we have.
   return decl;
@@ -2200,13 +2228,12 @@
   case Type::ExtVector:
     return Cache::get(cast<VectorType>(T)->getElementType());
   case Type::FunctionNoProto:
-    return Cache::get(cast<FunctionType>(T)->getResultType());
+    return Cache::get(cast<FunctionType>(T)->getReturnType());
   case Type::FunctionProto: {
     const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
-    CachedProperties result = Cache::get(FPT->getResultType());
-    for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
-           ae = FPT->arg_type_end(); ai != ae; ++ai)
-      result = merge(result, Cache::get(*ai));
+    CachedProperties result = Cache::get(FPT->getReturnType());
+    for (const auto &ai : FPT->param_types())
+      result = merge(result, Cache::get(ai));
     return result;
   }
   case Type::ObjCInterface: {
@@ -2285,13 +2312,12 @@
   case Type::ExtVector:
     return computeLinkageInfo(cast<VectorType>(T)->getElementType());
   case Type::FunctionNoProto:
-    return computeLinkageInfo(cast<FunctionType>(T)->getResultType());
+    return computeLinkageInfo(cast<FunctionType>(T)->getReturnType());
   case Type::FunctionProto: {
     const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
-    LinkageInfo LV = computeLinkageInfo(FPT->getResultType());
-    for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
-           ae = FPT->arg_type_end(); ai != ae; ++ai)
-      LV.merge(computeLinkageInfo(*ai));
+    LinkageInfo LV = computeLinkageInfo(FPT->getReturnType());
+    for (const auto &ai : FPT->param_types())
+      LV.merge(computeLinkageInfo(ai));
     return LV;
   }
   case Type::ObjCInterface:
@@ -2441,3 +2467,7 @@
 
   return DK_none;
 }
+
+CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
+  return getClass()->getAsCXXRecordDecl()->getMostRecentDecl();
+}
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 571e3db..89ec8d6 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -204,6 +204,7 @@
       NeedARCStrongQualifier = true;
       // Fall through
       
+    case Type::Adjusted:
     case Type::Decayed:
     case Type::Pointer:
     case Type::BlockPointer:
@@ -471,12 +472,21 @@
   printAfter(T->getElementType(), OS);
 }
 
+void TypePrinter::printAdjustedBefore(const AdjustedType *T, raw_ostream &OS) {
+  // Print the adjusted representation, otherwise the adjustment will be
+  // invisible.
+  printBefore(T->getAdjustedType(), OS);
+}
+void TypePrinter::printAdjustedAfter(const AdjustedType *T, raw_ostream &OS) {
+  printAfter(T->getAdjustedType(), OS);
+}
+
 void TypePrinter::printDecayedBefore(const DecayedType *T, raw_ostream &OS) {
   // Print as though it's a pointer.
-  printBefore(T->getDecayedType(), OS);
+  printAdjustedBefore(T, OS);
 }
 void TypePrinter::printDecayedAfter(const DecayedType *T, raw_ostream &OS) {
-  printAfter(T->getDecayedType(), OS);
+  printAdjustedAfter(T, OS);
 }
 
 void TypePrinter::printDependentSizedArrayBefore(
@@ -598,7 +608,7 @@
   } else {
     // If needed for precedence reasons, wrap the inner part in grouping parens.
     SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
-    printBefore(T->getResultType(), OS);
+    printBefore(T->getReturnType(), OS);
     if (!PrevPHIsEmpty.get())
       OS << '(';
   }
@@ -614,17 +624,17 @@
   OS << '(';
   {
     ParamPolicyRAII ParamPolicy(Policy);
-    for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
+    for (unsigned i = 0, e = T->getNumParams(); i != e; ++i) {
       if (i) OS << ", ";
-      print(T->getArgType(i), OS, StringRef());
+      print(T->getParamType(i), OS, StringRef());
     }
   }
   
   if (T->isVariadic()) {
-    if (T->getNumArgs())
+    if (T->getNumParams())
       OS << ", ";
     OS << "...";
-  } else if (T->getNumArgs() == 0 && !Policy.LangOpts.CPlusPlus) {
+  } else if (T->getNumParams() == 0 && !Policy.LangOpts.CPlusPlus) {
     // Do not emit int() if we have a proto, emit 'int(void)'.
     OS << "void";
   }
@@ -703,17 +713,17 @@
   T->printExceptionSpecification(OS, Policy);
 
   if (T->hasTrailingReturn()) {
-    OS << " -> "; 
-    print(T->getResultType(), OS, StringRef());
+    OS << " -> ";
+    print(T->getReturnType(), OS, StringRef());
   } else
-    printAfter(T->getResultType(), OS);
+    printAfter(T->getReturnType(), OS);
 }
 
 void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T, 
                                              raw_ostream &OS) { 
   // If needed for precedence reasons, wrap the inner part in grouping parens.
   SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
-  printBefore(T->getResultType(), OS);
+  printBefore(T->getReturnType(), OS);
   if (!PrevPHIsEmpty.get())
     OS << '(';
 }
@@ -727,7 +737,7 @@
   OS << "()";
   if (T->getNoReturnAttr())
     OS << " __attribute__((noreturn))";
-  printAfter(T->getResultType(), OS);
+  printAfter(T->getReturnType(), OS);
 }
 
 void TypePrinter::printTypeSpec(const NamedDecl *D, raw_ostream &OS) {
@@ -838,7 +848,7 @@
     if (NS->getIdentifier())
       OS << NS->getName() << "::";
     else
-      OS << "<anonymous>::";
+      OS << "(anonymous namespace)::";
   } else if (ClassTemplateSpecializationDecl *Spec
                = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
     IncludeStrongLifetimeRAII Strong(Policy);
@@ -890,13 +900,13 @@
     OS << Typedef->getIdentifier()->getName();
   } else {
     // Make an unambiguous representation for anonymous types, e.g.
-    //   <anonymous enum at /usr/include/string.h:120:9>
+    //   (anonymous enum at /usr/include/string.h:120:9)
     
     if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
-      OS << "<lambda";
+      OS << "(lambda";
       HasKindDecoration = true;
     } else {
-      OS << "<anonymous";
+      OS << "(anonymous";
     }
     
     if (Policy.AnonymousTagLocations) {
@@ -915,7 +925,7 @@
       }
     }
     
-    OS << '>';
+    OS << ')';
   }
 
   // If this is a class template specialization, print the template
@@ -1249,13 +1259,12 @@
   print(T->getBaseType(), OS, StringRef());
   OS << '<';
   bool isFirst = true;
-  for (ObjCObjectType::qual_iterator
-         I = T->qual_begin(), E = T->qual_end(); I != E; ++I) {
+  for (const auto *I : T->quals()) {
     if (isFirst)
       isFirst = false;
     else
       OS << ',';
-    OS << (*I)->getName();
+    OS << I->getName();
   }
   OS << '>';
   spaceBeforePlaceHolder(OS);
@@ -1271,12 +1280,12 @@
   T->getPointeeType().getLocalQualifiers().print(OS, Policy,
                                                 /*appendSpaceIfNonEmpty=*/true);
 
+  assert(!T->isObjCSelType());
+
   if (T->isObjCIdType() || T->isObjCQualifiedIdType())
     OS << "id";
   else if (T->isObjCClassType() || T->isObjCQualifiedClassType())
     OS << "Class";
-  else if (T->isObjCSelType())
-    OS << "SEL";
   else
     OS << T->getInterfaceDecl()->getName();
   
@@ -1292,7 +1301,8 @@
     OS << '>';
   }
   
-  if (!T->isObjCIdType() && !T->isObjCQualifiedIdType()) {
+  if (!T->isObjCIdType() && !T->isObjCQualifiedIdType() &&
+      !T->isObjCClassType() && !T->isObjCQualifiedClassType()) {
     OS << " *"; // Don't forget the implicit pointer.
   } else {
     spaceBeforePlaceHolder(OS);
@@ -1413,13 +1423,10 @@
   print(llvm::errs(), PrintingPolicy(LO), "identifier");
   llvm::errs() << '\n';
 }
-void QualType::dump() const {
-  dump(0);
-}
 
-void Type::dump() const {
-  QualType(this, 0).dump();
-}
+LLVM_DUMP_METHOD void QualType::dump() const { dump(0); }
+
+LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
 
 std::string Qualifiers::getAsString() const {
   LangOptions LO;
diff --git a/lib/AST/VTTBuilder.cpp b/lib/AST/VTTBuilder.cpp
index 5ca4e86..c213d1c 100644
--- a/lib/AST/VTTBuilder.cpp
+++ b/lib/AST/VTTBuilder.cpp
@@ -56,15 +56,13 @@
 void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
   const CXXRecordDecl *RD = Base.getBase();
 
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    
+  for (const auto &I : RD->bases()) {
     // Don't layout virtual bases.
-    if (I->isVirtual())
+    if (I.isVirtual())
         continue;
 
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
     CharUnits BaseOffset = Base.getBaseOffset() + 
@@ -88,10 +86,9 @@
   if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
     return;
 
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
+  for (const auto &I : RD->bases()) {
     const CXXRecordDecl *BaseDecl =
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     // Itanium C++ ABI 2.6.2:
     //   Secondary virtual pointers are present for all bases with either
@@ -106,7 +103,7 @@
     bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
     bool BaseDeclIsNonVirtualPrimaryBase = false;
     CharUnits BaseOffset;
-    if (I->isVirtual()) {
+    if (I.isVirtual()) {
       // Ignore virtual bases that we've already visited.
       if (!VBases.insert(BaseDecl))
         continue;
@@ -153,13 +150,12 @@
 
 void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
                                    VisitedVirtualBasesSetTy &VBases) {
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
+  for (const auto &I : RD->bases()) {
     const CXXRecordDecl *BaseDecl = 
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
     
     // Check if this is a virtual base.
-    if (I->isVirtual()) {
+    if (I.isVirtual()) {
       // Check if we've seen this base before.
       if (!VBases.insert(BaseDecl))
         continue;
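
Most hunks in this file (and throughout the patch) replace explicit bases_begin()/bases_end() iterator loops with range-based for over RD->bases(). A small sketch of the pattern behind that rewrite, a member function returning an iterator range so range-for works, with invented types standing in for CXXRecordDecl and llvm::iterator_range:

#include <cstdio>
#include <vector>

struct BaseSpecifier { const char *Name; bool Virtual; };

class RecordDecl {
  std::vector<BaseSpecifier> BaseSpecs;

public:
  explicit RecordDecl(std::vector<BaseSpecifier> B) : BaseSpecs(std::move(B)) {}

  using base_iterator = std::vector<BaseSpecifier>::const_iterator;
  struct base_range {
    base_iterator Begin, End;
    base_iterator begin() const { return Begin; }
    base_iterator end() const { return End; }
  };

  // The accessor callers iterate over, analogous to CXXRecordDecl::bases().
  base_range bases() const { return {BaseSpecs.begin(), BaseSpecs.end()}; }
};

int main() {
  RecordDecl RD({{"A", false}, {"B", true}});
  for (const auto &I : RD.bases()) // replaces the bases_begin()/bases_end() loop
    if (!I.Virtual)
      std::printf("non-virtual base %s\n", I.Name);
}
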
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index 5f7ae0f..c6b38d3 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -16,6 +16,7 @@
 #include "clang/AST/CXXInheritance.h"
 #include "clang/AST/RecordLayout.h"
 #include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/Support/Format.h"
 #include "llvm/Support/raw_ostream.h"
 #include <algorithm>
@@ -268,11 +269,11 @@
   const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
   
   // Canonicalize the return types.
-  CanQualType CanDerivedReturnType = 
-    Context.getCanonicalType(DerivedFT->getResultType());
-  CanQualType CanBaseReturnType = 
-    Context.getCanonicalType(BaseFT->getResultType());
-  
+  CanQualType CanDerivedReturnType =
+      Context.getCanonicalType(DerivedFT->getReturnType());
+  CanQualType CanBaseReturnType =
+      Context.getCanonicalType(BaseFT->getReturnType());
+
   assert(CanDerivedReturnType->getTypeClass() == 
          CanBaseReturnType->getTypeClass() && 
          "Types must have same type class!");
@@ -337,13 +338,12 @@
     OffsetInLayoutClass;
   
   // Traverse our bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+  for (const auto &B : RD->bases()) {
+    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
 
     CharUnits BaseOffset;
     CharUnits BaseOffsetInLayoutClass;
-    if (I->isVirtual()) {
+    if (B.isVirtual()) {
       // Check if we've visited this virtual base before.
       if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
         continue;
@@ -363,7 +363,7 @@
     }
 
     ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset), 
-                       I->isVirtual(), BaseOffsetInLayoutClass, 
+                       B.isVirtual(), BaseOffsetInLayoutClass, 
                        SubobjectOffsets, SubobjectLayoutClassOffsets, 
                        SubobjectCounts);
   }
@@ -374,16 +374,15 @@
   const CXXRecordDecl *RD = Base.getBase();
   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
 
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+  for (const auto &B : RD->bases()) {
+    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
     
     // Ignore bases that don't have any virtual member functions.
     if (!BaseDecl->isPolymorphic())
       continue;
 
     CharUnits BaseOffset;
-    if (I->isVirtual()) {
+    if (B.isVirtual()) {
       if (!VisitedVirtualBases.insert(BaseDecl)) {
         // We've visited this base before.
         continue;
@@ -397,21 +396,22 @@
     dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
   }
 
-  Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
+  Out << "Final overriders for (";
+  RD->printQualifiedName(Out);
+  Out << ", ";
   Out << Base.getBaseOffset().getQuantity() << ")\n";
 
   // Now dump the overriders for this base subobject.
-  for (CXXRecordDecl::method_iterator I = RD->method_begin(), 
-       E = RD->method_end(); I != E; ++I) {
-    const CXXMethodDecl *MD = *I;
-
+  for (const auto *MD : RD->methods()) {
     if (!MD->isVirtual())
       continue;
   
     OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
 
-    Out << "  " << MD->getQualifiedNameAsString() << " - (";
-    Out << Overrider.Method->getQualifiedNameAsString();
+    Out << "  ";
+    MD->printQualifiedName(Out);
+    Out << " - (";
+    Overrider.Method->printQualifiedName(Out);
     Out << ", " << Overrider.Offset.getQuantity() << ')';
 
     BaseOffset Offset;
@@ -420,8 +420,10 @@
 
     if (!Offset.isEmpty()) {
       Out << " [ret-adj: ";
-      if (Offset.VirtualBase)
-        Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
+      if (Offset.VirtualBase) {
+        Offset.VirtualBase->printQualifiedName(Out);
+        Out << " vbase, ";
+      }
              
       Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
     }
@@ -472,10 +474,10 @@
   // list here because there isn't necessarily an inheritance
   // relationship between the two methods.
   if (LT->getTypeQuals() != RT->getTypeQuals() ||
-      LT->getNumArgs() != RT->getNumArgs())
+      LT->getNumParams() != RT->getNumParams())
     return false;
-  for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
-    if (LT->getArgType(I) != RT->getArgType(I))
+  for (unsigned I = 0, E = LT->getNumParams(); I != E; ++I)
+    if (LT->getParamType(I) != RT->getParamType(I))
       return false;
   return true;
 }
@@ -684,10 +686,7 @@
   }
   
   // Add the vcall offsets.
-  for (CXXRecordDecl::method_iterator I = RD->method_begin(),
-       E = RD->method_end(); I != E; ++I) {
-    const CXXMethodDecl *MD = *I;
-    
+  for (const auto *MD : RD->methods()) {
     if (!MD->isVirtual())
       continue;
 
@@ -715,13 +714,11 @@
   }
 
   // And iterate over all non-virtual bases (ignoring the primary base).
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-  
-    if (I->isVirtual())
+  for (const auto &B : RD->bases()) {
+    if (B.isVirtual())
       continue;
 
-    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
     if (BaseDecl == PrimaryBase)
       continue;
 
@@ -741,12 +738,11 @@
     Context.getASTRecordLayout(LayoutClass);
 
   // Add vbase offsets.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+  for (const auto &B : RD->bases()) {
+    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
 
     // Check if this is a virtual base that we haven't visited before.
-    if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+    if (B.isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
       CharUnits Offset = 
         LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
 
@@ -1509,10 +1505,7 @@
   NewVirtualFunctionsTy NewVirtualFunctions;
 
   // Now go through all virtual member functions and add them.
-  for (CXXRecordDecl::method_iterator I = RD->method_begin(),
-       E = RD->method_end(); I != E; ++I) {
-    const CXXMethodDecl *MD = *I;
-  
+  for (const auto *MD : RD->methods()) {
     if (!MD->isVirtual())
       continue;
 
@@ -1759,13 +1752,12 @@
   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
   const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
   
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
+  for (const auto &B : RD->bases()) {
     // Ignore virtual bases, we'll emit them later.
-    if (I->isVirtual())
+    if (B.isVirtual())
       continue;
     
-    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
 
     // Ignore bases that don't have a vtable.
     if (!BaseDecl->isDynamicClass())
@@ -1838,13 +1830,12 @@
   }
 
   // Traverse bases, looking for more primary virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+  for (const auto &B : RD->bases()) {
+    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
 
     CharUnits BaseOffsetInLayoutClass;
     
-    if (I->isVirtual()) {
+    if (B.isVirtual()) {
       if (!VBases.insert(BaseDecl))
         continue;
       
@@ -1868,13 +1859,12 @@
   //   Then come the virtual base virtual tables, also in inheritance graph
   //   order, and again excluding primary bases (which share virtual tables with
   //   the classes for which they are primary).
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+  for (const auto &B : RD->bases()) {
+    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
 
     // Check if this base needs a vtable. (If it's virtual, not a primary base
     // of some other class, and we haven't visited it before).
-    if (I->isVirtual() && BaseDecl->isDynamicClass() && 
+    if (B.isVirtual() && BaseDecl->isDynamicClass() && 
         !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
       const ASTRecordLayout &MostDerivedClassLayout =
         Context.getASTRecordLayout(MostDerivedClass);
@@ -1900,21 +1890,6 @@
   }
 }
 
-struct ItaniumThunkInfoComparator {
-  bool operator() (const ThunkInfo &LHS, const ThunkInfo &RHS) {
-    assert(LHS.Method == 0);
-    assert(RHS.Method == 0);
-
-    if (LHS.This != RHS.This)
-      return LHS.This < RHS.This;
-
-    if (LHS.Return != RHS.Return)
-      return LHS.Return < RHS.Return;
-
-    return false;
-  }
-};
-
 /// dumpLayout - Dump the vtable layout.
 void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
   // FIXME: write more tests that actually use the dumpLayout output to prevent
@@ -1922,12 +1897,13 @@
 
   if (isBuildingConstructorVTable()) {
     Out << "Construction vtable for ('";
-    Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
+    MostDerivedClass->printQualifiedName(Out);
+    Out << "', ";
     Out << MostDerivedClassOffset.getQuantity() << ") in '";
-    Out << LayoutClass->getQualifiedNameAsString();
+    LayoutClass->printQualifiedName(Out);
   } else {
     Out << "Vtable for '";
-    Out << MostDerivedClass->getQualifiedNameAsString();
+    MostDerivedClass->printQualifiedName(Out);
   }
   Out << "' (" << Components.size() << " entries).\n";
 
@@ -1973,7 +1949,8 @@
       break;
     
     case VTableComponent::CK_RTTI:
-      Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
+      Component.getRTTIDecl()->printQualifiedName(Out);
+      Out << " RTTI";
       break;
     
     case VTableComponent::CK_FunctionPointer: {
@@ -2028,7 +2005,7 @@
       
       const CXXDestructorDecl *DD = Component.getDestructorDecl();
       
-      Out << DD->getQualifiedNameAsString();
+      DD->printQualifiedName(Out);
       if (IsComplete)
         Out << "() [complete]";
       else
@@ -2078,7 +2055,8 @@
         const BaseSubobject &Base = 
           AddressPointsByIndex.find(NextIndex)->second;
         
-        Out << "       -- (" << Base.getBase()->getQualifiedNameAsString();
+        Out << "       -- (";
+        Base.getBase()->printQualifiedName(Out);
         Out << ", " << Base.getBaseOffset().getQuantity();
         Out << ") vtable address --\n";
       } else {
@@ -2124,7 +2102,8 @@
     }
     
     Out << "Virtual base offset offsets for '";
-    Out << MostDerivedClass->getQualifiedNameAsString() << "' (";
+    MostDerivedClass->printQualifiedName(Out);
+    Out << "' (";
     Out << ClassNamesAndOffsets.size();
     Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
 
@@ -2158,7 +2137,10 @@
 
       ThunkInfoVectorTy ThunksVector = Thunks[MD];
       std::sort(ThunksVector.begin(), ThunksVector.end(),
-                ItaniumThunkInfoComparator());
+                [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+        assert(LHS.Method == 0 && RHS.Method == 0);
+        return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
+      });
 
       Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
       Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
@@ -2203,10 +2185,7 @@
   // Store them in a map keyed by the index so we'll get a sorted table.
   std::map<uint64_t, std::string> IndicesMap;
 
-  for (CXXRecordDecl::method_iterator i = MostDerivedClass->method_begin(),
-       e = MostDerivedClass->method_end(); i != e; ++i) {
-    const CXXMethodDecl *MD = *i;
-    
+  for (const auto *MD : MostDerivedClass->methods()) {
     // We only want virtual member functions.
     if (!MD->isVirtual())
       continue;
@@ -2230,7 +2209,7 @@
   // Print the vtable indices for all the member functions.
   if (!IndicesMap.empty()) {
     Out << "VTable indices for '";
-    Out << MostDerivedClass->getQualifiedNameAsString();
+    MostDerivedClass->printQualifiedName(Out);
     Out << "' (" << IndicesMap.size() << " entries).\n";
 
     for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(),
@@ -2245,17 +2224,6 @@
 
   Out << '\n';
 }
-
-struct VTableThunksComparator {
-  bool operator()(const VTableLayout::VTableThunkTy &LHS,
-                  const VTableLayout::VTableThunkTy &RHS) {
-    if (LHS.first == RHS.first) {
-      assert(LHS.second == RHS.second &&
-             "Different thunks should have unique indices!");
-    }
-    return LHS.first < RHS.first;
-  }
-};
 }
 
 VTableLayout::VTableLayout(uint64_t NumVTableComponents,
@@ -2276,14 +2244,18 @@
             this->VTableThunks.get());
   std::sort(this->VTableThunks.get(),
             this->VTableThunks.get() + NumVTableThunks,
-            VTableThunksComparator());
+            [](const VTableLayout::VTableThunkTy &LHS,
+               const VTableLayout::VTableThunkTy &RHS) {
+    assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
+           "Different thunks should have unique indices!");
+    return LHS.first < RHS.first;
+  });
 }
 
 VTableLayout::~VTableLayout() { }
 
 ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context)
-  : IsMicrosoftABI(Context.getTargetInfo().getCXXABI().isMicrosoft()) {
-}
+    : VTableContextBase(/*MS=*/false) {}
 
 ItaniumVTableContext::~ItaniumVTableContext() {
   llvm::DeleteContainerSeconds(VTableLayouts);
@@ -2348,8 +2320,6 @@
 
 void
 ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
-  assert(!IsMicrosoftABI && "Shouldn't be called in this ABI!");
-
   const VTableLayout *&Entry = VTableLayouts[RD];
 
   // Check if we've computed this information before.
@@ -2444,6 +2414,9 @@
   typedef llvm::DenseMap<GlobalDecl, MethodVFTableLocation>
     MethodVFTableLocationsTy;
 
+  typedef llvm::iterator_range<MethodVFTableLocationsTy::const_iterator>
+    method_locations_range;
+
 private:
   /// VTables - Global vtable information.
   MicrosoftVTableContext &VTables;
@@ -2457,7 +2430,7 @@
 
   const ASTRecordLayout &MostDerivedClassLayout;
 
-  VFPtrInfo WhichVFPtr;
+  const VPtrInfo &WhichVFPtr;
 
   /// FinalOverriders - The final overriders of the most derived class.
   const FinalOverriders Overriders;
@@ -2521,11 +2494,8 @@
   }
 
   /// ComputeThisOffset - Returns the 'this' argument offset for the given
-  /// method in the given subobject, relative to the beginning of the
-  /// MostDerivedClass.
-  CharUnits ComputeThisOffset(const CXXMethodDecl *MD,
-                              BaseSubobject Base,
-                              FinalOverriders::OverriderInfo Overrider);
+  /// method, relative to the beginning of the MostDerivedClass.
+  CharUnits ComputeThisOffset(FinalOverriders::OverriderInfo Overrider);
 
   void CalculateVtordispAdjustment(FinalOverriders::OverriderInfo Overrider,
                                    CharUnits ThisOffset, ThisAdjustment &TA);
@@ -2533,17 +2503,21 @@
   /// AddMethod - Add a single virtual member function to the vftable
   /// components vector.
   void AddMethod(const CXXMethodDecl *MD, ThunkInfo TI) {
+    if (!TI.isEmpty()) {
+      VTableThunks[Components.size()] = TI;
+      AddThunk(MD, TI);
+    }
     if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
       assert(TI.Return.isEmpty() &&
              "Destructor can't have return adjustment!");
       Components.push_back(VTableComponent::MakeDeletingDtor(DD));
     } else {
-      if (!TI.isEmpty())
-        VTableThunks[Components.size()] = TI;
       Components.push_back(VTableComponent::MakeFunction(MD));
     }
   }
 
+  bool NeedsReturnAdjustingThunk(const CXXMethodDecl *MD);
+
   /// AddMethods - Add the methods of this base subobject and the relevant
   /// subbases to the vftable we're currently laying out.
   void AddMethods(BaseSubobject Base, unsigned BaseDepth,
@@ -2557,6 +2531,7 @@
     BasesSetVectorTy VisitedBases;
     AddMethods(BaseSubobject(MostDerivedClass, CharUnits::Zero()), 0, 0,
                VisitedBases);
+    assert(Components.size() && "vftable can't be empty");
 
     assert(MethodVFTableLocations.empty());
     for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
@@ -2567,8 +2542,8 @@
       // and the entries shadowed by return adjusting thunks.
       if (MD->getParent() != MostDerivedClass || MI.Shadowed)
         continue;
-      MethodVFTableLocation Loc(MI.VBTableIndex, WhichVFPtr.LastVBase,
-                                WhichVFPtr.VFPtrOffset, MI.VFTableIndex);
+      MethodVFTableLocation Loc(MI.VBTableIndex, WhichVFPtr.getVBaseWithVPtr(),
+                                WhichVFPtr.NonVirtualOffset, MI.VFTableIndex);
       if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
         MethodVFTableLocations[GlobalDecl(DD, Dtor_Deleting)] = Loc;
       } else {
@@ -2586,12 +2561,12 @@
 
 public:
   VFTableBuilder(MicrosoftVTableContext &VTables,
-                 const CXXRecordDecl *MostDerivedClass, VFPtrInfo Which)
+                 const CXXRecordDecl *MostDerivedClass, const VPtrInfo *Which)
       : VTables(VTables),
         Context(MostDerivedClass->getASTContext()),
         MostDerivedClass(MostDerivedClass),
         MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)),
-        WhichVFPtr(Which),
+        WhichVFPtr(*Which),
         Overriders(MostDerivedClass, CharUnits(), MostDerivedClass) {
     LayoutVFTable();
 
@@ -2605,12 +2580,9 @@
 
   ThunksMapTy::const_iterator thunks_end() const { return Thunks.end(); }
 
-  MethodVFTableLocationsTy::const_iterator vtable_indices_begin() const {
-    return MethodVFTableLocations.begin();
-  }
-
-  MethodVFTableLocationsTy::const_iterator vtable_indices_end() const {
-    return MethodVFTableLocations.end();
+  method_locations_range vtable_locations() const {
+    return method_locations_range(MethodVFTableLocations.begin(),
+                                  MethodVFTableLocations.end());
   }
 
   uint64_t getNumVTableComponents() const { return Components.size(); }
@@ -2634,6 +2606,8 @@
   void dumpLayout(raw_ostream &);
 };
 
+} // end namespace
+
 /// InitialOverriddenDefinitionCollector - Finds the set of least derived bases
 /// that define the given method.
 struct InitialOverriddenDefinitionCollector {
@@ -2655,14 +2629,18 @@
 }
 
 CharUnits
-VFTableBuilder::ComputeThisOffset(const CXXMethodDecl *MD,
-                                  BaseSubobject Base,
-                                  FinalOverriders::OverriderInfo Overrider) {
+VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) {
   InitialOverriddenDefinitionCollector Collector;
-  visitAllOverriddenMethods(MD, Collector);
+  visitAllOverriddenMethods(Overrider.Method, Collector);
+
+  // If there are no overrides then 'this' is located
+  // in the base that defines the method.
+  if (Collector.Bases.size() == 0)
+    return Overrider.Offset;
 
   CXXBasePaths Paths;
-  Base.getBase()->lookupInBases(BaseInSet, &Collector.Bases, Paths);
+  Overrider.Method->getParent()->lookupInBases(BaseInSet, &Collector.Bases,
+                                               Paths);
 
   // This will hold the smallest this offset among overridees of MD.
   // This implies that an offset of a non-virtual base will dominate an offset
@@ -2674,7 +2652,7 @@
   for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
        I != E; ++I) {
     const CXXBasePath &Path = (*I);
-    CharUnits ThisOffset = Base.getBaseOffset();
+    CharUnits ThisOffset = Overrider.Offset;
     CharUnits LastVBaseOffset;
 
     // For each path from the overrider to the parents of the overridden methods,
@@ -2705,16 +2683,16 @@
       }
     }
 
-    if (isa<CXXDestructorDecl>(MD)) {
+    if (isa<CXXDestructorDecl>(Overrider.Method)) {
       if (LastVBaseOffset.isZero()) {
         // If a "Base" class has at least one non-virtual base with a virtual
         // destructor, the "Base" virtual destructor will take the address
         // of the "Base" subobject as the "this" argument.
-        return Base.getBaseOffset();
+        ThisOffset = Overrider.Offset;
       } else {
         // A virtual destructor of a virtual base takes the address of the
         // virtual base subobject as the "this" argument.
-        return LastVBaseOffset;
+        ThisOffset = LastVBaseOffset;
       }
     }
 
@@ -2734,7 +2712,7 @@
   const ASTRecordLayout::VBaseOffsetsMapTy &VBaseMap =
       MostDerivedClassLayout.getVBaseOffsetsMap();
   const ASTRecordLayout::VBaseOffsetsMapTy::const_iterator &VBaseMapEntry =
-      VBaseMap.find(WhichVFPtr.LastVBase);
+      VBaseMap.find(WhichVFPtr.getVBaseWithVPtr());
   assert(VBaseMapEntry != VBaseMap.end());
 
   // Check if we need a vtordisp adjustment at all.
@@ -2744,7 +2722,7 @@
   CharUnits VFPtrVBaseOffset = VBaseMapEntry->second.VBaseOffset;
   // The implicit vtordisp field is located right before the vbase.
   TA.Virtual.Microsoft.VtordispOffset =
-      (VFPtrVBaseOffset - WhichVFPtr.VFPtrFullOffset).getQuantity() - 4;
+      (VFPtrVBaseOffset - WhichVFPtr.FullOffsetInMDC).getQuantity() - 4;
 
   // If the final overrider is defined in either:
   // - the most derived class or its non-virtual base or
@@ -2756,13 +2734,13 @@
 
   const CXXRecordDecl *OverriderVBase =
       ComputeBaseOffset(Context, OverriderRD, MostDerivedClass).VirtualBase;
-  if (!OverriderVBase || OverriderVBase == WhichVFPtr.LastVBase)
+  if (!OverriderVBase || OverriderVBase == WhichVFPtr.getVBaseWithVPtr())
     return;
 
   // Otherwise, we need to use the dynamic offset of the final overrider
   // in order to get "this" adjustment right.
   TA.Virtual.Microsoft.VBPtrOffset =
-      (VFPtrVBaseOffset + WhichVFPtr.VFPtrOffset -
+      (VFPtrVBaseOffset + WhichVFPtr.NonVirtualOffset -
        MostDerivedClassLayout.getVBPtrOffset()).getQuantity();
   TA.Virtual.Microsoft.VBOffsetOffset =
       Context.getTypeSizeInChars(Context.IntTy).getQuantity() *
@@ -2777,25 +2755,20 @@
   // Put the virtual methods into VirtualMethods in the proper order:
   // 1) Group overloads by declaration name. New groups are added to the
   //    vftable in the order of their first declarations in this class
-  //    (including overrides).
+  //    (including overrides and non-virtual methods).
   // 2) In each group, new overloads appear in the reverse order of declaration.
   typedef SmallVector<const CXXMethodDecl *, 1> MethodGroup;
   SmallVector<MethodGroup, 10> Groups;
   typedef llvm::DenseMap<DeclarationName, unsigned> VisitedGroupIndicesTy;
   VisitedGroupIndicesTy VisitedGroupIndices;
-  for (CXXRecordDecl::method_iterator I = RD->method_begin(),
-       E = RD->method_end(); I != E; ++I) {
-    const CXXMethodDecl *MD = *I;
-    if (!MD->isVirtual())
-      continue;
-
+  for (const auto *MD : RD->methods()) {
     VisitedGroupIndicesTy::iterator J;
     bool Inserted;
-    llvm::tie(J, Inserted) = VisitedGroupIndices.insert(
+    std::tie(J, Inserted) = VisitedGroupIndices.insert(
         std::make_pair(MD->getDeclName(), Groups.size()));
     if (Inserted)
-      Groups.push_back(MethodGroup(1, MD));
-    else
+      Groups.push_back(MethodGroup());
+    if (MD->isVirtual())
       Groups[J->second].push_back(MD);
   }
 
@@ -2803,6 +2776,32 @@
     VirtualMethods.append(Groups[I].rbegin(), Groups[I].rend());
 }
 
+/// We need a return adjusting thunk for this method if its return type
+/// requires a non-trivial conversion to the return type of at least one of
+/// the methods it overrides.
+bool VFTableBuilder::NeedsReturnAdjustingThunk(const CXXMethodDecl *MD) {
+  OverriddenMethodsSetTy OverriddenMethods;
+  ComputeAllOverriddenMethods(MD, OverriddenMethods);
+  for (OverriddenMethodsSetTy::iterator I = OverriddenMethods.begin(),
+                                        E = OverriddenMethods.end();
+       I != E; ++I) {
+    const CXXMethodDecl *OverriddenMD = *I;
+    BaseOffset Adjustment =
+        ComputeReturnAdjustmentBaseOffset(Context, MD, OverriddenMD);
+    if (!Adjustment.isEmpty())
+      return true;
+  }
+  return false;
+}
+
+static bool isDirectVBase(const CXXRecordDecl *Base, const CXXRecordDecl *RD) {
+  for (const auto &B : RD->bases()) {
+    if (B.isVirtual() && B.getType()->getAsCXXRecordDecl() == Base)
+      return true;
+  }
+  return false;
+}
+
 void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
                                 const CXXRecordDecl *LastVBase,
                                 BasesSetVectorTy &VisitedBases) {
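A minimal, hypothetical C++11 example (not part of this patch; class names invented) of the situation NeedsReturnAdjustingThunk is looking for, i.e. a covariant override whose return value needs a pointer adjustment when called through a base-class vftable:

// Hypothetical illustration only: when is a return-adjusting thunk needed?
struct A { int a; };
struct B {
  int b;
  virtual B *clone() { return new B(*this); }
};
struct C : A, B {
  // Covariant override: callers going through B's vftable expect a B*, but
  // this overrider returns a C*.  Converting C* to B* must add the non-zero
  // offset of the B subobject inside C, so the clone() slot in the B-in-C
  // vftable has to point at a return-adjusting thunk rather than directly at
  // C::clone.  This corresponds to ComputeReturnAdjustmentBaseOffset
  // returning a non-empty adjustment.
  C *clone() override { return new C(*this); }
};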
@@ -2816,9 +2815,9 @@
   // the one defined by the vfptr base path or the primary base of the current class.
   const CXXRecordDecl *NextBase = 0, *NextLastVBase = LastVBase;
   CharUnits NextBaseOffset;
-  if (BaseDepth < WhichVFPtr.PathToBaseWithVFPtr.size()) {
-    NextBase = WhichVFPtr.PathToBaseWithVFPtr[BaseDepth];
-    if (Layout.getVBaseOffsetsMap().count(NextBase)) {
+  if (BaseDepth < WhichVFPtr.PathToBaseWithVPtr.size()) {
+    NextBase = WhichVFPtr.PathToBaseWithVPtr[BaseDepth];
+    if (isDirectVBase(NextBase, RD)) {
       NextLastVBase = NextBase;
       NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(NextBase);
     } else {
@@ -2856,13 +2855,21 @@
 
     FinalOverriders::OverriderInfo Overrider =
         Overriders.getOverrider(MD, Base.getBaseOffset());
-    ThisAdjustment ThisAdjustmentOffset;
-    bool ForceThunk = false;
+    const CXXMethodDecl *OverriderMD = Overrider.Method;
+    const CXXMethodDecl *OverriddenMD =
+        FindNearestOverriddenMethod(MD, VisitedBases);
 
-    // Check if this virtual member function overrides
-    // a method in one of the visited bases.
-    if (const CXXMethodDecl *OverriddenMD =
-            FindNearestOverriddenMethod(MD, VisitedBases)) {
+    ThisAdjustment ThisAdjustmentOffset;
+    bool ReturnAdjustingThunk = false;
+    CharUnits ThisOffset = ComputeThisOffset(Overrider);
+    ThisAdjustmentOffset.NonVirtual =
+        (ThisOffset - WhichVFPtr.FullOffsetInMDC).getQuantity();
+    if ((OverriddenMD || OverriderMD != MD) &&
+        WhichVFPtr.getVBaseWithVPtr())
+      CalculateVtordispAdjustment(Overrider, ThisOffset, ThisAdjustmentOffset);
+
+    if (OverriddenMD) {
+      // If MD overrides anything in this vftable, we need to update the entries.
       MethodInfoMapTy::iterator OverriddenMDIterator =
           MethodInfoMap.find(OverriddenMD);
 
@@ -2872,23 +2879,7 @@
 
       MethodInfo &OverriddenMethodInfo = OverriddenMDIterator->second;
 
-      // Create a this-adjusting thunk if needed.
-      CharUnits TI = ComputeThisOffset(MD, Base, Overrider);
-      if (TI != WhichVFPtr.VFPtrFullOffset) {
-        ThisAdjustmentOffset.NonVirtual =
-            (TI - WhichVFPtr.VFPtrFullOffset).getQuantity();
-      }
-
-      if (WhichVFPtr.LastVBase)
-        CalculateVtordispAdjustment(Overrider, TI, ThisAdjustmentOffset);
-
-      if (!ThisAdjustmentOffset.isEmpty()) {
-        VTableThunks[OverriddenMethodInfo.VFTableIndex].This =
-            ThisAdjustmentOffset;
-        AddThunk(MD, VTableThunks[OverriddenMethodInfo.VFTableIndex]);
-      }
-
-      if (MD->getResultType() == OverriddenMD->getResultType()) {
+      if (!NeedsReturnAdjustingThunk(MD)) {
         // No return adjustment needed - just replace the overridden method info
         // with the current info.
         MethodInfo MI(OverriddenMethodInfo.VBTableIndex,
@@ -2899,33 +2890,17 @@
                "Should not have method info for this method yet!");
         MethodInfoMap.insert(std::make_pair(MD, MI));
         continue;
-      } else {
-        // In case we need a return adjustment, we'll add a new slot for
-        // the overrider and put a return-adjusting thunk where the overridden
-        // method was in the vftable.
-        // For now, just mark the overriden method as shadowed by a new slot.
-        OverriddenMethodInfo.Shadowed = true;
-        ForceThunk = true;
-
-        // Also apply this adjustment to the shadowed slots.
-        if (!ThisAdjustmentOffset.isEmpty()) {
-          // FIXME: this is O(N^2), can be O(N).
-          const CXXMethodDecl *SubOverride = OverriddenMD;
-          while ((SubOverride =
-                      FindNearestOverriddenMethod(SubOverride, VisitedBases))) {
-            MethodInfoMapTy::iterator SubOverrideIterator =
-                MethodInfoMap.find(SubOverride);
-            if (SubOverrideIterator == MethodInfoMap.end())
-              break;
-            MethodInfo &SubOverrideMI = SubOverrideIterator->second;
-            assert(SubOverrideMI.Shadowed);
-            VTableThunks[SubOverrideMI.VFTableIndex].This =
-                ThisAdjustmentOffset;
-            AddThunk(MD, VTableThunks[SubOverrideMI.VFTableIndex]);
-          }
-        }
       }
-    } else if (Base.getBaseOffset() != WhichVFPtr.VFPtrFullOffset ||
+
+      // In case we need a return adjustment, we'll add a new slot for
+      // the overrider. Mark the overridden method as shadowed by the new slot.
+      OverriddenMethodInfo.Shadowed = true;
+
+      // Force a special name mangling for a return-adjusting thunk
+      // unless the method is the final overrider without this adjustment.
+      ReturnAdjustingThunk =
+          !(MD == OverriderMD && ThisAdjustmentOffset.isEmpty());
+    } else if (Base.getBaseOffset() != WhichVFPtr.FullOffsetInMDC ||
                MD->size_overridden_methods()) {
       // Skip methods that don't belong to the vftable of the current class,
       // e.g. each method that wasn't seen in any of the visited sub-bases
@@ -2943,8 +2918,6 @@
            "Should not have method info for this method yet!");
     MethodInfoMap.insert(std::make_pair(MD, MI));
 
-    const CXXMethodDecl *OverriderMD = Overrider.Method;
-
     // Check if this overrider needs a return adjustment.
     // We don't want to do this for pure virtual member functions.
     BaseOffset ReturnAdjustmentOffset;
@@ -2954,7 +2927,7 @@
           ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
     }
     if (!ReturnAdjustmentOffset.isEmpty()) {
-      ForceThunk = true;
+      ReturnAdjustingThunk = true;
       ReturnAdjustment.NonVirtual =
           ReturnAdjustmentOffset.NonVirtualOffset.getQuantity();
       if (ReturnAdjustmentOffset.VirtualBase) {
@@ -2969,40 +2942,30 @@
     }
 
     AddMethod(OverriderMD, ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment,
-                                     ForceThunk ? MD : 0));
+                                     ReturnAdjustingThunk ? MD : 0));
   }
 }
 
-void PrintBasePath(const VFPtrInfo::BasePath &Path, raw_ostream &Out) {
-  for (VFPtrInfo::BasePath::const_reverse_iterator I = Path.rbegin(),
+static void PrintBasePath(const VPtrInfo::BasePath &Path, raw_ostream &Out) {
+  for (VPtrInfo::BasePath::const_reverse_iterator I = Path.rbegin(),
        E = Path.rend(); I != E; ++I) {
-    Out << "'" << (*I)->getQualifiedNameAsString() << "' in ";
+    Out << "'";
+    (*I)->printQualifiedName(Out);
+    Out << "' in ";
   }
 }
 
-struct MicrosoftThunkInfoStableSortComparator {
-  bool operator() (const ThunkInfo &LHS, const ThunkInfo &RHS) {
-    if (LHS.This != RHS.This)
-      return LHS.This < RHS.This;
-
-    if (LHS.Return != RHS.Return)
-      return LHS.Return < RHS.Return;
-
-    // Keep different thunks with the same adjustments in the order they
-    // were put into the vector.
-    return false;
-  }
-};
-
 static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out,
                                          bool ContinueFirstLine) {
   const ReturnAdjustment &R = TI.Return;
   bool Multiline = false;
-  const char *LinePrefix = "\n        ";
-  if (!R.isEmpty()) {
+  const char *LinePrefix = "\n       ";
+  if (!R.isEmpty() || TI.Method) {
     if (!ContinueFirstLine)
       Out << LinePrefix;
-    Out << "[return adjustment: ";
+    Out << "[return adjustment (to type '"
+        << TI.Method->getReturnType().getCanonicalType().getAsString()
+        << "'): ";
     if (R.Virtual.Microsoft.VBPtrOffset)
       Out << "vbptr at offset " << R.Virtual.Microsoft.VBPtrOffset << ", ";
     if (R.Virtual.Microsoft.VBIndex)
@@ -3021,7 +2984,7 @@
       Out << "vtordisp at " << T.Virtual.Microsoft.VtordispOffset << ", ";
       if (T.Virtual.Microsoft.VBPtrOffset) {
         Out << "vbptr at " << T.Virtual.Microsoft.VBPtrOffset
-            << " to the left, ";
+            << " to the left,";
         assert(T.Virtual.Microsoft.VBOffsetOffset > 0);
         Out << LinePrefix << " vboffset at "
             << T.Virtual.Microsoft.VBOffsetOffset << " in the vbtable, ";
@@ -3033,9 +2996,11 @@
 
 void VFTableBuilder::dumpLayout(raw_ostream &Out) {
   Out << "VFTable for ";
-  PrintBasePath(WhichVFPtr.PathToBaseWithVFPtr, Out);
-  Out << "'" << MostDerivedClass->getQualifiedNameAsString();
-  Out << "' (" << Components.size() << " entries).\n";
+  PrintBasePath(WhichVFPtr.PathToBaseWithVPtr, Out);
+  Out << "'";
+  MostDerivedClass->printQualifiedName(Out);
+  Out << "' (" << Components.size()
+      << (Components.size() == 1 ? " entry" : " entries") << ").\n";
 
   for (unsigned I = 0, E = Components.size(); I != E; ++I) {
     Out << llvm::format("%4d | ", I);
@@ -3045,12 +3010,15 @@
     // Dump the component.
     switch (Component.getKind()) {
     case VTableComponent::CK_RTTI:
-      Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
+      Component.getRTTIDecl()->printQualifiedName(Out);
+      Out << " RTTI";
       break;
 
     case VTableComponent::CK_FunctionPointer: {
       const CXXMethodDecl *MD = Component.getFunctionDecl();
 
+      // FIXME: Figure out how to print the real thunk type, since they can
+      // differ in the return type.
       std::string Str = PredefinedExpr::ComputeName(
           PredefinedExpr::PrettyFunctionNoVirtual, MD);
       Out << Str;
@@ -3072,7 +3040,7 @@
     case VTableComponent::CK_DeletingDtorPointer: {
       const CXXDestructorDecl *DD = Component.getDestructorDecl();
 
-      Out << DD->getQualifiedNameAsString();
+      DD->printQualifiedName(Out);
       Out << "() [scalar deleting]";
 
       if (DD->isPure())
@@ -3124,7 +3092,11 @@
 
       ThunkInfoVectorTy ThunksVector = Thunks[MD];
       std::stable_sort(ThunksVector.begin(), ThunksVector.end(),
-                       MicrosoftThunkInfoStableSortComparator());
+                       [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+        // Keep different thunks with the same adjustments in the order they
+        // were put into the vector.
+        return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
+      });
 
       Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
       Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
@@ -3140,123 +3112,169 @@
       Out << '\n';
     }
   }
-}
+
+  Out.flush();
 }
 
-void MicrosoftVTableContext::enumerateVFPtrs(
-    const CXXRecordDecl *MostDerivedClass,
-    const ASTRecordLayout &MostDerivedClassLayout, BaseSubobject Base,
-    const CXXRecordDecl *LastVBase,
-    const VFPtrInfo::BasePath &PathFromCompleteClass,
-    BasesSetVectorTy &VisitedVBases,
-    VFPtrListTy &Result) {
-  const CXXRecordDecl *CurrentClass = Base.getBase();
-  CharUnits OffsetInCompleteClass = Base.getBaseOffset();
-  const ASTRecordLayout &CurrentClassLayout =
-      Context.getASTRecordLayout(CurrentClass);
-
-  if (CurrentClassLayout.hasOwnVFPtr()) {
-    if (LastVBase) {
-      uint64_t VBIndex = getVBTableIndex(MostDerivedClass, LastVBase);
-      assert(VBIndex > 0 && "vbases must have vbindex!");
-      CharUnits VFPtrOffset =
-          OffsetInCompleteClass -
-          MostDerivedClassLayout.getVBaseClassOffset(LastVBase);
-      Result.push_back(VFPtrInfo(VBIndex, LastVBase, VFPtrOffset,
-                                 PathFromCompleteClass, OffsetInCompleteClass));
-    } else {
-      Result.push_back(VFPtrInfo(OffsetInCompleteClass, PathFromCompleteClass));
-    }
+static bool setsIntersect(const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &A,
+                          const llvm::ArrayRef<const CXXRecordDecl *> &B) {
+  for (llvm::ArrayRef<const CXXRecordDecl *>::iterator I = B.begin(),
+                                                       E = B.end();
+       I != E; ++I) {
+    if (A.count(*I))
+      return true;
   }
+  return false;
+}
 
-  for (CXXRecordDecl::base_class_const_iterator I = CurrentClass->bases_begin(),
-       E = CurrentClass->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+static bool rebucketPaths(VPtrInfoVector &Paths);
 
-    CharUnits NextBaseOffset;
-    const CXXRecordDecl *NextLastVBase;
-    if (I->isVirtual()) {
-      if (!VisitedVBases.insert(BaseDecl))
+/// Produces MSVC-compatible vbtable data.  The symbols produced by this
+/// algorithm match those produced by MSVC 2012 and newer, which is different
+/// from MSVC 2010.
+///
+/// MSVC 2012 appears to minimize the vbtable names using the following
+/// algorithm.  First, walk the class hierarchy in the usual order, depth first,
+/// left to right, to find all of the subobjects which contain a vbptr field.
+/// Visiting each class node yields a list of inheritance paths to vbptrs.  Each
+/// record with a vbptr creates an initially empty path.
+///
+/// To combine paths from child nodes, the paths are compared to check for
+/// ambiguity.  Paths are "ambiguous" if multiple paths have the same set of
+/// components in the same order.  Each group of ambiguous paths is extended by
+/// appending the class of the base from which it came.  If the current class
+/// node produced an ambiguous path, its path is extended with the current class.
+/// After extending paths, MSVC again checks for ambiguity, and extends any
+/// ambiguous path which wasn't already extended.  Because each node yields an
+/// unambiguous set of paths, MSVC doesn't need to extend any path more than once
+/// to produce an unambiguous set of paths.
+///
+/// TODO: Presumably vftables use the same algorithm.
+void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
+                                                const CXXRecordDecl *RD,
+                                                VPtrInfoVector &Paths) {
+  assert(Paths.empty());
+  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+  // Base case: this subobject has its own vptr.
+  if (ForVBTables ? Layout.hasOwnVBPtr() : Layout.hasOwnVFPtr())
+    Paths.push_back(new VPtrInfo(RD));
+
+  // Recursive case: get all the vbtables from our bases and remove anything
+  // that shares a virtual base.
+  llvm::SmallPtrSet<const CXXRecordDecl*, 4> VBasesSeen;
+  for (const auto &B : RD->bases()) {
+    const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
+    if (B.isVirtual() && VBasesSeen.count(Base))
+      continue;
+
+    if (!Base->isDynamicClass())
+      continue;
+
+    const VPtrInfoVector &BasePaths =
+        ForVBTables ? enumerateVBTables(Base) : getVFPtrOffsets(Base);
+
+    for (VPtrInfoVector::const_iterator II = BasePaths.begin(),
+                                        EE = BasePaths.end();
+         II != EE; ++II) {
+      VPtrInfo *BaseInfo = *II;
+
+      // Don't include the path if it goes through a virtual base that we've
+      // already included.
+      if (setsIntersect(VBasesSeen, BaseInfo->ContainingVBases))
         continue;
-      NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
-      NextLastVBase = BaseDecl;
-    } else {
-      NextBaseOffset = OffsetInCompleteClass +
-                       CurrentClassLayout.getBaseClassOffset(BaseDecl);
-      NextLastVBase = LastVBase;
+
+      // Copy the path and adjust it as necessary.
+      VPtrInfo *P = new VPtrInfo(*BaseInfo);
+
+      // We mangle Base into the path if the path would've been ambiguous and it
+      // wasn't already extended with Base.
+      if (P->MangledPath.empty() || P->MangledPath.back() != Base)
+        P->NextBaseToMangle = Base;
+
+      // Keep track of the full path.
+      // FIXME: Why do we need this?
+      P->PathToBaseWithVPtr.insert(P->PathToBaseWithVPtr.begin(), Base);
+
+      // Keep track of which derived class ultimately uses the vtable, and what
+      // the full adjustment is from the MDC to this vtable.  The adjustment is
+      // captured by an optional vbase and a non-virtual offset.
+      if (Base == (ForVBTables ? Layout.getBaseSharingVBPtr()
+                               : Layout.getPrimaryBase()))
+        P->ReusingBase = RD;
+      if (B.isVirtual())
+        P->ContainingVBases.push_back(Base);
+      else if (P->ContainingVBases.empty())
+        P->NonVirtualOffset += Layout.getBaseClassOffset(Base);
+
+      // Update the full offset in the MDC.
+      P->FullOffsetInMDC = P->NonVirtualOffset;
+      if (const CXXRecordDecl *VB = P->getVBaseWithVPtr())
+        P->FullOffsetInMDC += Layout.getVBaseClassOffset(VB);
+
+      Paths.push_back(P);
     }
 
-    VFPtrInfo::BasePath NewPath = PathFromCompleteClass;
-    NewPath.push_back(BaseDecl);
-    BaseSubobject NextBase(BaseDecl, NextBaseOffset);
+    if (B.isVirtual())
+      VBasesSeen.insert(Base);
 
-    enumerateVFPtrs(MostDerivedClass, MostDerivedClassLayout, NextBase,
-                    NextLastVBase, NewPath, VisitedVBases, Result);
+    // After visiting any direct base, we've transitively visited all of its
+    // morally virtual bases.
+    for (const auto &VB : Base->vbases())
+      VBasesSeen.insert(VB.getType()->getAsCXXRecordDecl());
   }
+
+  // Sort the paths into buckets, and if any of them are ambiguous, extend all
+  // paths in ambiguous buckets.
+  bool Changed = true;
+  while (Changed)
+    Changed = rebucketPaths(Paths);
 }
 
-/// CalculatePathToMangle - Calculate the subset of records that should be used
-/// to mangle the vftable for the given vfptr.
-/// Should only be called if a class has multiple vftables.
-static void
-CalculatePathToMangle(const CXXRecordDecl *RD, VFPtrInfo &VFPtr) {
-  // FIXME: In some rare cases this code produces a slightly incorrect mangling.
-  // It's very likely that the vbtable mangling code can be adjusted to mangle
-  // both vftables and vbtables correctly.
-
-  VFPtrInfo::BasePath &FullPath = VFPtr.PathToBaseWithVFPtr;
-  if (FullPath.empty()) {
-    // Mangle the class's own vftable.
-    assert(RD->getNumVBases() &&
-           "Something's wrong: if the most derived "
-           "class has more than one vftable, it can only have its own "
-           "vftable if it has vbases");
-    VFPtr.PathToMangle.push_back(RD);
-    return;
+static bool extendPath(VPtrInfo *P) {
+  if (P->NextBaseToMangle) {
+    P->MangledPath.push_back(P->NextBaseToMangle);
+    P->NextBaseToMangle = 0;  // Prevent the path from being extended twice.
+    return true;
   }
-
-  unsigned Begin = 0;
-
-  // First, skip all the bases before the vbase.
-  if (VFPtr.LastVBase) {
-    while (FullPath[Begin] != VFPtr.LastVBase) {
-      Begin++;
-      assert(Begin < FullPath.size());
-    }
-  }
-
-  // Then, put the rest of the base path in the reverse order.
-  for (unsigned I = FullPath.size(); I != Begin; --I) {
-    const CXXRecordDecl *CurBase = FullPath[I - 1],
-                        *ItsBase = (I == 1) ? RD : FullPath[I - 2];
-    bool BaseIsVirtual = false;
-    for (CXXRecordDecl::base_class_const_iterator J = ItsBase->bases_begin(),
-         F = ItsBase->bases_end(); J != F; ++J) {
-      if (J->getType()->getAsCXXRecordDecl() == CurBase) {
-        BaseIsVirtual = J->isVirtual();
-        break;
-      }
-    }
-
-    // Should skip the current base if it is a non-virtual base with no siblings.
-    if (BaseIsVirtual || ItsBase->getNumBases() != 1)
-      VFPtr.PathToMangle.push_back(CurBase);
-  }
+  return false;
 }
 
-void MicrosoftVTableContext::enumerateVFPtrs(
-    const CXXRecordDecl *ForClass,
-    MicrosoftVTableContext::VFPtrListTy &Result) {
-  Result.clear();
-  const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(ForClass);
-  BasesSetVectorTy VisitedVBases;
-  enumerateVFPtrs(ForClass, ClassLayout,
-                  BaseSubobject(ForClass, CharUnits::Zero()), 0,
-                  VFPtrInfo::BasePath(), VisitedVBases, Result);
-  if (Result.size() > 1) {
-    for (unsigned I = 0, E = Result.size(); I != E; ++I)
-      CalculatePathToMangle(ForClass, Result[I]);
+static bool rebucketPaths(VPtrInfoVector &Paths) {
+  // What we're essentially doing here is bucketing together ambiguous paths.
+  // Any bucket with more than one path in it gets extended by NextBase, which
+  // is usually the direct base of the inherited vbptr.  This code uses a
+  // sorted vector to implement a multiset to form the buckets.  Note that the
+  // ordering is based on pointers, but it doesn't change our output order.  The
+  // current algorithm is designed to match MSVC 2012's names.
+  VPtrInfoVector PathsSorted(Paths);
+  std::sort(PathsSorted.begin(), PathsSorted.end(),
+            [](const VPtrInfo *LHS, const VPtrInfo *RHS) {
+    return LHS->MangledPath < RHS->MangledPath;
+  });
+  bool Changed = false;
+  for (size_t I = 0, E = PathsSorted.size(); I != E;) {
+    // Scan forward to find the end of the bucket.
+    size_t BucketStart = I;
+    do {
+      ++I;
+    } while (I != E && PathsSorted[BucketStart]->MangledPath ==
+                           PathsSorted[I]->MangledPath);
+
+    // If this bucket has multiple paths, extend them all.
+    if (I - BucketStart > 1) {
+      for (size_t II = BucketStart; II != I; ++II)
+        Changed |= extendPath(PathsSorted[II]);
+      assert(Changed && "no paths were extended to fix ambiguity");
+    }
   }
+  return Changed;
+}
+
+MicrosoftVTableContext::~MicrosoftVTableContext() {
+  llvm::DeleteContainerSeconds(VFPtrLocations);
+  llvm::DeleteContainerSeconds(VFTableLayouts);
+  llvm::DeleteContainerSeconds(VBaseInfo);
 }
 
 void MicrosoftVTableContext::computeVTableRelatedInformation(
@@ -3269,24 +3287,31 @@
 
   const VTableLayout::AddressPointsMapTy EmptyAddressPointsMap;
 
-  VFPtrListTy &VFPtrs = VFPtrLocations[RD];
-  enumerateVFPtrs(RD, VFPtrs);
+  VPtrInfoVector *VFPtrs = new VPtrInfoVector();
+  computeVTablePaths(/*ForVBTables=*/false, RD, *VFPtrs);
+  VFPtrLocations[RD] = VFPtrs;
 
   MethodVFTableLocationsTy NewMethodLocations;
-  for (VFPtrListTy::iterator I = VFPtrs.begin(), E = VFPtrs.end();
+  for (VPtrInfoVector::iterator I = VFPtrs->begin(), E = VFPtrs->end();
        I != E; ++I) {
     VFTableBuilder Builder(*this, RD, *I);
 
-    VFTableIdTy id(RD, I->VFPtrFullOffset);
+    VFTableIdTy id(RD, (*I)->FullOffsetInMDC);
     assert(VFTableLayouts.count(id) == 0);
     SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks(
         Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
     VFTableLayouts[id] = new VTableLayout(
         Builder.getNumVTableComponents(), Builder.vtable_component_begin(),
         VTableThunks.size(), VTableThunks.data(), EmptyAddressPointsMap, true);
-    NewMethodLocations.insert(Builder.vtable_indices_begin(),
-                              Builder.vtable_indices_end());
     Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+    for (const auto &Loc : Builder.vtable_locations()) {
+      GlobalDecl GD = Loc.first;
+      MethodVFTableLocation NewLoc = Loc.second;
+      auto M = NewMethodLocations.find(GD);
+      if (M == NewMethodLocations.end() || NewLoc < M->second)
+        NewMethodLocations[GD] = NewLoc;
+    }
   }
 
   MethodVFTableLocations.insert(NewMethodLocations.begin(),
@@ -3324,8 +3349,10 @@
   // Print the vtable indices for all the member functions.
   if (!IndicesMap.empty()) {
     Out << "VFTable indices for ";
-    Out << "'" << RD->getQualifiedNameAsString();
-    Out << "' (" << IndicesMap.size() << " entries).\n";
+    Out << "'";
+    RD->printQualifiedName(Out);
+    Out << "' (" << IndicesMap.size()
+        << (IndicesMap.size() == 1 ? " entry" : " entries") << ").\n";
 
     CharUnits LastVFPtrOffset = CharUnits::fromQuantity(-1);
     uint64_t LastVBIndex = 0;
@@ -3352,49 +3379,66 @@
     }
     Out << '\n';
   }
+
+  Out.flush();
 }
 
-void MicrosoftVTableContext::computeVBTableRelatedInformation(
+const VirtualBaseInfo *MicrosoftVTableContext::computeVBTableRelatedInformation(
     const CXXRecordDecl *RD) {
-  if (ComputedVBTableIndices.count(RD))
-    return;
-  ComputedVBTableIndices.insert(RD);
+  VirtualBaseInfo *VBI;
 
-  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-  BasesSetVectorTy VisitedBases;
+  {
+    // Get or create a VBI for RD.  Don't hold a reference to the DenseMap cell,
+    // as it may be modified and rehashed under us.
+    VirtualBaseInfo *&Entry = VBaseInfo[RD];
+    if (Entry)
+      return Entry;
+    Entry = VBI = new VirtualBaseInfo();
+  }
+
+  computeVTablePaths(/*ForVBTables=*/true, RD, VBI->VBPtrPaths);
 
   // First, see if the Derived class shared the vbptr with a non-virtual base.
+  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
   if (const CXXRecordDecl *VBPtrBase = Layout.getBaseSharingVBPtr()) {
-    // If the Derived class shares the vbptr with a non-virtual base,
-    // it inherits its vbase indices.
-    computeVBTableRelatedInformation(VBPtrBase);
-    for (CXXRecordDecl::base_class_const_iterator I = VBPtrBase->vbases_begin(),
-         E = VBPtrBase->vbases_end(); I != E; ++I) {
-      const CXXRecordDecl *SubVBase = I->getType()->getAsCXXRecordDecl();
-      assert(VBTableIndices.count(ClassPairTy(VBPtrBase, SubVBase)));
-      VBTableIndices[ClassPairTy(RD, SubVBase)] =
-          VBTableIndices[ClassPairTy(VBPtrBase, SubVBase)];
-      VisitedBases.insert(SubVBase);
-    }
+    // If the Derived class shares the vbptr with a non-virtual base, the shared
+    // virtual bases come first so that the layout is the same.
+    const VirtualBaseInfo *BaseInfo =
+        computeVBTableRelatedInformation(VBPtrBase);
+    VBI->VBTableIndices.insert(BaseInfo->VBTableIndices.begin(),
+                               BaseInfo->VBTableIndices.end());
   }
 
   // New vbases are added to the end of the vbtable.
   // Skip the self entry and vbases visited in the non-virtual base, if any.
-  unsigned VBTableIndex = 1 + VisitedBases.size();
-  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
-       E = RD->vbases_end(); I != E; ++I) {
-    const CXXRecordDecl *CurVBase = I->getType()->getAsCXXRecordDecl();
-    if (VisitedBases.insert(CurVBase))
-      VBTableIndices[ClassPairTy(RD, CurVBase)] = VBTableIndex++;
+  unsigned VBTableIndex = 1 + VBI->VBTableIndices.size();
+  for (const auto &VB : RD->vbases()) {
+    const CXXRecordDecl *CurVBase = VB.getType()->getAsCXXRecordDecl();
+    if (!VBI->VBTableIndices.count(CurVBase))
+      VBI->VBTableIndices[CurVBase] = VBTableIndex++;
   }
+
+  return VBI;
 }
 
-const MicrosoftVTableContext::VFPtrListTy &
+unsigned MicrosoftVTableContext::getVBTableIndex(const CXXRecordDecl *Derived,
+                                                 const CXXRecordDecl *VBase) {
+  const VirtualBaseInfo *VBInfo = computeVBTableRelatedInformation(Derived);
+  assert(VBInfo->VBTableIndices.count(VBase));
+  return VBInfo->VBTableIndices.find(VBase)->second;
+}
+
+const VPtrInfoVector &
+MicrosoftVTableContext::enumerateVBTables(const CXXRecordDecl *RD) {
+  return computeVBTableRelatedInformation(RD)->VBPtrPaths;
+}
+
+const VPtrInfoVector &
 MicrosoftVTableContext::getVFPtrOffsets(const CXXRecordDecl *RD) {
   computeVTableRelatedInformation(RD);
 
   assert(VFPtrLocations.count(RD) && "Couldn't find vfptr locations");
-  return VFPtrLocations[RD];
+  return *VFPtrLocations[RD];
 }
 
 const VTableLayout &
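The path-disambiguation logic added above (computeVTablePaths, extendPath, rebucketPaths) is easier to see outside the diff noise. The following self-contained sketch models the same idea on plain strings; the types and names here are invented for illustration and only the bucket-and-extend mechanism mirrors the patch:

#include <algorithm>
#include <string>
#include <vector>

// Simplified stand-in for VPtrInfo: a mangled path plus the one component
// that may still be appended to disambiguate it.
struct PathInfo {
  std::vector<std::string> MangledPath;
  std::string NextToMangle; // cleared once the path has been extended
};

static bool extendPath(PathInfo &P) {
  if (P.NextToMangle.empty())
    return false;
  P.MangledPath.push_back(P.NextToMangle);
  P.NextToMangle.clear(); // prevent extending the same path twice
  return true;
}

// Bucket equal paths together and extend every path in an ambiguous bucket.
// Returns true if anything changed, so callers loop until a fixed point.
static bool rebucketPaths(std::vector<PathInfo *> &Paths) {
  std::vector<PathInfo *> Sorted(Paths);
  std::sort(Sorted.begin(), Sorted.end(),
            [](const PathInfo *L, const PathInfo *R) {
              return L->MangledPath < R->MangledPath;
            });
  bool Changed = false;
  for (size_t I = 0, E = Sorted.size(); I != E;) {
    size_t BucketStart = I;
    do
      ++I;
    while (I != E &&
           Sorted[BucketStart]->MangledPath == Sorted[I]->MangledPath);
    if (I - BucketStart > 1)
      for (size_t J = BucketStart; J != I; ++J)
        Changed |= extendPath(*Sorted[J]);
  }
  return Changed;
}

A caller would loop `while (rebucketPaths(Paths)) {}`, which is the role of the Changed loop at the end of computeVTablePaths in the patch.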
diff --git a/lib/ASTMatchers/ASTMatchFinder.cpp b/lib/ASTMatchers/ASTMatchFinder.cpp
index f6dcb97..72a9d03 100644
--- a/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -58,11 +58,8 @@
   BoundNodesTreeBuilder BoundNodes;
 
   bool operator<(const MatchKey &Other) const {
-    if (MatcherID != Other.MatcherID)
-      return MatcherID < Other.MatcherID;
-    if (Node != Other.Node)
-      return Node < Other.Node;
-    return BoundNodes < Other.BoundNodes;
+    return std::tie(MatcherID, Node, BoundNodes) <
+           std::tie(Other.MatcherID, Other.Node, Other.BoundNodes);
   }
 };
 
@@ -272,8 +269,8 @@
   // traversal should continue after this function returns.
   template <typename T>
   bool traverse(const T &Node) {
-    TOOLING_COMPILE_ASSERT(IsBaseType<T>::value,
-                           traverse_can_only_be_instantiated_with_base_type);
+    static_assert(IsBaseType<T>::value,
+                  "traverse can only be instantiated with base type");
     if (!match(Node))
       return false;
     return baseTraverse(Node);
@@ -410,36 +407,36 @@
     return Visitor.findMatch(Node);
   }
 
-  virtual bool classIsDerivedFrom(const CXXRecordDecl *Declaration,
-                                  const Matcher<NamedDecl> &Base,
-                                  BoundNodesTreeBuilder *Builder);
+  bool classIsDerivedFrom(const CXXRecordDecl *Declaration,
+                          const Matcher<NamedDecl> &Base,
+                          BoundNodesTreeBuilder *Builder) override;
 
   // Implements ASTMatchFinder::matchesChildOf.
-  virtual bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
-                              const DynTypedMatcher &Matcher,
-                              BoundNodesTreeBuilder *Builder,
-                              TraversalKind Traversal,
-                              BindKind Bind) {
+  bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
+                      const DynTypedMatcher &Matcher,
+                      BoundNodesTreeBuilder *Builder,
+                      TraversalKind Traversal,
+                      BindKind Bind) override {
     if (ResultCache.size() > MaxMemoizationEntries)
       ResultCache.clear();
     return memoizedMatchesRecursively(Node, Matcher, Builder, 1, Traversal,
                                       Bind);
   }
   // Implements ASTMatchFinder::matchesDescendantOf.
-  virtual bool matchesDescendantOf(const ast_type_traits::DynTypedNode &Node,
-                                   const DynTypedMatcher &Matcher,
-                                   BoundNodesTreeBuilder *Builder,
-                                   BindKind Bind) {
+  bool matchesDescendantOf(const ast_type_traits::DynTypedNode &Node,
+                           const DynTypedMatcher &Matcher,
+                           BoundNodesTreeBuilder *Builder,
+                           BindKind Bind) override {
     if (ResultCache.size() > MaxMemoizationEntries)
       ResultCache.clear();
     return memoizedMatchesRecursively(Node, Matcher, Builder, INT_MAX,
                                       TK_AsIs, Bind);
   }
   // Implements ASTMatchFinder::matchesAncestorOf.
-  virtual bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
-                                 const DynTypedMatcher &Matcher,
-                                 BoundNodesTreeBuilder *Builder,
-                                 AncestorMatchMode MatchMode) {
+  bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
+                         const DynTypedMatcher &Matcher,
+                         BoundNodesTreeBuilder *Builder,
+                         AncestorMatchMode MatchMode) override {
     // Reset the cache outside of the recursive call to make sure we
     // don't invalidate any iterators.
     if (ResultCache.size() > MaxMemoizationEntries)
@@ -469,7 +466,7 @@
   }
 
   // Implements ASTMatchFinder::getASTContext.
-  virtual ASTContext &getASTContext() const { return *ActiveASTContext; }
+  ASTContext &getASTContext() const override { return *ActiveASTContext; }
 
   bool shouldVisitTemplateInstantiations() const { return true; }
   bool shouldVisitImplicitCode() const { return true; }
@@ -576,7 +573,7 @@
       : Context(Context),
         Callback(Callback) {}
 
-    virtual void visitMatch(const BoundNodes& BoundNodesView) {
+    void visitMatch(const BoundNodes& BoundNodesView) override {
       Callback->run(MatchFinder::MatchResult(BoundNodesView, Context));
     }
 
@@ -667,11 +664,8 @@
                                          BoundNodesTreeBuilder *Builder) {
   if (!Declaration->hasDefinition())
     return false;
-  typedef CXXRecordDecl::base_class_const_iterator BaseIterator;
-  for (BaseIterator It = Declaration->bases_begin(),
-                    End = Declaration->bases_end();
-       It != End; ++It) {
-    const Type *TypeNode = It->getType().getTypePtr();
+  for (const auto &It : Declaration->bases()) {
+    const Type *TypeNode = It.getType().getTypePtr();
 
     if (typeHasMatchingAlias(TypeNode, Base, Builder))
       return true;
@@ -749,7 +743,7 @@
       : Finder(Finder), ParsingDone(ParsingDone) {}
 
 private:
-  virtual void HandleTranslationUnit(ASTContext &Context) {
+  void HandleTranslationUnit(ASTContext &Context) override {
     if (ParsingDone != NULL) {
       ParsingDone->run();
     }
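Several hand-written comparators in this patch (MatchKey::operator< above, and the thunk-sorting lambdas in VTableBuilder.cpp) are collapsed into std::tie comparisons. A standalone sketch of the pattern, with invented field names:

#include <string>
#include <tuple>

struct Key {
  int Id;
  std::string Name;
  // std::tie builds a tuple of references, and tuple's operator< compares
  // lexicographically: Id first, then Name only when the Ids are equal.
  // This is equivalent to the if/return chains it replaces.
  bool operator<(const Key &Other) const {
    return std::tie(Id, Name) < std::tie(Other.Id, Other.Name);
  }
};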
diff --git a/lib/ASTMatchers/ASTMatchersInternal.cpp b/lib/ASTMatchers/ASTMatchersInternal.cpp
index d15eb54..47b8b6d 100644
--- a/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -34,6 +34,26 @@
   }
 }
 
+bool NotUnaryOperator(const ast_type_traits::DynTypedNode DynNode,
+                      ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder,
+                      ArrayRef<DynTypedMatcher> InnerMatchers) {
+  if (InnerMatchers.size() != 1)
+    return false;
+
+  // The 'unless' matcher will always discard the result:
+  // If the inner matcher doesn't match, unless returns true,
+  // but the inner matcher cannot have bound anything.
+  // If the inner matcher matches, the result is false, and
+  // any possible binding will be discarded.
+  // We still need to hand in all the bound nodes up to this
+  // point so the inner matcher can depend on bound nodes,
+  // and we need to actively discard the bound nodes, otherwise
+  // the inner matcher will reset the bound nodes if it doesn't
+  // match, but this would be inverted by 'unless'.
+  BoundNodesTreeBuilder Discard(*Builder);
+  return !InnerMatchers[0].matches(DynNode, Finder, &Discard);
+}
+
 bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode DynNode,
                            ASTMatchFinder *Finder,
                            BoundNodesTreeBuilder *Builder,
diff --git a/lib/ASTMatchers/CMakeLists.txt b/lib/ASTMatchers/CMakeLists.txt
index 4a390a8..8a8e8c3 100644
--- a/lib/ASTMatchers/CMakeLists.txt
+++ b/lib/ASTMatchers/CMakeLists.txt
@@ -5,19 +5,7 @@
 add_clang_library(clangASTMatchers
   ASTMatchFinder.cpp
   ASTMatchersInternal.cpp
-  )
 
-add_dependencies(clangASTMatchers
-  ClangAttrClasses
-  ClangAttrImpl
-  ClangAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangASTMatchers
-  clangBasic
+  LINK_LIBS
   clangAST
   )
diff --git a/lib/ASTMatchers/Dynamic/CMakeLists.txt b/lib/ASTMatchers/Dynamic/CMakeLists.txt
index 843341b..c0d80ad 100644
--- a/lib/ASTMatchers/Dynamic/CMakeLists.txt
+++ b/lib/ASTMatchers/Dynamic/CMakeLists.txt
@@ -5,12 +5,9 @@
   VariantValue.cpp
   Parser.cpp
   Registry.cpp
-  )
 
-add_dependencies(clangDynamicASTMatchers
+  LINK_LIBS
+  clangAST
   clangASTMatchers
-  )
-
-target_link_libraries(clangDynamicASTMatchers
-  clangASTMatchers
+  clangBasic
   )
diff --git a/lib/ASTMatchers/Dynamic/Marshallers.h b/lib/ASTMatchers/Dynamic/Marshallers.h
index ae0c300..7726807 100644
--- a/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -20,14 +20,12 @@
 #ifndef LLVM_CLANG_AST_MATCHERS_DYNAMIC_MARSHALLERS_H
 #define LLVM_CLANG_AST_MATCHERS_DYNAMIC_MARSHALLERS_H
 
-#include <string>
-
 #include "clang/ASTMatchers/ASTMatchers.h"
 #include "clang/ASTMatchers/Dynamic/Diagnostics.h"
 #include "clang/ASTMatchers/Dynamic/VariantValue.h"
 #include "clang/Basic/LLVM.h"
 #include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/type_traits.h"
+#include <string>
 
 namespace clang {
 namespace ast_matchers {
@@ -35,6 +33,46 @@
 
 namespace internal {
 
+struct ArgKind {
+  enum Kind {
+    AK_Matcher,
+    AK_Unsigned,
+    AK_String
+  };
+  ArgKind(Kind K)
+      : K(K) {}
+  ArgKind(ast_type_traits::ASTNodeKind MatcherKind)
+      : K(AK_Matcher), MatcherKind(MatcherKind) {}
+
+  std::string asString() const {
+    switch (getArgKind()) {
+    case AK_Matcher:
+      return (Twine("Matcher<") + MatcherKind.asStringRef() + ">").str();
+    case AK_Unsigned:
+      return "unsigned";
+    case AK_String:
+      return "string";
+    }
+    llvm_unreachable("unhandled ArgKind");
+  }
+
+  Kind getArgKind() const { return K; }
+  ast_type_traits::ASTNodeKind getMatcherKind() const {
+    assert(K == AK_Matcher);
+    return MatcherKind;
+  }
+
+  bool operator<(const ArgKind &Other) const {
+    if (K == AK_Matcher && Other.K == AK_Matcher)
+      return MatcherKind < Other.MatcherKind;
+    return K < Other.K;
+  }
+
+private:
+  Kind K;
+  ast_type_traits::ASTNodeKind MatcherKind;
+};
+
 /// \brief Helper template class to map from argument type to the right is/get
 ///   functions in VariantValue.
 /// Used to verify and extract the matcher arguments below.
@@ -43,11 +81,13 @@
 };
 
 template <> struct ArgTypeTraits<std::string> {
-  static StringRef asString() { return "String"; }
   static bool is(const VariantValue &Value) { return Value.isString(); }
   static const std::string &get(const VariantValue &Value) {
     return Value.getString();
   }
+  static ArgKind getKind() {
+    return ArgKind(ArgKind::AK_String);
+  }
 };
 
 template <>
@@ -55,46 +95,95 @@
 };
 
 template <class T> struct ArgTypeTraits<ast_matchers::internal::Matcher<T> > {
-  static std::string asString() {
-    return (Twine("Matcher<") +
-            ast_type_traits::ASTNodeKind::getFromNodeKind<T>().asStringRef() +
-            ">").str();
-  }
   static bool is(const VariantValue &Value) {
     return Value.isMatcher() && Value.getMatcher().hasTypedMatcher<T>();
   }
   static ast_matchers::internal::Matcher<T> get(const VariantValue &Value) {
     return Value.getMatcher().getTypedMatcher<T>();
   }
+  static ArgKind getKind() {
+    return ArgKind(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+  }
 };
 
 template <> struct ArgTypeTraits<unsigned> {
-  static std::string asString() { return "Unsigned"; }
   static bool is(const VariantValue &Value) { return Value.isUnsigned(); }
   static unsigned get(const VariantValue &Value) {
     return Value.getUnsigned();
   }
+  static ArgKind getKind() {
+    return ArgKind(ArgKind::AK_Unsigned);
+  }
 };
 
-/// \brief Generic MatcherCreate interface.
+/// \brief Matcher descriptor interface.
 ///
-/// Provides a \c run() method that constructs the matcher from the provided
-/// arguments.
-class MatcherCreateCallback {
+/// Provides a \c create() method that constructs the matcher from the provided
+/// arguments, and various other methods for type introspection.
+class MatcherDescriptor {
 public:
-  virtual ~MatcherCreateCallback() {}
-  virtual VariantMatcher run(const SourceRange &NameRange,
-                             ArrayRef<ParserValue> Args,
-                             Diagnostics *Error) const = 0;
+  virtual ~MatcherDescriptor() {}
+  virtual VariantMatcher create(const SourceRange &NameRange,
+                                ArrayRef<ParserValue> Args,
+                                Diagnostics *Error) const = 0;
+
+  /// Returns whether the matcher is variadic. Variadic matchers can take any
+  /// number of arguments, but they must be of the same type.
+  virtual bool isVariadic() const = 0;
+
+  /// Returns the number of arguments accepted by the matcher if not variadic.
+  virtual unsigned getNumArgs() const = 0;
+
+  /// Given that the matcher is being converted to type \p ThisKind, append the
+  /// set of argument types accepted for argument \p ArgNo to \p ArgKinds.
+  // FIXME: We should provide the ability to constrain the output of this
+  // function based on the types of other matcher arguments.
+  virtual void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+                           std::vector<ArgKind> &ArgKinds) const = 0;
+
+  /// Returns whether this matcher is convertible to the given type.  If it is
+  /// so convertible, store in *Specificity a value corresponding to the
+  /// "specificity" of the converted matcher to the given context, and in
+  /// *LeastDerivedKind the least derived matcher kind which would result in the
+  /// same matcher overload.  Zero specificity indicates that this conversion
+  /// would produce a trivial matcher that will either always or never match.
+  /// Such matchers are excluded from code completion results.
+  virtual bool isConvertibleTo(
+      ast_type_traits::ASTNodeKind Kind, unsigned *Specificity = 0,
+      ast_type_traits::ASTNodeKind *LeastDerivedKind = 0) const = 0;
+
+  /// Returns whether the matcher will, given a matcher of any type T, yield a
+  /// matcher of type T.
+  virtual bool isPolymorphic() const { return false; }
 };
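
To make the new introspection surface concrete, here is a minimal usage sketch (illustrative only, not part of this patch); the function name and the llvm::errs() sink are arbitrary choices:

    void dumpArgKinds(const MatcherDescriptor &D,
                      ast_type_traits::ASTNodeKind ThisKind) {
      unsigned Specificity;
      if (!D.isConvertibleTo(ThisKind, &Specificity) || Specificity == 0)
        return; // Unusable, or only trivially usable, in this context.
      const unsigned NumArgs = D.isVariadic() ? 1 : D.getNumArgs();
      for (unsigned Arg = 0; Arg != NumArgs; ++Arg) {
        std::vector<ArgKind> Kinds;
        D.getArgKinds(ThisKind, Arg, Kinds);
        for (size_t I = 0, E = Kinds.size(); I != E; ++I)
          llvm::errs() << Kinds[I].asString() << "\n";
      }
    }
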
 
+inline bool isRetKindConvertibleTo(
+    llvm::ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
+    ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+    ast_type_traits::ASTNodeKind *LeastDerivedKind) {
+  for (llvm::ArrayRef<ast_type_traits::ASTNodeKind>::const_iterator
+           i = RetKinds.begin(),
+           e = RetKinds.end();
+       i != e; ++i) {
+    unsigned Distance;
+    if (i->isBaseOf(Kind, &Distance)) {
+      if (Specificity)
+        *Specificity = 100 - Distance;
+      if (LeastDerivedKind)
+        *LeastDerivedKind = *i;
+      return true;
+    }
+  }
+  return false;
+}
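
For intuition, a small worked example (illustrative only; the local variables are hypothetical): a matcher whose return kinds contain Stmt, checked against a request for Matcher<IfStmt>:

    ast_type_traits::ASTNodeKind Ret[] = {
        ast_type_traits::ASTNodeKind::getFromNodeKind<Stmt>()};
    unsigned Specificity;
    ast_type_traits::ASTNodeKind LeastDerived;
    bool Ok = isRetKindConvertibleTo(
        Ret, ast_type_traits::ASTNodeKind::getFromNodeKind<IfStmt>(),
        &Specificity, &LeastDerived);
    // Ok is true, LeastDerived is Stmt, and Specificity is 100 minus the
    // derivation distance from Stmt down to IfStmt.
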
+
 /// \brief Simple callback implementation. Marshaller and function are provided.
 ///
 /// This class wraps a function of arbitrary signature and a marshaller
-/// function into a MatcherCreateCallback.
+/// function into a MatcherDescriptor.
 /// The marshaller is in charge of taking the VariantValue arguments, checking
 /// their types, unpacking them and calling the underlying function.
-class FixedArgCountMatcherCreateCallback : public MatcherCreateCallback {
+class FixedArgCountMatcherDescriptor : public MatcherDescriptor {
 public:
   typedef VariantMatcher (*MarshallerType)(void (*Func)(),
                                            StringRef MatcherName,
@@ -105,64 +194,42 @@
   /// \param Marshaller Function to unpack the arguments and call \c Func
   /// \param Func Matcher construct function. This is the function that
   ///   compile-time matcher expressions would use to create the matcher.
-  FixedArgCountMatcherCreateCallback(MarshallerType Marshaller, void (*Func)(),
-                                     StringRef MatcherName)
-      : Marshaller(Marshaller), Func(Func), MatcherName(MatcherName) {}
+  /// \param RetKinds The list of matcher types to which the matcher is
+  ///   convertible.
+  /// \param ArgKinds The types of the arguments this matcher takes.
+  FixedArgCountMatcherDescriptor(
+      MarshallerType Marshaller, void (*Func)(), StringRef MatcherName,
+      llvm::ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
+      llvm::ArrayRef<ArgKind> ArgKinds)
+      : Marshaller(Marshaller), Func(Func), MatcherName(MatcherName),
+        RetKinds(RetKinds.begin(), RetKinds.end()),
+        ArgKinds(ArgKinds.begin(), ArgKinds.end()) {}
 
-  VariantMatcher run(const SourceRange &NameRange, ArrayRef<ParserValue> Args,
-                     Diagnostics *Error) const {
+  VariantMatcher create(const SourceRange &NameRange,
+                        ArrayRef<ParserValue> Args, Diagnostics *Error) const {
     return Marshaller(Func, MatcherName, NameRange, Args, Error);
   }
 
+  bool isVariadic() const { return false; }
+  unsigned getNumArgs() const { return ArgKinds.size(); }
+  void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+                   std::vector<ArgKind> &Kinds) const {
+    Kinds.push_back(ArgKinds[ArgNo]);
+  }
+  bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+                       ast_type_traits::ASTNodeKind *LeastDerivedKind) const {
+    return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
+                                  LeastDerivedKind);
+  }
+
 private:
   const MarshallerType Marshaller;
   void (* const Func)();
   const std::string MatcherName;
+  const std::vector<ast_type_traits::ASTNodeKind> RetKinds;
+  const std::vector<ArgKind> ArgKinds;
 };
 
-/// \brief Simple callback implementation. Free function is wrapped.
-///
-/// This class simply wraps a free function with the right signature to export
-/// it as a MatcherCreateCallback.
-/// This allows us to have one implementation of the interface for as many free
-/// functions as we want, reducing the number of symbols and size of the
-/// object file.
-class FreeFuncMatcherCreateCallback : public MatcherCreateCallback {
-public:
-  typedef VariantMatcher (*RunFunc)(StringRef MatcherName,
-                                    const SourceRange &NameRange,
-                                    ArrayRef<ParserValue> Args,
-                                    Diagnostics *Error);
-
-  FreeFuncMatcherCreateCallback(RunFunc Func, StringRef MatcherName)
-      : Func(Func), MatcherName(MatcherName.str()) {}
-
-  VariantMatcher run(const SourceRange &NameRange, ArrayRef<ParserValue> Args,
-                     Diagnostics *Error) const {
-    return Func(MatcherName, NameRange, Args, Error);
-  }
-
-private:
-  const RunFunc Func;
-  const std::string MatcherName;
-};
-
-/// \brief Helper macros to check the arguments on all marshaller functions.
-#define CHECK_ARG_COUNT(count)                                                 \
-  if (Args.size() != count) {                                                  \
-    Error->addError(NameRange, Error->ET_RegistryWrongArgCount)                \
-        << count << Args.size();                                               \
-    return VariantMatcher();                                                   \
-  }
-
-#define CHECK_ARG_TYPE(index, type)                                            \
-  if (!ArgTypeTraits<type>::is(Args[index].Value)) {                           \
-    Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType)         \
-        << (index + 1) << ArgTypeTraits<type>::asString()                      \
-        << Args[index].Value.getTypeAsString();                                \
-    return VariantMatcher();                                                   \
-  }
-
 /// \brief Helper methods to extract and merge all possible typed matchers
 /// out of the polymorphic object.
 template <class PolyMatcher>
@@ -193,10 +260,178 @@
                                                    NULL) {
   std::vector<DynTypedMatcher> Matchers;
   mergePolyMatchers(PolyMatcher, Matchers, typename T::ReturnTypes());
-  VariantMatcher Out = VariantMatcher::PolymorphicMatcher(Matchers);
+  VariantMatcher Out = VariantMatcher::PolymorphicMatcher(std::move(Matchers));
   return Out;
 }
 
+template <typename T>
+inline void buildReturnTypeVectorFromTypeList(
+    std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+  RetTypes.push_back(
+      ast_type_traits::ASTNodeKind::getFromNodeKind<typename T::head>());
+  buildReturnTypeVectorFromTypeList<typename T::tail>(RetTypes);
+}
+
+template <>
+inline void
+buildReturnTypeVectorFromTypeList<ast_matchers::internal::EmptyTypeList>(
+    std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {}
+
+template <typename T>
+struct BuildReturnTypeVector {
+  static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+    buildReturnTypeVectorFromTypeList<typename T::ReturnTypes>(RetTypes);
+  }
+};
+
+template <typename T>
+struct BuildReturnTypeVector<ast_matchers::internal::Matcher<T> > {
+  static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+    RetTypes.push_back(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+  }
+};
+
+template <typename T>
+struct BuildReturnTypeVector<ast_matchers::internal::BindableMatcher<T> > {
+  static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+    RetTypes.push_back(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+  }
+};
+
+/// \brief Variadic marshaller function.
+template <typename ResultT, typename ArgT,
+          ResultT (*Func)(ArrayRef<const ArgT *>)>
+VariantMatcher
+variadicMatcherDescriptor(StringRef MatcherName, const SourceRange &NameRange,
+                          ArrayRef<ParserValue> Args, Diagnostics *Error) {
+  ArgT **InnerArgs = new ArgT *[Args.size()]();
+
+  bool HasError = false;
+  for (size_t i = 0, e = Args.size(); i != e; ++i) {
+    typedef ArgTypeTraits<ArgT> ArgTraits;
+    const ParserValue &Arg = Args[i];
+    const VariantValue &Value = Arg.Value;
+    if (!ArgTraits::is(Value)) {
+      Error->addError(Arg.Range, Error->ET_RegistryWrongArgType)
+          << (i + 1) << ArgTraits::getKind().asString()
+          << Value.getTypeAsString();
+      HasError = true;
+      break;
+    }
+    InnerArgs[i] = new ArgT(ArgTraits::get(Value));
+  }
+
+  VariantMatcher Out;
+  if (!HasError) {
+    Out = outvalueToVariantMatcher(
+        Func(ArrayRef<const ArgT *>(InnerArgs, Args.size())));
+  }
+
+  for (size_t i = 0, e = Args.size(); i != e; ++i) {
+    delete InnerArgs[i];
+  }
+  delete[] InnerArgs;
+  return Out;
+}
+
+/// \brief Matcher descriptor for variadic functions.
+///
+/// This class simply wraps a VariadicFunction with the right signature to
+/// export it as a MatcherDescriptor.
+/// This allows us to have one implementation of the interface for as many free
+/// functions as we want, reducing the number of symbols and size of the
+/// object file.
+class VariadicFuncMatcherDescriptor : public MatcherDescriptor {
+public:
+  typedef VariantMatcher (*RunFunc)(StringRef MatcherName,
+                                    const SourceRange &NameRange,
+                                    ArrayRef<ParserValue> Args,
+                                    Diagnostics *Error);
+
+  template <typename ResultT, typename ArgT,
+            ResultT (*F)(ArrayRef<const ArgT *>)>
+  VariadicFuncMatcherDescriptor(llvm::VariadicFunction<ResultT, ArgT, F> Func,
+                                StringRef MatcherName)
+      : Func(&variadicMatcherDescriptor<ResultT, ArgT, F>),
+        MatcherName(MatcherName.str()),
+        ArgsKind(ArgTypeTraits<ArgT>::getKind()) {
+    BuildReturnTypeVector<ResultT>::build(RetKinds);
+  }
+
+  VariantMatcher create(const SourceRange &NameRange,
+                        ArrayRef<ParserValue> Args, Diagnostics *Error) const {
+    return Func(MatcherName, NameRange, Args, Error);
+  }
+
+  bool isVariadic() const { return true; }
+  unsigned getNumArgs() const { return 0; }
+  void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+                   std::vector<ArgKind> &Kinds) const {
+    Kinds.push_back(ArgsKind);
+  }
+  bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+                       ast_type_traits::ASTNodeKind *LeastDerivedKind) const {
+    return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
+                                  LeastDerivedKind);
+  }
+
+private:
+  const RunFunc Func;
+  const std::string MatcherName;
+  std::vector<ast_type_traits::ASTNodeKind> RetKinds;
+  const ArgKind ArgsKind;
+};
+
+/// \brief Report a zero specificity (trivial conversion) when appropriate for
+/// VariadicDynCastAllOfMatchers.
+class DynCastAllOfMatcherDescriptor : public VariadicFuncMatcherDescriptor {
+public:
+  template <typename BaseT, typename DerivedT>
+  DynCastAllOfMatcherDescriptor(
+      ast_matchers::internal::VariadicDynCastAllOfMatcher<BaseT, DerivedT> Func,
+      StringRef MatcherName)
+      : VariadicFuncMatcherDescriptor(Func, MatcherName),
+        DerivedKind(ast_type_traits::ASTNodeKind::getFromNodeKind<DerivedT>()) {
+  }
+
+  bool
+  isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+                ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+    // If Kind is not a base of DerivedKind, either DerivedKind is a base of
+    // Kind (in which case the match will always succeed) or Kind and
+    // DerivedKind are unrelated (in which case it will always fail), so set
+    // Specificity to 0.
+    if (VariadicFuncMatcherDescriptor::isConvertibleTo(Kind, Specificity,
+                                                       LeastDerivedKind)) {
+      if (Kind.isSame(DerivedKind) || !Kind.isBaseOf(DerivedKind)) {
+        if (Specificity)
+          *Specificity = 0;
+      }
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+private:
+  const ast_type_traits::ASTNodeKind DerivedKind;
+};
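
Concretely (an illustrative example, not exercised by this patch): when completing an argument that expects Matcher<IfStmt>, both ifStmt (whose DerivedKind is IfStmt) and the broader stmt would match every IfStmt node, so both report a specificity of 0 there and are omitted from completions; in a context expecting Matcher<Stmt>, however, ifStmt genuinely narrows the match and keeps the non-zero specificity computed from its return kinds.
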
+
+/// \brief Helper macros to check the arguments on all marshaller functions.
+#define CHECK_ARG_COUNT(count)                                                 \
+  if (Args.size() != count) {                                                  \
+    Error->addError(NameRange, Error->ET_RegistryWrongArgCount)                \
+        << count << Args.size();                                               \
+    return VariantMatcher();                                                   \
+  }
+
+#define CHECK_ARG_TYPE(index, type)                                            \
+  if (!ArgTypeTraits<type>::is(Args[index].Value)) {                           \
+    Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType)         \
+        << (index + 1) << ArgTypeTraits<type>::getKind().asString()            \
+        << Args[index].Value.getTypeAsString();                                \
+    return VariantMatcher();                                                   \
+  }
+
+
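For reference, the fixed-argument marshallers defined below (left unchanged by this hunk) expand these macros roughly as in the following sketch; the name matcherMarshall1Sketch is hypothetical, and the real matcherMarshall1 in this file may differ in detail:

    template <typename ReturnType, typename ArgType1>
    static VariantMatcher matcherMarshall1Sketch(
        void (*Func)(), StringRef MatcherName, const SourceRange &NameRange,
        ArrayRef<ParserValue> Args, Diagnostics *Error) {
      CHECK_ARG_COUNT(1);
      CHECK_ARG_TYPE(0, ArgType1);
      typedef ReturnType (*FuncType)(ArgType1);
      // Recover the real signature and forward the unpacked argument.
      return outvalueToVariantMatcher(reinterpret_cast<FuncType>(Func)(
          ArgTypeTraits<ArgType1>::get(Args[0].Value)));
    }
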
 /// \brief 0-arg marshaller function.
 template <typename ReturnType>
 static VariantMatcher matcherMarshall0(void (*Func)(), StringRef MatcherName,
@@ -239,42 +474,6 @@
 #undef CHECK_ARG_COUNT
 #undef CHECK_ARG_TYPE
 
-/// \brief Variadic marshaller function.
-template <typename ResultT, typename ArgT,
-          ResultT (*Func)(ArrayRef<const ArgT *>)>
-VariantMatcher
-variadicMatcherCreateCallback(StringRef MatcherName,
-                              const SourceRange &NameRange,
-                              ArrayRef<ParserValue> Args, Diagnostics *Error) {
-  ArgT **InnerArgs = new ArgT *[Args.size()]();
-
-  bool HasError = false;
-  for (size_t i = 0, e = Args.size(); i != e; ++i) {
-    typedef ArgTypeTraits<ArgT> ArgTraits;
-    const ParserValue &Arg = Args[i];
-    const VariantValue &Value = Arg.Value;
-    if (!ArgTraits::is(Value)) {
-      Error->addError(Arg.Range, Error->ET_RegistryWrongArgType)
-          << (i + 1) << ArgTraits::asString() << Value.getTypeAsString();
-      HasError = true;
-      break;
-    }
-    InnerArgs[i] = new ArgT(ArgTraits::get(Value));
-  }
-
-  VariantMatcher Out;
-  if (!HasError) {
-    Out = outvalueToVariantMatcher(
-        Func(ArrayRef<const ArgT *>(InnerArgs, Args.size())));
-  }
-
-  for (size_t i = 0, e = Args.size(); i != e; ++i) {
-    delete InnerArgs[i];
-  }
-  delete[] InnerArgs;
-  return Out;
-}
-
 /// \brief Helper class used to collect all the possible overloads of an
 ///   argument adaptative matcher function.
 template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
@@ -282,7 +481,7 @@
 class AdaptativeOverloadCollector {
 public:
   AdaptativeOverloadCollector(StringRef Name,
-                              std::vector<MatcherCreateCallback *> &Out)
+                              std::vector<MatcherDescriptor *> &Out)
       : Name(Name), Out(Out) {
     collect(FromTypes());
   }
@@ -296,33 +495,32 @@
 
   /// \brief Recursive case. Get the overload for the head of the list, and
   ///   recurse to the tail.
-  template <typename FromTypeList> inline void collect(FromTypeList);
+  template <typename FromTypeList>
+  inline void collect(FromTypeList);
 
   const StringRef Name;
-  std::vector<MatcherCreateCallback *> &Out;
+  std::vector<MatcherDescriptor *> &Out;
 };
 
-/// \brief MatcherCreateCallback that wraps multiple "overloads" of the same
+/// \brief MatcherDescriptor that wraps multiple "overloads" of the same
 ///   matcher.
 ///
 /// It will try every overload and generate appropriate errors for when none or
 /// more than one overloads match the arguments.
-class OverloadedMatcherCreateCallback : public MatcherCreateCallback {
+class OverloadedMatcherDescriptor : public MatcherDescriptor {
 public:
-  OverloadedMatcherCreateCallback(ArrayRef<MatcherCreateCallback *> Callbacks)
-      : Overloads(Callbacks) {}
+  OverloadedMatcherDescriptor(ArrayRef<MatcherDescriptor *> Callbacks)
+      : Overloads(Callbacks.begin(), Callbacks.end()) {}
 
-  virtual ~OverloadedMatcherCreateCallback() {
-    llvm::DeleteContainerPointers(Overloads);
-  }
+  virtual ~OverloadedMatcherDescriptor() {}
 
-  virtual VariantMatcher run(const SourceRange &NameRange,
-                             ArrayRef<ParserValue> Args,
-                             Diagnostics *Error) const {
+  virtual VariantMatcher create(const SourceRange &NameRange,
+                                ArrayRef<ParserValue> Args,
+                                Diagnostics *Error) const {
     std::vector<VariantMatcher> Constructed;
     Diagnostics::OverloadContext Ctx(Error);
-    for (size_t i = 0, e = Overloads.size(); i != e; ++i) {
-      VariantMatcher SubMatcher = Overloads[i]->run(NameRange, Args, Error);
+    for (const auto &O : Overloads) {
+      VariantMatcher SubMatcher = O->create(NameRange, Args, Error);
       if (!SubMatcher.isNull()) {
         Constructed.push_back(SubMatcher);
       }
@@ -339,20 +537,67 @@
     return Constructed[0];
   }
 
+  bool isVariadic() const {
+    bool Overload0Variadic = Overloads[0]->isVariadic();
+#ifndef NDEBUG
+    for (const auto &O : Overloads) {
+      assert(Overload0Variadic == O->isVariadic());
+    }
+#endif
+    return Overload0Variadic;
+  }
+
+  unsigned getNumArgs() const {
+    unsigned Overload0NumArgs = Overloads[0]->getNumArgs();
+#ifndef NDEBUG
+    for (const auto &O : Overloads) {
+      assert(Overload0NumArgs == O->getNumArgs());
+    }
+#endif
+    return Overload0NumArgs;
+  }
+
+  void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+                   std::vector<ArgKind> &Kinds) const {
+    for (const auto &O : Overloads) {
+      if (O->isConvertibleTo(ThisKind))
+        O->getArgKinds(ThisKind, ArgNo, Kinds);
+    }
+  }
+
+  bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+                       ast_type_traits::ASTNodeKind *LeastDerivedKind) const {
+    for (const auto &O : Overloads) {
+      if (O->isConvertibleTo(Kind, Specificity, LeastDerivedKind))
+        return true;
+    }
+    return false;
+  }
+
 private:
-  std::vector<MatcherCreateCallback *> Overloads;
+  std::vector<std::unique_ptr<MatcherDescriptor>> Overloads;
 };
 
 /// \brief Variadic operator marshaller function.
-class VariadicOperatorMatcherCreateCallback : public MatcherCreateCallback {
+class VariadicOperatorMatcherDescriptor : public MatcherDescriptor {
 public:
   typedef ast_matchers::internal::VariadicOperatorFunction VarFunc;
-  VariadicOperatorMatcherCreateCallback(VarFunc Func, StringRef MatcherName)
-      : Func(Func), MatcherName(MatcherName) {}
+  VariadicOperatorMatcherDescriptor(unsigned MinCount, unsigned MaxCount,
+                                    VarFunc Func, StringRef MatcherName)
+      : MinCount(MinCount), MaxCount(MaxCount), Func(Func),
+        MatcherName(MatcherName) {}
 
-  virtual VariantMatcher run(const SourceRange &NameRange,
-                             ArrayRef<ParserValue> Args,
-                             Diagnostics *Error) const {
+  virtual VariantMatcher create(const SourceRange &NameRange,
+                                ArrayRef<ParserValue> Args,
+                                Diagnostics *Error) const {
+    if (Args.size() < MinCount || MaxCount < Args.size()) {
+      const std::string MaxStr =
+          (MaxCount == UINT_MAX ? "" : Twine(MaxCount)).str();
+      Error->addError(NameRange, Error->ET_RegistryWrongArgCount)
+          << ("(" + Twine(MinCount) + ", " + MaxStr + ")") << Args.size();
+      return VariantMatcher();
+    }
+
     std::vector<VariantMatcher> InnerArgs;
     for (size_t i = 0, e = Args.size(); i != e; ++i) {
       const ParserValue &Arg = Args[i];
@@ -364,85 +609,123 @@
       }
       InnerArgs.push_back(Value.getMatcher());
     }
-    return VariantMatcher::VariadicOperatorMatcher(Func, InnerArgs);
+    return VariantMatcher::VariadicOperatorMatcher(Func, std::move(InnerArgs));
   }
 
+  bool isVariadic() const { return true; }
+  unsigned getNumArgs() const { return 0; }
+  void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+                   std::vector<ArgKind> &Kinds) const {
+    Kinds.push_back(ThisKind);
+  }
+  bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+                       ast_type_traits::ASTNodeKind *LeastDerivedKind) const {
+    if (Specificity)
+      *Specificity = 1;
+    if (LeastDerivedKind)
+      *LeastDerivedKind = Kind;
+    return true;
+  }
+  bool isPolymorphic() const override { return true; }
+
 private:
+  const unsigned MinCount;
+  const unsigned MaxCount;
   const VarFunc Func;
   const StringRef MatcherName;
 };
 
-
 /// Helper functions to select the appropriate marshaller functions.
 /// They detect the number of arguments, arguments types and return type.
 
 /// \brief 0-arg overload
 template <typename ReturnType>
-MatcherCreateCallback *makeMatcherAutoMarshall(ReturnType (*Func)(),
-                                               StringRef MatcherName) {
-  return new FixedArgCountMatcherCreateCallback(
+MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(),
+                                     StringRef MatcherName) {
+  std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+  BuildReturnTypeVector<ReturnType>::build(RetTypes);
+  return new FixedArgCountMatcherDescriptor(
       matcherMarshall0<ReturnType>, reinterpret_cast<void (*)()>(Func),
-      MatcherName);
+      MatcherName, RetTypes, llvm::ArrayRef<ArgKind>());
 }
 
 /// \brief 1-arg overload
 template <typename ReturnType, typename ArgType1>
-MatcherCreateCallback *makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1),
-                                               StringRef MatcherName) {
-  return new FixedArgCountMatcherCreateCallback(
+MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1),
+                                     StringRef MatcherName) {
+  std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+  BuildReturnTypeVector<ReturnType>::build(RetTypes);
+  ArgKind AK = ArgTypeTraits<ArgType1>::getKind();
+  return new FixedArgCountMatcherDescriptor(
       matcherMarshall1<ReturnType, ArgType1>,
-      reinterpret_cast<void (*)()>(Func), MatcherName);
+      reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AK);
 }
 
 /// \brief 2-arg overload
 template <typename ReturnType, typename ArgType1, typename ArgType2>
-MatcherCreateCallback *makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1,
-                                                                  ArgType2),
-                                               StringRef MatcherName) {
-  return new FixedArgCountMatcherCreateCallback(
+MatcherDescriptor *
+makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
+                        StringRef MatcherName) {
+  std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+  BuildReturnTypeVector<ReturnType>::build(RetTypes);
+  ArgKind AKs[] = { ArgTypeTraits<ArgType1>::getKind(),
+                    ArgTypeTraits<ArgType2>::getKind() };
+  return new FixedArgCountMatcherDescriptor(
       matcherMarshall2<ReturnType, ArgType1, ArgType2>,
-      reinterpret_cast<void (*)()>(Func), MatcherName);
+      reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AKs);
 }
 
 /// \brief Variadic overload.
 template <typename ResultT, typename ArgT,
           ResultT (*Func)(ArrayRef<const ArgT *>)>
-MatcherCreateCallback *
+MatcherDescriptor *
 makeMatcherAutoMarshall(llvm::VariadicFunction<ResultT, ArgT, Func> VarFunc,
                         StringRef MatcherName) {
-  return new FreeFuncMatcherCreateCallback(
-      &variadicMatcherCreateCallback<ResultT, ArgT, Func>, MatcherName);
+  return new VariadicFuncMatcherDescriptor(VarFunc, MatcherName);
+}
+
+/// \brief Overload for VariadicDynCastAllOfMatchers.
+///
+/// Not strictly necessary, but DynCastAllOfMatcherDescriptor gives us better
+/// completion results for that type of matcher.
+template <typename BaseT, typename DerivedT>
+MatcherDescriptor *
+makeMatcherAutoMarshall(ast_matchers::internal::VariadicDynCastAllOfMatcher<
+                            BaseT, DerivedT> VarFunc,
+                        StringRef MatcherName) {
+  return new DynCastAllOfMatcherDescriptor(VarFunc, MatcherName);
 }
 
 /// \brief Argument adaptative overload.
 template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
           typename FromTypes, typename ToTypes>
-MatcherCreateCallback *
+MatcherDescriptor *
 makeMatcherAutoMarshall(ast_matchers::internal::ArgumentAdaptingMatcherFunc<
                             ArgumentAdapterT, FromTypes, ToTypes>,
                         StringRef MatcherName) {
-  std::vector<MatcherCreateCallback *> Overloads;
+  std::vector<MatcherDescriptor *> Overloads;
   AdaptativeOverloadCollector<ArgumentAdapterT, FromTypes, ToTypes>(MatcherName,
                                                                     Overloads);
-  return new OverloadedMatcherCreateCallback(Overloads);
+  return new OverloadedMatcherDescriptor(Overloads);
 }
 
 template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
           typename FromTypes, typename ToTypes>
 template <typename FromTypeList>
-inline void
-AdaptativeOverloadCollector<ArgumentAdapterT, FromTypes, ToTypes>::collect(
-    FromTypeList) {
+inline void AdaptativeOverloadCollector<ArgumentAdapterT, FromTypes,
+                                        ToTypes>::collect(FromTypeList) {
   Out.push_back(makeMatcherAutoMarshall(
       &AdaptativeFunc::template create<typename FromTypeList::head>, Name));
   collect(typename FromTypeList::tail());
 }
 
 /// \brief Variadic operator overload.
-MatcherCreateCallback *makeMatcherAutoMarshall(
-    ast_matchers::internal::VariadicOperatorMatcherFunc Func,
-    StringRef MatcherName) {
-  return new VariadicOperatorMatcherCreateCallback(Func.Func, MatcherName);
+template <unsigned MinCount, unsigned MaxCount>
+MatcherDescriptor *
+makeMatcherAutoMarshall(ast_matchers::internal::VariadicOperatorMatcherFunc<
+                            MinCount, MaxCount> Func,
+                        StringRef MatcherName) {
+  return new VariadicOperatorMatcherDescriptor(MinCount, MaxCount, Func.Func,
+                                               MatcherName);
 }
 
 }  // namespace internal
diff --git a/lib/ASTMatchers/Dynamic/Parser.cpp b/lib/ASTMatchers/Dynamic/Parser.cpp
index df9596e..24ffdcd 100644
--- a/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -12,13 +12,13 @@
 ///
 //===----------------------------------------------------------------------===//
 
-#include <string>
-#include <vector>
-
 #include "clang/ASTMatchers/Dynamic/Parser.h"
 #include "clang/ASTMatchers/Dynamic/Registry.h"
 #include "clang/Basic/CharInfo.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/Twine.h"
+#include <string>
+#include <vector>
 
 namespace clang {
 namespace ast_matchers {
@@ -28,15 +28,16 @@
 struct Parser::TokenInfo {
   /// \brief Different possible tokens.
   enum TokenKind {
-    TK_Eof = 0,
-    TK_OpenParen = 1,
-    TK_CloseParen = 2,
-    TK_Comma = 3,
-    TK_Period = 4,
-    TK_Literal = 5,
-    TK_Ident = 6,
-    TK_InvalidChar = 7,
-    TK_Error = 8
+    TK_Eof,
+    TK_OpenParen,
+    TK_CloseParen,
+    TK_Comma,
+    TK_Period,
+    TK_Literal,
+    TK_Ident,
+    TK_InvalidChar,
+    TK_Error,
+    TK_CodeCompletion
   };
 
   /// \brief Some known identifiers.
@@ -56,7 +57,15 @@
 class Parser::CodeTokenizer {
 public:
   explicit CodeTokenizer(StringRef MatcherCode, Diagnostics *Error)
-      : Code(MatcherCode), StartOfLine(MatcherCode), Line(1), Error(Error) {
+      : Code(MatcherCode), StartOfLine(MatcherCode), Line(1), Error(Error),
+        CodeCompletionLocation(0) {
+    NextToken = getNextToken();
+  }
+
+  CodeTokenizer(StringRef MatcherCode, Diagnostics *Error,
+                unsigned CodeCompletionOffset)
+      : Code(MatcherCode), StartOfLine(MatcherCode), Line(1), Error(Error),
+        CodeCompletionLocation(MatcherCode.data() + CodeCompletionOffset) {
     NextToken = getNextToken();
   }
 
@@ -78,6 +87,13 @@
     TokenInfo Result;
     Result.Range.Start = currentLocation();
 
+    if (CodeCompletionLocation && CodeCompletionLocation <= Code.data()) {
+      Result.Kind = TokenInfo::TK_CodeCompletion;
+      Result.Text = StringRef(CodeCompletionLocation, 0);
+      CodeCompletionLocation = 0;
+      return Result;
+    }
+
     if (Code.empty()) {
       Result.Kind = TokenInfo::TK_Eof;
       Result.Text = "";
@@ -122,8 +138,21 @@
       if (isAlphanumeric(Code[0])) {
         // Parse an identifier
         size_t TokenLength = 1;
-        while (TokenLength < Code.size() && isAlphanumeric(Code[TokenLength]))
+        while (1) {
+          // A code completion location in/immediately after an identifier will
+          // cause the portion of the identifier before the code completion
+          // location to become a code completion token.
+          if (CodeCompletionLocation == Code.data() + TokenLength) {
+            CodeCompletionLocation = 0;
+            Result.Kind = TokenInfo::TK_CodeCompletion;
+            Result.Text = Code.substr(0, TokenLength);
+            Code = Code.drop_front(TokenLength);
+            return Result;
+          }
+          if (TokenLength == Code.size() || !isAlphanumeric(Code[TokenLength]))
+            break;
           ++TokenLength;
+        }
         Result.Kind = TokenInfo::TK_Ident;
         Result.Text = Code.substr(0, TokenLength);
         Code = Code.drop_front(TokenLength);
@@ -224,10 +253,27 @@
   unsigned Line;
   Diagnostics *Error;
   TokenInfo NextToken;
+  const char *CodeCompletionLocation;
 };
 
 Parser::Sema::~Sema() {}
 
+struct Parser::ScopedContextEntry {
+  Parser *P;
+
+  ScopedContextEntry(Parser *P, MatcherCtor C) : P(P) {
+    P->ContextStack.push_back(std::make_pair(C, 0u));
+  }
+
+  ~ScopedContextEntry() {
+    P->ContextStack.pop_back();
+  }
+
+  void nextArg() {
+    ++P->ContextStack.back().second;
+  }
+};
+
 /// \brief Parse and validate a matcher expression.
 /// \return \c true on success, in which case \c Value has the matcher parsed.
 ///   If the input is malformed, or some argument has an error, it
@@ -242,32 +288,43 @@
     return false;
   }
 
+  llvm::Optional<MatcherCtor> Ctor =
+      S->lookupMatcherCtor(NameToken.Text, NameToken.Range, Error);
   std::vector<ParserValue> Args;
   TokenInfo EndToken;
-  while (Tokenizer->nextTokenKind() != TokenInfo::TK_Eof) {
-    if (Tokenizer->nextTokenKind() == TokenInfo::TK_CloseParen) {
-      // End of args.
-      EndToken = Tokenizer->consumeNextToken();
-      break;
-    }
-    if (Args.size() > 0) {
-      // We must find a , token to continue.
-      const TokenInfo CommaToken = Tokenizer->consumeNextToken();
-      if (CommaToken.Kind != TokenInfo::TK_Comma) {
-        Error->addError(CommaToken.Range, Error->ET_ParserNoComma)
-            << CommaToken.Text;
+
+  {
+    ScopedContextEntry SCE(this, Ctor ? *Ctor : 0);
+
+    while (Tokenizer->nextTokenKind() != TokenInfo::TK_Eof) {
+      if (Tokenizer->nextTokenKind() == TokenInfo::TK_CloseParen) {
+        // End of args.
+        EndToken = Tokenizer->consumeNextToken();
+        break;
+      }
+      if (Args.size() > 0) {
+        // We must find a , token to continue.
+        const TokenInfo CommaToken = Tokenizer->consumeNextToken();
+        if (CommaToken.Kind != TokenInfo::TK_Comma) {
+          Error->addError(CommaToken.Range, Error->ET_ParserNoComma)
+              << CommaToken.Text;
+          return false;
+        }
+      }
+
+      Diagnostics::Context Ctx(Diagnostics::Context::MatcherArg, Error,
+                               NameToken.Text, NameToken.Range,
+                               Args.size() + 1);
+      ParserValue ArgValue;
+      ArgValue.Text = Tokenizer->peekNextToken().Text;
+      ArgValue.Range = Tokenizer->peekNextToken().Range;
+      if (!parseExpressionImpl(&ArgValue.Value)) {
         return false;
       }
+
+      Args.push_back(ArgValue);
+      SCE.nextArg();
     }
-
-    Diagnostics::Context Ctx(Diagnostics::Context::MatcherArg, Error,
-                             NameToken.Text, NameToken.Range, Args.size() + 1);
-    ParserValue ArgValue;
-    ArgValue.Text = Tokenizer->peekNextToken().Text;
-    ArgValue.Range = Tokenizer->peekNextToken().Range;
-    if (!parseExpressionImpl(&ArgValue.Value)) return false;
-
-    Args.push_back(ArgValue);
   }
 
   if (EndToken.Kind == TokenInfo::TK_Eof) {
@@ -280,6 +337,11 @@
     // Parse .bind("foo")
     Tokenizer->consumeNextToken();  // consume the period.
     const TokenInfo BindToken = Tokenizer->consumeNextToken();
+    if (BindToken.Kind == TokenInfo::TK_CodeCompletion) {
+      addCompletion(BindToken, "bind(\"", "bind");
+      return false;
+    }
+
     const TokenInfo OpenToken = Tokenizer->consumeNextToken();
     const TokenInfo IDToken = Tokenizer->consumeNextToken();
     const TokenInfo CloseToken = Tokenizer->consumeNextToken();
@@ -306,19 +368,55 @@
     BindID = IDToken.Value.getString();
   }
 
+  if (!Ctor)
+    return false;
+
   // Merge the start and end infos.
   Diagnostics::Context Ctx(Diagnostics::Context::ConstructMatcher, Error,
                            NameToken.Text, NameToken.Range);
   SourceRange MatcherRange = NameToken.Range;
   MatcherRange.End = EndToken.Range.End;
   VariantMatcher Result = S->actOnMatcherExpression(
-      NameToken.Text, MatcherRange, BindID, Args, Error);
+      *Ctor, MatcherRange, BindID, Args, Error);
   if (Result.isNull()) return false;
 
   *Value = Result;
   return true;
 }
 
+// If the completion token is a prefix of this completion, add the remainder
+// (i.e. the completion minus the already-typed prefix) to Completions.
+void Parser::addCompletion(const TokenInfo &CompToken, StringRef TypedText,
+                           StringRef Decl) {
+  if (TypedText.size() >= CompToken.Text.size() &&
+      TypedText.substr(0, CompToken.Text.size()) == CompToken.Text) {
+    Completions.push_back(
+        MatcherCompletion(TypedText.substr(CompToken.Text.size()), Decl));
+  }
+}
+
+void Parser::addExpressionCompletions() {
+  const TokenInfo CompToken = Tokenizer->consumeNextToken();
+  assert(CompToken.Kind == TokenInfo::TK_CodeCompletion);
+
+  // We cannot complete code if there is an invalid element on the context
+  // stack.
+  for (ContextStackTy::iterator I = ContextStack.begin(),
+                                E = ContextStack.end();
+       I != E; ++I) {
+    if (!I->first)
+      return;
+  }
+
+  std::vector<MatcherCompletion> RegCompletions =
+      Registry::getCompletions(ContextStack);
+  for (std::vector<MatcherCompletion>::iterator I = RegCompletions.begin(),
+                                                E = RegCompletions.end();
+       I != E; ++I) {
+    addCompletion(CompToken, I->TypedText, I->MatcherDecl);
+  }
+}
+
 /// \brief Parse an <Expression>
 bool Parser::parseExpressionImpl(VariantValue *Value) {
   switch (Tokenizer->nextTokenKind()) {
@@ -329,6 +427,10 @@
   case TokenInfo::TK_Ident:
     return parseMatcherExpressionImpl(Value);
 
+  case TokenInfo::TK_CodeCompletion:
+    addExpressionCompletions();
+    return false;
+
   case TokenInfo::TK_Eof:
     Error->addError(Tokenizer->consumeNextToken().Range,
                     Error->ET_ParserNoCode);
@@ -358,16 +460,21 @@
 class RegistrySema : public Parser::Sema {
 public:
   virtual ~RegistrySema() {}
-  VariantMatcher actOnMatcherExpression(StringRef MatcherName,
+  llvm::Optional<MatcherCtor> lookupMatcherCtor(StringRef MatcherName,
+                                                const SourceRange &NameRange,
+                                                Diagnostics *Error) {
+    return Registry::lookupMatcherCtor(MatcherName, NameRange, Error);
+  }
+  VariantMatcher actOnMatcherExpression(MatcherCtor Ctor,
                                         const SourceRange &NameRange,
                                         StringRef BindID,
                                         ArrayRef<ParserValue> Args,
                                         Diagnostics *Error) {
     if (BindID.empty()) {
-      return Registry::constructMatcher(MatcherName, NameRange, Args, Error);
+      return Registry::constructMatcher(Ctor, NameRange, Args, Error);
     } else {
-      return Registry::constructBoundMatcher(MatcherName, NameRange, BindID,
-                                             Args, Error);
+      return Registry::constructBoundMatcher(Ctor, NameRange, BindID, Args,
+                                             Error);
     }
   }
 };
@@ -390,6 +497,18 @@
   return true;
 }
 
+std::vector<MatcherCompletion>
+Parser::completeExpression(StringRef Code, unsigned CompletionOffset) {
+  Diagnostics Error;
+  CodeTokenizer Tokenizer(Code, &Error, CompletionOffset);
+  RegistrySema S;
+  Parser P(&Tokenizer, &S, &Error);
+  VariantValue Dummy;
+  P.parseExpressionImpl(&Dummy);
+
+  return P.Completions;
+}
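
A usage sketch for the new entry point (illustrative only): completing at the end of a partially typed expression, with offset 17 being the position right after "hasN":

    std::vector<MatcherCompletion> Completions =
        Parser::completeExpression("functionDecl(hasN", 17);
    for (size_t I = 0, E = Completions.size(); I != E; ++I)
      llvm::errs() << Completions[I].TypedText << " : "
                   << Completions[I].MatcherDecl << "\n";
    // One expected entry completes hasName, i.e. TypedText "ame(\"".
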
+
 llvm::Optional<DynTypedMatcher>
 Parser::parseMatcherExpression(StringRef Code, Diagnostics *Error) {
   RegistrySema S;
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index 70e956e..6089b45 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -13,23 +13,24 @@
 //===------------------------------------------------------------===//
 
 #include "clang/ASTMatchers/Dynamic/Registry.h"
-
-#include <utility>
-
 #include "Marshallers.h"
 #include "clang/ASTMatchers/ASTMatchers.h"
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Support/ManagedStatic.h"
+#include <set>
+#include <utility>
+
+using namespace clang::ast_type_traits;
 
 namespace clang {
 namespace ast_matchers {
 namespace dynamic {
 namespace {
 
-using internal::MatcherCreateCallback;
+using internal::MatcherDescriptor;
 
-typedef llvm::StringMap<const MatcherCreateCallback *> ConstructorMap;
+typedef llvm::StringMap<const MatcherDescriptor *> ConstructorMap;
 class RegistryMaps {
 public:
   RegistryMaps();
@@ -38,12 +39,12 @@
   const ConstructorMap &constructors() const { return Constructors; }
 
 private:
-  void registerMatcher(StringRef MatcherName, MatcherCreateCallback *Callback);
+  void registerMatcher(StringRef MatcherName, MatcherDescriptor *Callback);
   ConstructorMap Constructors;
 };
 
 void RegistryMaps::registerMatcher(StringRef MatcherName,
-                                   MatcherCreateCallback *Callback) {
+                                   MatcherDescriptor *Callback) {
   assert(Constructors.find(MatcherName) == Constructors.end());
   Constructors[MatcherName] = Callback;
 }
@@ -58,14 +59,14 @@
 
 #define REGISTER_OVERLOADED_2(name)                                            \
   do {                                                                         \
-    MatcherCreateCallback *Callbacks[] = {                                     \
+    MatcherDescriptor *Callbacks[] = {                                         \
       internal::makeMatcherAutoMarshall(SPECIFIC_MATCHER_OVERLOAD(name, 0),    \
                                         #name),                                \
       internal::makeMatcherAutoMarshall(SPECIFIC_MATCHER_OVERLOAD(name, 1),    \
                                         #name)                                 \
     };                                                                         \
     registerMatcher(#name,                                                     \
-                    new internal::OverloadedMatcherCreateCallback(Callbacks)); \
+                    new internal::OverloadedMatcherDescriptor(Callbacks));     \
   } while (0)
 
 /// \brief Generate a registry map with all the known matchers.
@@ -76,11 +77,9 @@
   // ofKind
   //
   // Polymorphic + argument overload:
-  // unless
   // findAll
   //
   // Other:
-  // loc
   // equals
   // equalsNode
 
@@ -89,6 +88,7 @@
   REGISTER_OVERLOADED_2(hasType);
   REGISTER_OVERLOADED_2(isDerivedFrom);
   REGISTER_OVERLOADED_2(isSameOrDerivedFrom);
+  REGISTER_OVERLOADED_2(loc);
   REGISTER_OVERLOADED_2(pointsTo);
   REGISTER_OVERLOADED_2(references);
   REGISTER_OVERLOADED_2(thisPointerType);
@@ -113,6 +113,7 @@
   REGISTER_MATCHER(builtinType);
   REGISTER_MATCHER(cStyleCastExpr);
   REGISTER_MATCHER(callExpr);
+  REGISTER_MATCHER(caseStmt);
   REGISTER_MATCHER(castExpr);
   REGISTER_MATCHER(catchStmt);
   REGISTER_MATCHER(characterLiteral);
@@ -133,7 +134,9 @@
   REGISTER_MATCHER(declCountIs);
   REGISTER_MATCHER(declRefExpr);
   REGISTER_MATCHER(declStmt);
+  REGISTER_MATCHER(declaratorDecl);
   REGISTER_MATCHER(defaultArgExpr);
+  REGISTER_MATCHER(defaultStmt);
   REGISTER_MATCHER(deleteExpr);
   REGISTER_MATCHER(dependentSizedArrayType);
   REGISTER_MATCHER(destructorDecl);
@@ -143,15 +146,20 @@
   REGISTER_MATCHER(elaboratedType);
   REGISTER_MATCHER(enumConstantDecl);
   REGISTER_MATCHER(enumDecl);
+  REGISTER_MATCHER(equalsBoundNode);
   REGISTER_MATCHER(explicitCastExpr);
   REGISTER_MATCHER(expr);
+  REGISTER_MATCHER(exprWithCleanups);
   REGISTER_MATCHER(fieldDecl);
   REGISTER_MATCHER(floatLiteral);
   REGISTER_MATCHER(forEach);
+  REGISTER_MATCHER(forEachConstructorInitializer);
   REGISTER_MATCHER(forEachDescendant);
+  REGISTER_MATCHER(forEachSwitchCase);
   REGISTER_MATCHER(forField);
   REGISTER_MATCHER(forRangeStmt);
   REGISTER_MATCHER(forStmt);
+  REGISTER_MATCHER(friendDecl);
   REGISTER_MATCHER(functionDecl);
   REGISTER_MATCHER(functionTemplateDecl);
   REGISTER_MATCHER(functionType);
@@ -170,6 +178,7 @@
   REGISTER_MATCHER(hasBase);
   REGISTER_MATCHER(hasBody);
   REGISTER_MATCHER(hasCanonicalType);
+  REGISTER_MATCHER(hasCaseConstant);
   REGISTER_MATCHER(hasCondition);
   REGISTER_MATCHER(hasConditionVariableStatement);
   REGISTER_MATCHER(hasDeclContext);
@@ -203,6 +212,7 @@
   REGISTER_MATCHER(hasTargetDecl);
   REGISTER_MATCHER(hasTemplateArgument);
   REGISTER_MATCHER(hasTrueExpression);
+  REGISTER_MATCHER(hasTypeLoc);
   REGISTER_MATCHER(hasUnaryOperand);
   REGISTER_MATCHER(hasValueType);
   REGISTER_MATCHER(ifStmt);
@@ -215,12 +225,15 @@
   REGISTER_MATCHER(innerType);
   REGISTER_MATCHER(integerLiteral);
   REGISTER_MATCHER(isArrow);
+  REGISTER_MATCHER(isConst);
   REGISTER_MATCHER(isConstQualified);
   REGISTER_MATCHER(isDefinition);
   REGISTER_MATCHER(isExplicitTemplateSpecialization);
+  REGISTER_MATCHER(isExpr);
   REGISTER_MATCHER(isExternC);
   REGISTER_MATCHER(isImplicit);
   REGISTER_MATCHER(isInteger);
+  REGISTER_MATCHER(isListInitialization);
   REGISTER_MATCHER(isOverride);
   REGISTER_MATCHER(isPrivate);
   REGISTER_MATCHER(isProtected);
@@ -252,6 +265,7 @@
   REGISTER_MATCHER(operatorCallExpr);
   REGISTER_MATCHER(parameterCountIs);
   REGISTER_MATCHER(parenType);
+  REGISTER_MATCHER(parmVarDecl);
   REGISTER_MATCHER(pointee);
   REGISTER_MATCHER(pointerType);
   REGISTER_MATCHER(qualType);
@@ -275,6 +289,7 @@
   REGISTER_MATCHER(switchCase);
   REGISTER_MATCHER(switchStmt);
   REGISTER_MATCHER(templateSpecializationType);
+  REGISTER_MATCHER(temporaryObjectExpr);
   REGISTER_MATCHER(thisExpr);
   REGISTER_MATCHER(throughUsingDecl);
   REGISTER_MATCHER(throwExpr);
@@ -285,6 +300,10 @@
   REGISTER_MATCHER(typedefType);
   REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
   REGISTER_MATCHER(unaryOperator);
+  REGISTER_MATCHER(unaryTransformType);
+  REGISTER_MATCHER(unless);
+  REGISTER_MATCHER(unresolvedConstructExpr);
+  REGISTER_MATCHER(unresolvedUsingValueDecl);
   REGISTER_MATCHER(userDefinedLiteral);
   REGISTER_MATCHER(usingDecl);
   REGISTER_MATCHER(varDecl);
@@ -306,27 +325,180 @@
 } // anonymous namespace
 
 // static
-VariantMatcher Registry::constructMatcher(StringRef MatcherName,
-                                          const SourceRange &NameRange,
-                                          ArrayRef<ParserValue> Args,
-                                          Diagnostics *Error) {
+llvm::Optional<MatcherCtor>
+Registry::lookupMatcherCtor(StringRef MatcherName, const SourceRange &NameRange,
+                            Diagnostics *Error) {
   ConstructorMap::const_iterator it =
       RegistryData->constructors().find(MatcherName);
   if (it == RegistryData->constructors().end()) {
     Error->addError(NameRange, Error->ET_RegistryNotFound) << MatcherName;
-    return VariantMatcher();
+    return llvm::Optional<MatcherCtor>();
   }
 
-  return it->second->run(NameRange, Args, Error);
+  return it->second;
+}
+
+namespace {
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+                              const std::set<ASTNodeKind> &KS) {
+  unsigned Count = 0;
+  for (std::set<ASTNodeKind>::const_iterator I = KS.begin(), E = KS.end();
+       I != E; ++I) {
+    if (I != KS.begin())
+      OS << "|";
+    if (Count++ == 3) {
+      OS << "...";
+      break;
+    }
+    OS << *I;
+  }
+  return OS;
+}
+
+struct ReverseSpecificityThenName {
+  bool operator()(const std::pair<unsigned, std::string> &A,
+                  const std::pair<unsigned, std::string> &B) const {
+    return A.first > B.first || (A.first == B.first && A.second < B.second);
+  }
+};
+
+}
+
+std::vector<MatcherCompletion> Registry::getCompletions(
+    llvm::ArrayRef<std::pair<MatcherCtor, unsigned> > Context) {
+  ASTNodeKind InitialTypes[] = {
+    ASTNodeKind::getFromNodeKind<Decl>(),
+    ASTNodeKind::getFromNodeKind<QualType>(),
+    ASTNodeKind::getFromNodeKind<Type>(),
+    ASTNodeKind::getFromNodeKind<Stmt>(),
+    ASTNodeKind::getFromNodeKind<NestedNameSpecifier>(),
+    ASTNodeKind::getFromNodeKind<NestedNameSpecifierLoc>(),
+    ASTNodeKind::getFromNodeKind<TypeLoc>()
+  };
+  llvm::ArrayRef<ASTNodeKind> InitialTypesRef(InitialTypes);
+
+  // Starting with the above seed of acceptable top-level matcher types, compute
+  // the acceptable type set for the argument indicated by each context element.
+  std::set<ASTNodeKind> TypeSet(InitialTypesRef.begin(), InitialTypesRef.end());
+  for (llvm::ArrayRef<std::pair<MatcherCtor, unsigned> >::iterator
+           CtxI = Context.begin(),
+           CtxE = Context.end();
+       CtxI != CtxE; ++CtxI) {
+    std::vector<internal::ArgKind> NextTypeSet;
+    for (std::set<ASTNodeKind>::iterator I = TypeSet.begin(), E = TypeSet.end();
+         I != E; ++I) {
+      if (CtxI->first->isConvertibleTo(*I) &&
+          (CtxI->first->isVariadic() ||
+           CtxI->second < CtxI->first->getNumArgs()))
+        CtxI->first->getArgKinds(*I, CtxI->second, NextTypeSet);
+    }
+    TypeSet.clear();
+    for (std::vector<internal::ArgKind>::iterator I = NextTypeSet.begin(),
+                                                  E = NextTypeSet.end();
+         I != E; ++I) {
+      if (I->getArgKind() == internal::ArgKind::AK_Matcher)
+        TypeSet.insert(I->getMatcherKind());
+    }
+  }
+
+  typedef std::map<std::pair<unsigned, std::string>, MatcherCompletion,
+                   ReverseSpecificityThenName> CompletionsTy;
+  CompletionsTy Completions;
+
+  // TypeSet now contains the list of acceptable types for the argument we are
+  // completing.  Search the registry for acceptable matchers.
+  for (ConstructorMap::const_iterator I = RegistryData->constructors().begin(),
+                                      E = RegistryData->constructors().end();
+       I != E; ++I) {
+    std::set<ASTNodeKind> RetKinds;
+    unsigned NumArgs = I->second->isVariadic() ? 1 : I->second->getNumArgs();
+    bool IsPolymorphic = I->second->isPolymorphic();
+    std::vector<std::vector<internal::ArgKind> > ArgsKinds(NumArgs);
+    unsigned MaxSpecificity = 0;
+    for (std::set<ASTNodeKind>::iterator TI = TypeSet.begin(),
+                                         TE = TypeSet.end();
+         TI != TE; ++TI) {
+      unsigned Specificity;
+      ASTNodeKind LeastDerivedKind;
+      if (I->second->isConvertibleTo(*TI, &Specificity, &LeastDerivedKind)) {
+        if (MaxSpecificity < Specificity)
+          MaxSpecificity = Specificity;
+        RetKinds.insert(LeastDerivedKind);
+        for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
+          I->second->getArgKinds(*TI, Arg, ArgsKinds[Arg]);
+        if (IsPolymorphic)
+          break;
+      }
+    }
+
+    if (!RetKinds.empty() && MaxSpecificity > 0) {
+      std::string Decl;
+      llvm::raw_string_ostream OS(Decl);
+
+      if (IsPolymorphic) {
+        OS << "Matcher<T> " << I->first() << "(Matcher<T>";
+      } else {
+        OS << "Matcher<" << RetKinds << "> " << I->first() << "(";
+        for (std::vector<std::vector<internal::ArgKind> >::iterator
+                 KI = ArgsKinds.begin(),
+                 KE = ArgsKinds.end();
+             KI != KE; ++KI) {
+          if (KI != ArgsKinds.begin())
+            OS << ", ";
+          // This currently assumes that a matcher may not overload a
+          // non-matcher, and all non-matcher overloads have identical
+          // arguments.
+          if ((*KI)[0].getArgKind() == internal::ArgKind::AK_Matcher) {
+            std::set<ASTNodeKind> MatcherKinds;
+            std::transform(
+                KI->begin(), KI->end(),
+                std::inserter(MatcherKinds, MatcherKinds.end()),
+                std::mem_fun_ref(&internal::ArgKind::getMatcherKind));
+            OS << "Matcher<" << MatcherKinds << ">";
+          } else {
+            OS << (*KI)[0].asString();
+          }
+        }
+      }
+      if (I->second->isVariadic())
+        OS << "...";
+      OS << ")";
+
+      std::string TypedText = I->first();
+      TypedText += "(";
+      if (ArgsKinds.empty())
+        TypedText += ")";
+      else if (ArgsKinds[0][0].getArgKind() == internal::ArgKind::AK_String)
+        TypedText += "\"";
+
+      Completions[std::make_pair(MaxSpecificity, I->first())] =
+          MatcherCompletion(TypedText, OS.str());
+    }
+  }
+
+  std::vector<MatcherCompletion> RetVal;
+  for (CompletionsTy::iterator I = Completions.begin(), E = Completions.end();
+       I != E; ++I)
+    RetVal.push_back(I->second);
+  return RetVal;
 }
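
As an illustration of what this produces (values approximate, not generated by this patch; IfStmtCtor and HasConditionCtor stand for previously looked-up constructors): completing the first argument of hasCondition() nested inside ifStmt():

    std::pair<MatcherCtor, unsigned> Context[] = {
        std::make_pair(IfStmtCtor, 0u), std::make_pair(HasConditionCtor, 0u)};
    std::vector<MatcherCompletion> Cs = Registry::getCompletions(Context);
    // Entries pair the text to insert with a readable declaration, roughly:
    //   TypedText    "binaryOperator("
    //   MatcherDecl  "Matcher<Stmt> binaryOperator(Matcher<BinaryOperator>...)"
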
 
 // static
-VariantMatcher Registry::constructBoundMatcher(StringRef MatcherName,
+VariantMatcher Registry::constructMatcher(MatcherCtor Ctor,
+                                          const SourceRange &NameRange,
+                                          ArrayRef<ParserValue> Args,
+                                          Diagnostics *Error) {
+  return Ctor->create(NameRange, Args, Error);
+}
+
+// static
+VariantMatcher Registry::constructBoundMatcher(MatcherCtor Ctor,
                                                const SourceRange &NameRange,
                                                StringRef BindID,
                                                ArrayRef<ParserValue> Args,
                                                Diagnostics *Error) {
-  VariantMatcher Out = constructMatcher(MatcherName, NameRange, Args, Error);
+  VariantMatcher Out = constructMatcher(Ctor, NameRange, Args, Error);
   if (Out.isNull()) return Out;
 
   llvm::Optional<DynTypedMatcher> Result = Out.getSingleMatcher();
diff --git a/lib/ASTMatchers/Dynamic/VariantValue.cpp b/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 3e49e1b..9c7262e 100644
--- a/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -13,7 +13,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/ASTMatchers/Dynamic/VariantValue.h"
-
 #include "clang/Basic/LLVM.h"
 #include "llvm/ADT/STLExtras.h"
 
@@ -38,7 +37,8 @@
   }
 
   virtual void makeTypedMatcher(MatcherOps &Ops) const {
-    if (Ops.canConstructFrom(Matcher))
+    bool Ignore;
+    if (Ops.canConstructFrom(Matcher, Ignore))
       Ops.constructFrom(Matcher);
   }
 
@@ -48,8 +48,8 @@
 
 class VariantMatcher::PolymorphicPayload : public VariantMatcher::Payload {
 public:
-  PolymorphicPayload(ArrayRef<DynTypedMatcher> MatchersIn)
-      : Matchers(MatchersIn) {}
+  PolymorphicPayload(std::vector<DynTypedMatcher> MatchersIn)
+      : Matchers(std::move(MatchersIn)) {}
 
   virtual ~PolymorphicPayload() {}
 
@@ -70,15 +70,25 @@
   }
 
   virtual void makeTypedMatcher(MatcherOps &Ops) const {
+    bool FoundIsExact = false;
     const DynTypedMatcher *Found = NULL;
+    int NumFound = 0;
     for (size_t i = 0, e = Matchers.size(); i != e; ++i) {
-      if (Ops.canConstructFrom(Matchers[i])) {
-        if (Found)
-          return;
+      bool IsExactMatch;
+      if (Ops.canConstructFrom(Matchers[i], IsExactMatch)) {
+        if (Found) {
+          if (FoundIsExact) {
+            assert(!IsExactMatch && "We should not have two exact matches.");
+            continue;
+          }
+        }
         Found = &Matchers[i];
+        FoundIsExact = IsExactMatch;
+        ++NumFound;
       }
     }
-    if (Found)
+    // We only succeed if we found exactly one, or if we found an exact match.
+    if (Found && (FoundIsExact || NumFound == 1))
       Ops.constructFrom(*Found);
   }
 
@@ -88,8 +98,8 @@
 class VariantMatcher::VariadicOpPayload : public VariantMatcher::Payload {
 public:
   VariadicOpPayload(ast_matchers::internal::VariadicOperatorFunction Func,
-                    ArrayRef<VariantMatcher> Args)
-      : Func(Func), Args(Args) {}
+                    std::vector<VariantMatcher> Args)
+      : Func(Func), Args(std::move(Args)) {}
 
   virtual llvm::Optional<DynTypedMatcher> getSingleMatcher() const {
     return llvm::Optional<DynTypedMatcher>();
@@ -121,14 +131,14 @@
 }
 
 VariantMatcher
-VariantMatcher::PolymorphicMatcher(ArrayRef<DynTypedMatcher> Matchers) {
-  return VariantMatcher(new PolymorphicPayload(Matchers));
+VariantMatcher::PolymorphicMatcher(std::vector<DynTypedMatcher> Matchers) {
+  return VariantMatcher(new PolymorphicPayload(std::move(Matchers)));
 }
 
 VariantMatcher VariantMatcher::VariadicOperatorMatcher(
     ast_matchers::internal::VariadicOperatorFunction Func,
-    ArrayRef<VariantMatcher> Args) {
-  return VariantMatcher(new VariadicOpPayload(Func, Args));
+    std::vector<VariantMatcher> Args) {
+  return VariantMatcher(new VariadicOpPayload(Func, std::move(Args)));
 }
 
 llvm::Optional<DynTypedMatcher> VariantMatcher::getSingleMatcher() const {
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index 465f0c3..c90d947 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -28,8 +28,8 @@
 #include "clang/Analysis/Support/BumpVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
 
 using namespace clang;
 
@@ -68,7 +68,8 @@
                                                        bool addInitializers,
                                                        bool addTemporaryDtors,
                                                        bool synthesizeBodies,
-                                                       bool addStaticInitBranch)
+                                                       bool addStaticInitBranch,
+                                                       bool addCXXNewAllocator)
   : SynthesizeBodies(synthesizeBodies)
 {
   cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
@@ -76,12 +77,11 @@
   cfgBuildOptions.AddInitializers = addInitializers;
   cfgBuildOptions.AddTemporaryDtors = addTemporaryDtors;
   cfgBuildOptions.AddStaticInitBranches = addStaticInitBranch;
+  cfgBuildOptions.AddCXXNewAllocator = addCXXNewAllocator;
 }
 
 void AnalysisDeclContextManager::clear() {
-  for (ContextMap::iterator I = Contexts.begin(), E = Contexts.end(); I!=E; ++I)
-    delete I->second;
-  Contexts.clear();
+  llvm::DeleteContainerSeconds(Contexts);
 }
 
 static BodyFarm &getBodyFarm(ASTContext &C) {
@@ -94,14 +94,21 @@
   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
     Stmt *Body = FD->getBody();
     if (!Body && Manager && Manager->synthesizeBodies()) {
-      IsAutosynthesized = true;
-      return getBodyFarm(getASTContext()).getBody(FD);
+      Body = getBodyFarm(getASTContext()).getBody(FD);
+      if (Body)
+        IsAutosynthesized = true;
     }
     return Body;
   }
-  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
-    return MD->getBody();
-  else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+    Stmt *Body = MD->getBody();
+    if (!Body && Manager && Manager->synthesizeBodies()) {
+      Body = getBodyFarm(getASTContext()).getBody(MD);
+      if (Body)
+        IsAutosynthesized = true;
+    }
+    return Body;
+  } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
     return BD->getBody();
   else if (const FunctionTemplateDecl *FunTmpl
            = dyn_cast_or_null<FunctionTemplateDecl>(D))
@@ -126,9 +133,8 @@
     return MD->getSelfDecl();
   if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
     // See if 'self' was captured by the block.
-    for (BlockDecl::capture_const_iterator it = BD->capture_begin(),
-         et = BD->capture_end(); it != et; ++it) {
-      const VarDecl *VD = it->getVariable();
+    for (const auto &I : BD->captures()) {
+      const VarDecl *VD = I.getVariable();
       if (VD->getName() == "self")
         return dyn_cast<ImplicitParamDecl>(VD);
     }    
@@ -235,10 +241,8 @@
   if (!PM) {
     PM.reset(new ParentMap(getBody()));
     if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
-      for (CXXConstructorDecl::init_const_iterator I = C->init_begin(),
-                                                   E = C->init_end();
-           I != E; ++I) {
-        PM->addStmt((*I)->getInit());
+      for (const auto *I : C->inits()) {
+        PM->addStmt(I->getInit());
       }
     }
     if (builtCFG)
@@ -435,7 +439,7 @@
   }
 }
 
-void LocationContext::dumpStack() const {
+LLVM_DUMP_METHOD void LocationContext::dumpStack() const {
   dumpStack(llvm::errs());
 }
 
@@ -454,11 +458,6 @@
                             BumpVectorContext &bc)
   : BEVals(bevals), BC(bc) {}
 
-  bool IsTrackedDecl(const VarDecl *VD) {
-    const DeclContext *DC = VD->getDeclContext();
-    return IgnoredContexts.count(DC) == 0;
-  }
-
   void VisitStmt(Stmt *S) {
     for (Stmt::child_range I = S->children(); I; ++I)
       if (Stmt *child = *I)
@@ -506,9 +505,8 @@
   new (BV) DeclVec(BC, 10);
 
   // Go through the capture list.
-  for (BlockDecl::capture_const_iterator CI = BD->capture_begin(),
-       CE = BD->capture_end(); CI != CE; ++CI) {
-    BV->push_back(CI->getVariable(), BC);
+  for (const auto &CI : BD->captures()) {
+    BV->push_back(CI.getVariable(), BC);
   }
 
   // Find the referenced global/static variables.
@@ -548,15 +546,13 @@
   // Release the managed analyses.
   if (ManagedAnalyses) {
     ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
-    for (ManagedAnalysisMap::iterator I = M->begin(), E = M->end(); I!=E; ++I)
-      delete I->second;  
+    llvm::DeleteContainerSeconds(*M);
     delete M;
   }
 }
 
 AnalysisDeclContextManager::~AnalysisDeclContextManager() {
-  for (ContextMap::iterator I = Contexts.begin(), E = Contexts.end(); I!=E; ++I)
-    delete I->second;
+  llvm::DeleteContainerSeconds(Contexts);
 }
 
 LocationContext::~LocationContext() {}
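
llvm::DeleteContainerSeconds replaces the hand-written delete loops in clear() and in the destructor above. Roughly what the helper does, as a self-contained sketch (not the LLVM implementation itself):

    #include <map>

    // Delete every mapped pointer, then drop the now-dangling entries.
    template <typename MapTy> void deleteContainerSeconds(MapTy &M) {
      for (typename MapTy::iterator I = M.begin(), E = M.end(); I != E; ++I)
        delete I->second;
      M.clear();
    }
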
diff --git a/lib/Analysis/Android.mk b/lib/Analysis/Android.mk
index 8a73690..8ccbf3e 100644
--- a/lib/Analysis/Android.mk
+++ b/lib/Analysis/Android.mk
@@ -8,6 +8,7 @@
 TBLGEN_TABLES := \
   AttrList.inc \
   Attrs.inc \
+  AttrVisitor.inc \
   CommentCommandList.inc \
   CommentNodes.inc \
   DeclNodes.inc \
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index 4d5c2ee..039911e 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -35,8 +35,7 @@
   // returns void.
   const FunctionProtoType *FT =
   BPT->getPointeeType()->getAs<FunctionProtoType>();
-  if (!FT || !FT->getResultType()->isVoidType()  ||
-      FT->getNumArgs() != 0)
+  if (!FT || !FT->getReturnType()->isVoidType() || FT->getNumParams() != 0)
     return false;
 
   return true;
@@ -74,6 +73,9 @@
   
   /// Create an Objective-C bool literal.
   ObjCBoolLiteralExpr *makeObjCBool(bool Val);
+
+  /// Create an Objective-C ivar reference.
+  ObjCIvarRefExpr *makeObjCIvarRef(const Expr *Base, const ObjCIvarDecl *IVar);
   
   /// Create a Return statement.
   ReturnStmt *makeReturn(const Expr *RetVal);
@@ -147,6 +149,15 @@
   return new (C) ObjCBoolLiteralExpr(Val, Ty, SourceLocation());
 }
 
+ObjCIvarRefExpr *ASTMaker::makeObjCIvarRef(const Expr *Base,
+                                           const ObjCIvarDecl *IVar) {
+  return new (C) ObjCIvarRefExpr(const_cast<ObjCIvarDecl*>(IVar),
+                                 IVar->getType(), SourceLocation(),
+                                 SourceLocation(), const_cast<Expr*>(Base),
+                                 /*arrow=*/true, /*free=*/false);
+}
+
+
 ReturnStmt *ASTMaker::makeReturn(const Expr *RetVal) {
   return new (C) ReturnStmt(SourceLocation(), const_cast<Expr*>(RetVal), 0);
 }
@@ -278,8 +289,8 @@
   //    return YES;
   //   }
   //   else return NO;
-  
-  QualType ResultTy = D->getResultType();
+
+  QualType ResultTy = D->getReturnType();
   bool isBoolean = ResultTy->isBooleanType();
   if (!isBoolean && !ResultTy->isIntegralType(C))
     return 0;
@@ -374,3 +385,87 @@
   return Val.getValue();
 }
 
+static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
+                                      const ObjCPropertyDecl *Prop) {
+  // First, find the backing ivar.
+  const ObjCIvarDecl *IVar = Prop->getPropertyIvarDecl();
+  if (!IVar)
+    return 0;
+
+  // Ignore weak variables, which have special behavior.
+  if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+    return 0;
+
+  // Look to see if Sema has synthesized a body for us. This happens in
+  // Objective-C++ because the return value may be a C++ class type with a
+  // non-trivial copy constructor. We can only do this if we can find the
+  // @synthesize for this property, though (or if we know it's been auto-
+  // synthesized).
+  const ObjCImplementationDecl *ImplDecl =
+    IVar->getContainingInterface()->getImplementation();
+  if (ImplDecl) {
+    for (const auto *I : ImplDecl->property_impls()) {
+      if (I->getPropertyDecl() != Prop)
+        continue;
+
+      if (I->getGetterCXXConstructor()) {
+        ASTMaker M(Ctx);
+        return M.makeReturn(I->getGetterCXXConstructor());
+      }
+    }
+  }
+
+  // Sanity check that the property is the same type as the ivar, or a
+  // reference to it, and that it is either an object pointer or trivially
+  // copyable.
+  if (!Ctx.hasSameUnqualifiedType(IVar->getType(),
+                                  Prop->getType().getNonReferenceType()))
+    return 0;
+  if (!IVar->getType()->isObjCLifetimeType() &&
+      !IVar->getType().isTriviallyCopyableType(Ctx))
+    return 0;
+
+  // Generate our body:
+  //   return self->_ivar;
+  ASTMaker M(Ctx);
+
+  const VarDecl *selfVar = Prop->getGetterMethodDecl()->getSelfDecl();
+
+  Expr *loadedIVar =
+    M.makeObjCIvarRef(
+      M.makeLvalueToRvalue(
+        M.makeDeclRefExpr(selfVar),
+        selfVar->getType()),
+      IVar);
+
+  if (!Prop->getType()->isReferenceType())
+    loadedIVar = M.makeLvalueToRvalue(loadedIVar, IVar->getType());
+
+  return M.makeReturn(loadedIVar);
+}
+
+Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
+  // We currently only know how to synthesize property accessors.
+  if (!D->isPropertyAccessor())
+    return 0;
+
+  D = D->getCanonicalDecl();
+
+  Optional<Stmt *> &Val = Bodies[D];
+  if (Val.hasValue())
+    return Val.getValue();
+  Val = 0;
+
+  const ObjCPropertyDecl *Prop = D->findPropertyDecl();
+  if (!Prop)
+    return 0;
+
+  // For now, we only synthesize getters.
+  if (D->param_size() != 0)
+    return 0;
+
+  Val = createObjCPropertyGetter(C, Prop);
+
+  return Val.getValue();
+}
+
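
With the getter synthesis above, BodyFarm can now fabricate "return self->_ivar;" for a property accessor that has no written body, and AnalysisDeclContext reports such bodies through its IsAutosynthesized flag. A hedged caller-side sketch (ADC is assumed to be an AnalysisDeclContext* for the getter):

    bool IsAutosynthesized = false;
    if (Stmt *Body = ADC->getBody(IsAutosynthesized)) {
      // IsAutosynthesized is true only when the body was fabricated here;
      // weak properties and ivar/property type mismatches are skipped, so a
      // getter may still end up with no body at all.
      (void)Body;
    }
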
diff --git a/lib/Analysis/BodyFarm.h b/lib/Analysis/BodyFarm.h
index 96f61df..2d200fb 100644
--- a/lib/Analysis/BodyFarm.h
+++ b/lib/Analysis/BodyFarm.h
@@ -24,6 +24,8 @@
 class ASTContext;
 class Decl;
 class FunctionDecl;
+class ObjCMethodDecl;
+class ObjCPropertyDecl;
 class Stmt;
   
 class BodyFarm {
@@ -32,7 +34,10 @@
   
   /// Factory method for creating bodies for ordinary functions.
   Stmt *getBody(const FunctionDecl *D);
-  
+
+  /// Factory method for creating bodies for Objective-C properties.
+  Stmt *getBody(const ObjCMethodDecl *D);
+
 private:
   typedef llvm::DenseMap<const Decl *, Optional<Stmt *> > BodyMap;
 
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 8b8c573..400c202 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -21,7 +21,7 @@
 #include "clang/AST/StmtVisitor.h"
 #include "clang/Basic/Builtins.h"
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
+#include <memory>
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/Support/Allocator.h"
 #include "llvm/Support/Format.h"
@@ -291,7 +291,7 @@
   typedef BlockScopePosPair JumpSource;
 
   ASTContext *Context;
-  OwningPtr<CFG> cfg;
+  std::unique_ptr<CFG> cfg;
 
   CFGBlock *Block;
   CFGBlock *Succ;
@@ -363,6 +363,7 @@
                                       AddStmtChoice asc);
   CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
   CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
+  CFGBlock *VisitCXXNewExpr(CXXNewExpr *DE, AddStmtChoice asc);
   CFGBlock *VisitCXXDeleteExpr(CXXDeleteExpr *DE, AddStmtChoice asc);
   CFGBlock *VisitCXXForRangeStmt(CXXForRangeStmt *S);
   CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
@@ -459,6 +460,9 @@
   void appendInitializer(CFGBlock *B, CXXCtorInitializer *I) {
     B->appendInitializer(I, cfg->getBumpVectorContext());
   }
+  void appendNewAllocator(CFGBlock *B, CXXNewExpr *NE) {
+    B->appendNewAllocator(NE, cfg->getBumpVectorContext());
+  }
   void appendBaseDtor(CFGBlock *B, const CXXBaseSpecifier *BS) {
     B->appendBaseDtor(BS, cfg->getBumpVectorContext());
   }
@@ -479,8 +483,16 @@
   void prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
       LocalScope::const_iterator B, LocalScope::const_iterator E);
 
-  void addSuccessor(CFGBlock *B, CFGBlock *S) {
-    B->addSuccessor(S, cfg->getBumpVectorContext());
+  void addSuccessor(CFGBlock *B, CFGBlock *S, bool IsReachable = true) {
+    B->addSuccessor(CFGBlock::AdjacentBlock(S, IsReachable),
+                    cfg->getBumpVectorContext());
+  }
+
+  /// Add a reachable successor to a block, with the alternate variant that is
+  /// unreachable.
+  void addSuccessor(CFGBlock *B, CFGBlock *ReachableBlock, CFGBlock *AltBlock) {
+    B->addSuccessor(CFGBlock::AdjacentBlock(ReachableBlock, AltBlock),
+                    cfg->getBumpVectorContext());
   }
 
   /// Try and evaluate an expression to an integer constant.
@@ -712,7 +724,7 @@
   // Create an empty entry block that has no predecessors.
   cfg->setEntry(createBlock());
 
-  return cfg.take();
+  return cfg.release();
 }
 
 /// createBlock - Used to lazily create blocks that are connected
@@ -730,7 +742,7 @@
 CFGBlock *CFGBuilder::createNoReturnBlock() {
   CFGBlock *B = createBlock(false);
   B->setHasNoReturnElement();
-  addSuccessor(B, &cfg->getExit());
+  addSuccessor(B, &cfg->getExit(), Succ);
   return B;
 }
 
@@ -868,30 +880,27 @@
   const CXXRecordDecl *RD = DD->getParent();
 
   // At the end destroy virtual base objects.
-  for (CXXRecordDecl::base_class_const_iterator VI = RD->vbases_begin(),
-      VE = RD->vbases_end(); VI != VE; ++VI) {
-    const CXXRecordDecl *CD = VI->getType()->getAsCXXRecordDecl();
+  for (const auto &VI : RD->vbases()) {
+    const CXXRecordDecl *CD = VI.getType()->getAsCXXRecordDecl();
     if (!CD->hasTrivialDestructor()) {
       autoCreateBlock();
-      appendBaseDtor(Block, VI);
+      appendBaseDtor(Block, &VI);
     }
   }
 
   // Before virtual bases destroy direct base objects.
-  for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
-      BE = RD->bases_end(); BI != BE; ++BI) {
-    if (!BI->isVirtual()) {
-      const CXXRecordDecl *CD = BI->getType()->getAsCXXRecordDecl();
+  for (const auto &BI : RD->bases()) {
+    if (!BI.isVirtual()) {
+      const CXXRecordDecl *CD = BI.getType()->getAsCXXRecordDecl();
       if (!CD->hasTrivialDestructor()) {
         autoCreateBlock();
-        appendBaseDtor(Block, BI);
+        appendBaseDtor(Block, &BI);
       }
     }
   }
 
   // First destroy member objects.
-  for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
-      FE = RD->field_end(); FI != FE; ++FI) {
+  for (auto *FI : RD->fields()) {
     // Check for constant size array. Set type to array element type.
     QualType QT = FI->getType();
     if (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
@@ -903,7 +912,7 @@
     if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
       if (!CD->hasTrivialDestructor()) {
         autoCreateBlock();
-        appendMemberDtor(Block, *FI);
+        appendMemberDtor(Block, FI);
       }
   }
 }
@@ -930,9 +939,8 @@
 
   // For compound statement we will be creating explicit scope.
   if (CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
-    for (CompoundStmt::body_iterator BI = CS->body_begin(), BE = CS->body_end()
-        ; BI != BE; ++BI) {
-      Stmt *SI = (*BI)->stripLabelLikeStatements();
+    for (auto *BI : CS->body()) {
+      Stmt *SI = BI->stripLabelLikeStatements();
       if (DeclStmt *DS = dyn_cast<DeclStmt>(SI))
         Scope = addLocalScopeForDeclStmt(DS, Scope);
     }
@@ -952,11 +960,9 @@
   if (!BuildOpts.AddImplicitDtors)
     return Scope;
 
-  for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end()
-      ; DI != DE; ++DI) {
-    if (VarDecl *VD = dyn_cast<VarDecl>(*DI))
+  for (auto *DI : DS->decls())
+    if (VarDecl *VD = dyn_cast<VarDecl>(DI))
       Scope = addLocalScopeForVarDecl(VD, Scope);
-  }
   return Scope;
 }
 
@@ -1122,6 +1128,9 @@
     case Stmt::CXXConstructExprClass:
       return VisitCXXConstructExpr(cast<CXXConstructExpr>(S), asc);
 
+    case Stmt::CXXNewExprClass:
+      return VisitCXXNewExpr(cast<CXXNewExpr>(S), asc);
+
     case Stmt::CXXDeleteExprClass:
       return VisitCXXDeleteExpr(cast<CXXDeleteExpr>(S), asc);
 
@@ -1293,7 +1302,7 @@
   do {
     if (BinaryOperator *B_RHS = dyn_cast<BinaryOperator>(RHS))
       if (B_RHS->isLogicalOp()) {
-        llvm::tie(RHSBlock, ExitBlock) =
+        std::tie(RHSBlock, ExitBlock) =
           VisitLogicalOperator(B_RHS, Term, TrueBlock, FalseBlock);
         break;
       }
@@ -1311,8 +1320,8 @@
     else {
       RHSBlock->setTerminator(Term);
       TryResult KnownVal = tryEvaluateBool(RHS);
-      addSuccessor(RHSBlock, KnownVal.isFalse() ? NULL : TrueBlock);
-      addSuccessor(RHSBlock, KnownVal.isTrue() ? NULL : FalseBlock);
+      addSuccessor(RHSBlock, TrueBlock, !KnownVal.isFalse());
+      addSuccessor(RHSBlock, FalseBlock, !KnownVal.isTrue());
     }
 
     Block = RHSBlock;
@@ -1355,12 +1364,12 @@
 
   // Now link the LHSBlock with RHSBlock.
   if (B->getOpcode() == BO_LOr) {
-    addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : TrueBlock);
-    addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : RHSBlock);
+    addSuccessor(LHSBlock, TrueBlock, !KnownVal.isFalse());
+    addSuccessor(LHSBlock, RHSBlock, !KnownVal.isTrue());
   } else {
     assert(B->getOpcode() == BO_LAnd);
-    addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
-    addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : FalseBlock);
+    addSuccessor(LHSBlock, RHSBlock, !KnownVal.isFalse());
+    addSuccessor(LHSBlock, FalseBlock, !KnownVal.isTrue());
   }
 
   return std::make_pair(EntryLHSBlock, ExitBlock);
@@ -1619,8 +1628,8 @@
 
   // See if this is a known constant.
   const TryResult& KnownVal = tryEvaluateBool(C->getCond());
-  addSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
-  addSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+  addSuccessor(Block, LHSBlock, !KnownVal.isFalse());
+  addSuccessor(Block, RHSBlock, !KnownVal.isTrue());
   Block->setTerminator(C);
   Expr *condExpr = C->getCond();
 
@@ -1865,9 +1874,10 @@
   // See if this is a known constant.
   const TryResult &KnownVal = tryEvaluateBool(I->getCond());
 
-  // Now add the successors.
-  addSuccessor(Block, KnownVal.isFalse() ? NULL : ThenBlock);
-  addSuccessor(Block, KnownVal.isTrue()? NULL : ElseBlock);
+  // Add the successors.  If we know that specific branches are
+  // unreachable, inform addSuccessor() of that knowledge.
+  addSuccessor(Block, ThenBlock, /* isReachable = */ !KnownVal.isFalse());
+  addSuccessor(Block, ElseBlock, /* isReachable = */ !KnownVal.isTrue());
 
   // Add the condition as the last statement in the new block.  This may create
   // new blocks as the condition may contain control-flow.  Any newly created
@@ -2078,7 +2088,7 @@
     if (BinaryOperator *Cond =
             dyn_cast_or_null<BinaryOperator>(C ? C->IgnoreParens() : 0))
       if (Cond->isLogicalOp()) {
-        llvm::tie(EntryConditionBlock, ExitConditionBlock) =
+        std::tie(EntryConditionBlock, ExitConditionBlock) =
           VisitLogicalOperator(Cond, F, BodyBlock, LoopSuccessor);
         break;
       }
@@ -2394,9 +2404,8 @@
     // more optimal CFG representation.
     if (BinaryOperator *Cond = dyn_cast<BinaryOperator>(C->IgnoreParens()))
       if (Cond->isLogicalOp()) {
-        llvm::tie(EntryConditionBlock, ExitConditionBlock) =
-          VisitLogicalOperator(Cond, W, BodyBlock,
-                               LoopSuccessor);
+        std::tie(EntryConditionBlock, ExitConditionBlock) =
+            VisitLogicalOperator(Cond, W, BodyBlock, LoopSuccessor);
         break;
       }
 
@@ -2726,8 +2735,8 @@
   SwitchAlwaysHasSuccessor |= switchExclusivelyCovered;
   SwitchAlwaysHasSuccessor |= Terminator->isAllEnumCasesCovered() &&
                               Terminator->getSwitchCaseList();
-  addSuccessor(SwitchTerminatedBlock,
-               SwitchAlwaysHasSuccessor ? 0 : DefaultCaseBlock);
+  addSuccessor(SwitchTerminatedBlock, DefaultCaseBlock,
+               !SwitchAlwaysHasSuccessor);
 
   // Add the terminator and condition in the switch block.
   SwitchTerminatedBlock->setTerminator(Terminator);
@@ -2828,10 +2837,9 @@
   // Add this block to the list of successors for the block with the switch
   // statement.
   assert(SwitchTerminatedBlock);
-  addSuccessor(SwitchTerminatedBlock,
+  addSuccessor(SwitchTerminatedBlock, CaseBlock,
                shouldAddCase(switchExclusivelyCovered, switchCond,
-                             CS, *Context)
-               ? CaseBlock : 0);
+                             CS, *Context));
 
   // We set Block to NULL to allow lazy creation of a new block (if necessary)
   Block = NULL;
@@ -3124,6 +3132,23 @@
   return VisitChildren(C);
 }
 
+CFGBlock *CFGBuilder::VisitCXXNewExpr(CXXNewExpr *NE,
+                                      AddStmtChoice asc) {
+
+  autoCreateBlock();
+  appendStmt(Block, NE);
+
+  if (NE->getInitializer())
+    Block = Visit(NE->getInitializer());
+  if (BuildOpts.AddCXXNewAllocator)
+    appendNewAllocator(Block, NE);
+  if (NE->isArray())
+    Block = Visit(NE->getArraySize());
+  for (CXXNewExpr::arg_iterator I = NE->placement_arg_begin(),
+       E = NE->placement_arg_end(); I != E; ++I)
+    Block = Visit(*I);
+  return Block;
+}
 
 CFGBlock *CFGBuilder::VisitCXXDeleteExpr(CXXDeleteExpr *DE,
                                          AddStmtChoice asc) {
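
The CFGNewAllocator element appended by VisitCXXNewExpr in the hunk above is opt-in via the CFG build options (the addCXXNewAllocator flag threaded through AnalysisDeclContextManager earlier in this patch). A minimal sketch of enabling it, assuming a Decl *D with a body and an ASTContext &Ctx:

    CFG::BuildOptions Options;
    Options.AddCXXNewAllocator = true;  // record an allocator element per new-expression
    std::unique_ptr<CFG> G(CFG::buildCFG(D, D->getBody(), &Ctx, Options));
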
@@ -3320,10 +3345,12 @@
     // a new block for the destructor which does not have as a successor
     // anything built thus far. Control won't flow out of this block.
     const CXXDestructorDecl *Dtor = E->getTemporary()->getDestructor();
-    if (Dtor->isNoReturn())
+    if (Dtor->isNoReturn()) {
+      Succ = B;
       Block = createNoReturnBlock();
-    else
+    } else {
       autoCreateBlock();
+    }
 
     appendTemporaryDtor(Block, E);
     B = Block;
@@ -3372,12 +3399,13 @@
 
   Block = createBlock(false);
   Block->setTerminator(CFGTerminator(E, true));
+  assert(Block->getTerminator().isTemporaryDtorsBranch());
 
   // See if this is a known constant.
   const TryResult &KnownVal = tryEvaluateBool(E->getCond());
 
   if (LHSBlock) {
-    addSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+    addSuccessor(Block, LHSBlock, !KnownVal.isFalse());
   } else if (KnownVal.isFalse()) {
     addSuccessor(Block, NULL);
   } else {
@@ -3387,7 +3415,8 @@
 
   if (!RHSBlock)
     RHSBlock = ConfluenceBlock;
-  addSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+
+  addSuccessor(Block, RHSBlock, !KnownVal.isTrue());
 
   return Block;
 }
@@ -3426,6 +3455,7 @@
   switch (getKind()) {
     case CFGElement::Statement:
     case CFGElement::Initializer:
+    case CFGElement::NewAllocator:
       llvm_unreachable("getDestructorDecl should only be used with "
                        "ImplicitDtors");
     case CFGElement::AutomaticObjectDtor: {
@@ -3470,13 +3500,37 @@
 }
 
 //===----------------------------------------------------------------------===//
-// Filtered walking of the CFG.
+// CFGBlock operations.
 //===----------------------------------------------------------------------===//
 
+CFGBlock::AdjacentBlock::AdjacentBlock(CFGBlock *B, bool IsReachable)
+  : ReachableBlock(IsReachable ? B : 0),
+    UnreachableBlock(!IsReachable ? B : 0,
+                     B && IsReachable ? AB_Normal : AB_Unreachable) {}
+
+CFGBlock::AdjacentBlock::AdjacentBlock(CFGBlock *B, CFGBlock *AlternateBlock)
+  : ReachableBlock(B),
+    UnreachableBlock(B == AlternateBlock ? 0 : AlternateBlock,
+                     B == AlternateBlock ? AB_Alternate : AB_Normal) {}
+
+void CFGBlock::addSuccessor(AdjacentBlock Succ,
+                            BumpVectorContext &C) {
+  if (CFGBlock *B = Succ.getReachableBlock())
+    B->Preds.push_back(AdjacentBlock(this, Succ.isReachable()), C);
+
+  if (CFGBlock *UnreachableB = Succ.getPossiblyUnreachableBlock())
+    UnreachableB->Preds.push_back(AdjacentBlock(this, false), C);
+
+  Succs.push_back(Succ, C);
+}
+
 bool CFGBlock::FilterEdge(const CFGBlock::FilterOptions &F,
         const CFGBlock *From, const CFGBlock *To) {
 
-  if (To && F.IgnoreDefaultsWithCoveredEnums) {
+  if (F.IgnoreNullPredecessors && !From)
+    return true;
+
+  if (To && From && F.IgnoreDefaultsWithCoveredEnums) {
     // If the 'To' has no label or is labeled but the label isn't a
     // CaseStmt then filter this edge.
     if (const SwitchStmt *S =
@@ -3572,7 +3626,7 @@
   void setBlockID(signed i) { currentBlock = i; }
   void setStmtID(unsigned i) { currStmt = i; }
 
-  virtual bool handledStmt(Stmt *S, raw_ostream &OS) {
+  bool handledStmt(Stmt *S, raw_ostream &OS) override {
     StmtMapTy::iterator I = StmtMap.find(S);
 
     if (I == StmtMap.end())
@@ -3615,7 +3669,9 @@
 public:
   CFGBlockTerminatorPrint(raw_ostream &os, StmtPrinterHelper* helper,
                           const PrintingPolicy &Policy)
-    : OS(os), Helper(helper), Policy(Policy) {}
+    : OS(os), Helper(helper), Policy(Policy) {
+    this->Policy.IncludeNewlines = false;
+  }
 
   void VisitIfStmt(IfStmt *I) {
     OS << "if ";
@@ -3705,6 +3761,13 @@
   void VisitExpr(Expr *E) {
     E->printPretty(OS, Helper, Policy);
   }
+
+public:
+  void print(CFGTerminator T) {
+    if (T.isTemporaryDtorsBranch())
+      OS << "(Temp Dtor) ";
+    Visit(T.getStmt());
+  }
 };
 } // end anonymous namespace
 
@@ -3787,6 +3850,11 @@
     OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
     OS << " (Implicit destructor)\n";
 
+  } else if (Optional<CFGNewAllocator> NE = E.getAs<CFGNewAllocator>()) {
+    OS << "CFGNewAllocator(";
+    if (const CXXNewExpr *AllocExpr = NE->getAllocatorExpr())
+      AllocExpr->getType().print(OS, PrintingPolicy(Helper.getLangOpts()));
+    OS << ")\n";
   } else if (Optional<CFGDeleteDtor> DE = E.getAs<CFGDeleteDtor>()) {
     const CXXRecordDecl *RD = DE->getCXXRecordDecl();
     if (!RD)
@@ -3835,6 +3903,8 @@
     OS << " (EXIT)]\n";
   else if (&B == cfg->getIndirectGotoBlock())
     OS << " (INDIRECT GOTO DISPATCH)]\n";
+  else if (B.hasNoReturnElement())
+    OS << " (NORETURN)]\n";
   else
     OS << "]\n";
   
@@ -3903,7 +3973,7 @@
 
     PrintingPolicy PP(Helper.getLangOpts());
     CFGBlockTerminatorPrint TPrinter(OS, &Helper, PP);
-    TPrinter.Visit(const_cast<Stmt*>(B.getTerminator().getStmt()));
+    TPrinter.print(B.getTerminator());
     OS << '\n';
     
     if (ShowColors)
@@ -3931,7 +4001,16 @@
         if (i % 10 == 8)
           OS << "\n     ";
 
-        OS << " B" << (*I)->getBlockID();
+        CFGBlock *B = *I;
+        bool Reachable = true;
+        if (!B) {
+          Reachable = false;
+          B = I->getPossiblyUnreachableBlock();
+        }
+
+        OS << " B" << B->getBlockID();
+        if (!Reachable)
+          OS << "(Unreachable)";
       }
       
       if (ShowColors)
@@ -3960,12 +4039,24 @@
         if (i % 10 == 8)
           OS << "\n    ";
 
-        if (*I)
-          OS << " B" << (*I)->getBlockID();
-        else
-          OS  << " NULL";
+        CFGBlock *B = *I;
+
+        bool Reachable = true;
+        if (!B) {
+          Reachable = false;
+          B = I->getPossiblyUnreachableBlock();
+        }
+
+        if (B) {
+          OS << " B" << B->getBlockID();
+          if (!Reachable)
+            OS << "(Unreachable)";
+        }
+        else {
+          OS << " NULL";
+        }
       }
-      
+
       if (ShowColors)
         OS.resetColor();
       OS << '\n';
@@ -4020,10 +4111,10 @@
 void CFGBlock::printTerminator(raw_ostream &OS,
                                const LangOptions &LO) const {
   CFGBlockTerminatorPrint TPrinter(OS, NULL, PrintingPolicy(LO));
-  TPrinter.Visit(const_cast<Stmt*>(getTerminator().getStmt()));
+  TPrinter.print(getTerminator());
 }
 
-Stmt *CFGBlock::getTerminatorCondition() {
+Stmt *CFGBlock::getTerminatorCondition(bool StripParens) {
   Stmt *Terminator = this->Terminator;
   if (!Terminator)
     return NULL;
@@ -4082,6 +4173,9 @@
       return Terminator;
   }
 
+  if (!StripParens)
+    return E;
+
   return E ? E->IgnoreParens() : NULL;
 }
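
Successor and predecessor edges are now CFGBlock::AdjacentBlock values that keep hold of a block even when the edge has been proven unreachable, which is what the printing code above decodes. A sketch of walking successors under the new scheme (the same pattern the reachability analysis in the next file uses for predecessors):

    for (CFGBlock::const_succ_iterator I = B->succ_begin(), E = B->succ_end();
         I != E; ++I) {
      if (const CFGBlock *Succ = *I) {
        // Reachable successor: behaves exactly as before.
        (void)Succ;
      } else if (const CFGBlock *Alt = I->getPossiblyUnreachableBlock()) {
        // Edge pruned as unreachable (e.g. the false branch of "if (true)"),
        // but the target block is still available for diagnostics.
        (void)Alt;
      }
    }
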
 
diff --git a/lib/Analysis/CFGReachabilityAnalysis.cpp b/lib/Analysis/CFGReachabilityAnalysis.cpp
index 492e66f..4ae135f 100644
--- a/lib/Analysis/CFGReachabilityAnalysis.cpp
+++ b/lib/Analysis/CFGReachabilityAnalysis.cpp
@@ -69,7 +69,8 @@
     // Add the predecessors to the worklist.
     for (CFGBlock::const_pred_iterator i = block->pred_begin(), 
          e = block->pred_end(); i != e; ++i) {
-      worklist.push_back(*i);
+      if (*i)
+        worklist.push_back(*i);
     }
   }
 }
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index deab8f1..9630bc0 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -1,3 +1,8 @@
+set(LLVM_LINK_COMPONENTS
+  MC
+  Support
+  )
+
 add_clang_library(clangAnalysis
   AnalysisDeclContext.cpp
   BodyFarm.cpp
@@ -19,19 +24,8 @@
   ScanfFormatString.cpp
   ThreadSafety.cpp
   UninitializedValues.cpp
-  )
 
-add_dependencies(clangAnalysis
-  ClangAttrClasses
-  ClangAttrList
-  ClangCommentNodes
-  ClangDiagnosticCommon
-  ClangDeclNodes
-  ClangDiagnosticAnalysis
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangAnalysis
+  LINK_LIBS
   clangBasic
   clangAST
   )
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
index 3387015..649ba57 100644
--- a/lib/Analysis/CallGraph.cpp
+++ b/lib/Analysis/CallGraph.cpp
@@ -95,9 +95,8 @@
   if (BlockDecl *BD = dyn_cast<BlockDecl>(D))
     addNodeForDecl(BD, true);
 
-  for (DeclContext::decl_iterator I = D->decls_begin(), E = D->decls_end();
-       I!=E; ++I)
-    if (DeclContext *DC = dyn_cast<DeclContext>(*I))
+  for (auto *I : D->decls())
+    if (auto *DC = dyn_cast<DeclContext>(I))
       addNodesForBlocks(DC);
 }
 
@@ -106,12 +105,7 @@
 }
 
 CallGraph::~CallGraph() {
-  if (!FunctionMap.empty()) {
-    for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
-        I != E; ++I)
-      delete I->second;
-    FunctionMap.clear();
-  }
+  llvm::DeleteContainerSeconds(FunctionMap);
 }
 
 bool CallGraph::includeInGraph(const Decl *D) {
diff --git a/lib/Analysis/Consumed.cpp b/lib/Analysis/Consumed.cpp
index b33c8d8..7b6ad57 100644
--- a/lib/Analysis/Consumed.cpp
+++ b/lib/Analysis/Consumed.cpp
@@ -17,20 +17,20 @@
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/ExprCXX.h"
 #include "clang/AST/RecursiveASTVisitor.h"
-#include "clang/AST/StmtVisitor.h"
 #include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtVisitor.h"
 #include "clang/AST/Type.h"
+#include "clang/Analysis/Analyses/Consumed.h"
 #include "clang/Analysis/Analyses/PostOrderCFGView.h"
 #include "clang/Analysis/AnalysisContext.h"
 #include "clang/Analysis/CFG.h"
-#include "clang/Analysis/Analyses/Consumed.h"
 #include "clang/Basic/OperatorKinds.h"
 #include "clang/Basic/SourceLocation.h"
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/raw_ostream.h"
+#include <memory>
 
 // TODO: Adjust states of args to constructors in the same way that arguments to
 //       function calls are handled.
@@ -143,6 +143,7 @@
   return false;
 }
 
+
 static bool isConsumableType(const QualType &QT) {
   if (QT->isPointerType() || QT->isReferenceType())
     return false;
@@ -153,6 +154,23 @@
   return false;
 }
 
+static bool isAutoCastType(const QualType &QT) {
+  if (QT->isPointerType() || QT->isReferenceType())
+    return false;
+
+  if (const CXXRecordDecl *RD = QT->getAsCXXRecordDecl())
+    return RD->hasAttr<ConsumableAutoCastAttr>();
+
+  return false;
+}
+
+static bool isSetOnReadPtrType(const QualType &QT) {
+  if (const CXXRecordDecl *RD = QT->getPointeeCXXRecordDecl())
+    return RD->hasAttr<ConsumableSetOnReadAttr>();
+  return false;
+}
+
+
 static bool isKnownState(ConsumedState State) {
   switch (State) {
   case CS_Unconsumed:
@@ -165,19 +183,16 @@
   llvm_unreachable("invalid enum");
 }
 
-static bool isRValueRefish(QualType ParamType) {
-  return ParamType->isRValueReferenceType() ||
-        (ParamType->isLValueReferenceType() &&
-         !cast<LValueReferenceType>(
-           ParamType.getCanonicalType())->isSpelledAsLValue());
+static bool isRValueRef(QualType ParamType) {
+  return ParamType->isRValueReferenceType();
 }
 
 static bool isTestingFunction(const FunctionDecl *FunDecl) {
   return FunDecl->hasAttr<TestTypestateAttr>();
 }
 
-static bool isValueType(QualType ParamType) {
-  return !(ParamType->isPointerType() || ParamType->isReferenceType());
+static bool isPointerOrRef(QualType ParamType) {
+  return ParamType->isPointerType() || ParamType->isReferenceType();
 }
 
 static ConsumedState mapConsumableAttrState(const QualType QT) {
@@ -455,15 +470,29 @@
   ConsumedAnalyzer &Analyzer;
   ConsumedStateMap *StateMap;
   MapType PropagationMap;
-  void forwardInfo(const Stmt *From, const Stmt *To);
-  bool isLikeMoveAssignment(const CXXMethodDecl *MethodDecl);
-  void propagateReturnType(const Stmt *Call, const FunctionDecl *Fun,
-                           QualType ReturnType);
+
+  InfoEntry findInfo(const Expr *E) {
+    return PropagationMap.find(E->IgnoreParens());
+  }
+  ConstInfoEntry findInfo(const Expr *E) const {
+    return PropagationMap.find(E->IgnoreParens());
+  }
+  void insertInfo(const Expr *E, const PropagationInfo &PI) {
+    PropagationMap.insert(PairType(E->IgnoreParens(), PI));
+  }
+
+  void forwardInfo(const Expr *From, const Expr *To);
+  void copyInfo(const Expr *From, const Expr *To, ConsumedState CS);
+  ConsumedState getInfo(const Expr *From);
+  void setInfo(const Expr *To, ConsumedState NS);
+  void propagateReturnType(const Expr *Call, const FunctionDecl *Fun);
 
 public:
   void checkCallability(const PropagationInfo &PInfo,
                         const FunctionDecl *FunDecl,
                         SourceLocation BlameLoc);
+  bool handleCall(const CallExpr *Call, const Expr *ObjArg,
+                  const FunctionDecl *FunD);
   
   void VisitBinaryOperator(const BinaryOperator *BinOp);
   void VisitCallExpr(const CallExpr *Call);
@@ -485,8 +514,8 @@
                       ConsumedStateMap *StateMap)
       : AC(AC), Analyzer(Analyzer), StateMap(StateMap) {}
   
-  PropagationInfo getInfo(const Stmt *StmtNode) const {
-    ConstInfoEntry Entry = PropagationMap.find(StmtNode);
+  PropagationInfo getInfo(const Expr *StmtNode) const {
+    ConstInfoEntry Entry = findInfo(StmtNode);
     
     if (Entry != PropagationMap.end())
       return Entry->second;
@@ -499,76 +528,187 @@
   }
 };
 
+
+void ConsumedStmtVisitor::forwardInfo(const Expr *From, const Expr *To) {
+  InfoEntry Entry = findInfo(From);
+  if (Entry != PropagationMap.end())
+    insertInfo(To, Entry->second);
+}
+
+
+// Create a new state for To, which is initialized to the state of From.
+// If NS is not CS_None, sets the state of From to NS.
+void ConsumedStmtVisitor::copyInfo(const Expr *From, const Expr *To,
+                                   ConsumedState NS) {
+  InfoEntry Entry = findInfo(From);
+  if (Entry != PropagationMap.end()) {
+    PropagationInfo& PInfo = Entry->second;
+    ConsumedState CS = PInfo.getAsState(StateMap);
+    if (CS != CS_None)
+      insertInfo(To, PropagationInfo(CS));
+    if (NS != CS_None && PInfo.isPointerToValue())
+      setStateForVarOrTmp(StateMap, PInfo, NS);
+  }
+}
+
+
+// Get the ConsumedState for From
+ConsumedState ConsumedStmtVisitor::getInfo(const Expr *From) {
+  InfoEntry Entry = findInfo(From);
+  if (Entry != PropagationMap.end()) {
+    PropagationInfo& PInfo = Entry->second;
+    return PInfo.getAsState(StateMap);
+  }
+  return CS_None;
+}
+
+
+// If we already have info for To then update it, otherwise create a new entry.
+void ConsumedStmtVisitor::setInfo(const Expr *To, ConsumedState NS) {
+  InfoEntry Entry = findInfo(To);
+  if (Entry != PropagationMap.end()) {
+    PropagationInfo& PInfo = Entry->second;
+    if (PInfo.isPointerToValue())
+      setStateForVarOrTmp(StateMap, PInfo, NS);
+  } else if (NS != CS_None) {
+     insertInfo(To, PropagationInfo(NS));
+  }
+}
+
+
+
 void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
                                            const FunctionDecl *FunDecl,
                                            SourceLocation BlameLoc) {
   assert(!PInfo.isTest());
-  
-  if (!FunDecl->hasAttr<CallableWhenAttr>())
-    return;
-  
+
   const CallableWhenAttr *CWAttr = FunDecl->getAttr<CallableWhenAttr>();
-  
+  if (!CWAttr)
+    return;
+
   if (PInfo.isVar()) {
     ConsumedState VarState = StateMap->getState(PInfo.getVar());
-    
+
     if (VarState == CS_None || isCallableInState(CWAttr, VarState))
       return;
-    
+
     Analyzer.WarningsHandler.warnUseInInvalidState(
       FunDecl->getNameAsString(), PInfo.getVar()->getNameAsString(),
       stateToString(VarState), BlameLoc);
-    
+
   } else {
     ConsumedState TmpState = PInfo.getAsState(StateMap);
-    
+
     if (TmpState == CS_None || isCallableInState(CWAttr, TmpState))
       return;
-    
+
     Analyzer.WarningsHandler.warnUseOfTempInInvalidState(
       FunDecl->getNameAsString(), stateToString(TmpState), BlameLoc);
   }
 }
 
-void ConsumedStmtVisitor::forwardInfo(const Stmt *From, const Stmt *To) {
-  InfoEntry Entry = PropagationMap.find(From);
-  
-  if (Entry != PropagationMap.end())
-    PropagationMap.insert(PairType(To, Entry->second));
+
+// Factors out common behavior for function, method, and operator calls.
+// Check parameters and set parameter state if necessary.
+// Returns true if the state of ObjArg is set, or false otherwise.
+bool ConsumedStmtVisitor::handleCall(const CallExpr *Call, const Expr *ObjArg,
+                                     const FunctionDecl *FunD) {
+  unsigned Offset = 0;
+  if (isa<CXXOperatorCallExpr>(Call) && isa<CXXMethodDecl>(FunD))
+    Offset = 1;  // first argument is 'this'
+
+  // check explicit parameters
+  for (unsigned Index = Offset; Index < Call->getNumArgs(); ++Index) {
+    // Skip variable argument lists.
+    if (Index - Offset >= FunD->getNumParams())
+      break;
+
+    const ParmVarDecl *Param = FunD->getParamDecl(Index - Offset);
+    QualType ParamType = Param->getType();
+
+    InfoEntry Entry = findInfo(Call->getArg(Index));
+
+    if (Entry == PropagationMap.end() || Entry->second.isTest())
+      continue;
+    PropagationInfo PInfo = Entry->second;
+
+    // Check that the parameter is in the correct state.
+    if (ParamTypestateAttr *PTA = Param->getAttr<ParamTypestateAttr>()) {
+      ConsumedState ParamState = PInfo.getAsState(StateMap);
+      ConsumedState ExpectedState = mapParamTypestateAttrState(PTA);
+
+      if (ParamState != ExpectedState)
+        Analyzer.WarningsHandler.warnParamTypestateMismatch(
+          Call->getArg(Index)->getExprLoc(),
+          stateToString(ExpectedState), stateToString(ParamState));
+    }
+
+    if (!(Entry->second.isVar() || Entry->second.isTmp()))
+      continue;
+
+    // Adjust state on the caller side.
+    if (isRValueRef(ParamType))
+      setStateForVarOrTmp(StateMap, PInfo, consumed::CS_Consumed);
+    else if (ReturnTypestateAttr *RT = Param->getAttr<ReturnTypestateAttr>())
+      setStateForVarOrTmp(StateMap, PInfo, mapReturnTypestateAttrState(RT));
+    else if (isPointerOrRef(ParamType) &&
+             (!ParamType->getPointeeType().isConstQualified() ||
+              isSetOnReadPtrType(ParamType)))
+      setStateForVarOrTmp(StateMap, PInfo, consumed::CS_Unknown);
+  }
+
+  if (!ObjArg)
+    return false;
+
+  // check implicit 'self' parameter, if present
+  InfoEntry Entry = findInfo(ObjArg);
+  if (Entry != PropagationMap.end()) {
+    PropagationInfo PInfo = Entry->second;
+    checkCallability(PInfo, FunD, Call->getExprLoc());
+
+    if (SetTypestateAttr *STA = FunD->getAttr<SetTypestateAttr>()) {
+      if (PInfo.isVar()) {
+        StateMap->setState(PInfo.getVar(), mapSetTypestateAttrState(STA));
+        return true;
+      }
+      else if (PInfo.isTmp()) {
+        StateMap->setState(PInfo.getTmp(), mapSetTypestateAttrState(STA));
+        return true;
+      }
+    }
+    else if (isTestingFunction(FunD) && PInfo.isVar()) {
+      PropagationMap.insert(PairType(Call,
+        PropagationInfo(PInfo.getVar(), testsFor(FunD))));
+    }
+  }
+  return false;
 }
 
-bool ConsumedStmtVisitor::isLikeMoveAssignment(
-  const CXXMethodDecl *MethodDecl) {
-  
-  return MethodDecl->isMoveAssignmentOperator() ||
-         (MethodDecl->getOverloadedOperator() == OO_Equal &&
-          MethodDecl->getNumParams() == 1 &&
-          MethodDecl->getParamDecl(0)->getType()->isRValueReferenceType());
-}
 
-void ConsumedStmtVisitor::propagateReturnType(const Stmt *Call,
-                                              const FunctionDecl *Fun,
-                                              QualType ReturnType) {
-  if (isConsumableType(ReturnType)) {
-    
+void ConsumedStmtVisitor::propagateReturnType(const Expr *Call,
+                                              const FunctionDecl *Fun) {
+  QualType RetType = Fun->getCallResultType();
+  if (RetType->isReferenceType())
+    RetType = RetType->getPointeeType();
+
+  if (isConsumableType(RetType)) {
     ConsumedState ReturnState;
-    
-    if (Fun->hasAttr<ReturnTypestateAttr>())
-      ReturnState = mapReturnTypestateAttrState(
-        Fun->getAttr<ReturnTypestateAttr>());
+    if (ReturnTypestateAttr *RTA = Fun->getAttr<ReturnTypestateAttr>())
+      ReturnState = mapReturnTypestateAttrState(RTA);
     else
-      ReturnState = mapConsumableAttrState(ReturnType);
+      ReturnState = mapConsumableAttrState(RetType);
     
     PropagationMap.insert(PairType(Call, PropagationInfo(ReturnState)));
   }
 }
 
+
 void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
   switch (BinOp->getOpcode()) {
   case BO_LAnd:
   case BO_LOr : {
-    InfoEntry LEntry = PropagationMap.find(BinOp->getLHS()),
-              REntry = PropagationMap.find(BinOp->getRHS());
+    InfoEntry LEntry = findInfo(BinOp->getLHS()),
+              REntry = findInfo(BinOp->getRHS());
     
     VarTestResult LTest, RTest;
     
@@ -605,69 +745,32 @@
   }
 }
 
+static bool isStdNamespace(const DeclContext *DC) {
+  if (!DC->isNamespace()) return false;
+  while (DC->getParent()->isNamespace())
+    DC = DC->getParent();
+  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+
+  return ND && ND->getName() == "std" &&
+         ND->getDeclContext()->isTranslationUnit();
+}
+
 void ConsumedStmtVisitor::VisitCallExpr(const CallExpr *Call) {
-  if (const FunctionDecl *FunDecl =
-    dyn_cast_or_null<FunctionDecl>(Call->getDirectCallee())) {
-    
-    // Special case for the std::move function.
-    // TODO: Make this more specific. (Deferred)
-    if (FunDecl->getNameAsString() == "move") {
-      forwardInfo(Call->getArg(0), Call);
-      return;
-    }
-    
-    unsigned Offset = Call->getNumArgs() - FunDecl->getNumParams();
-    
-    for (unsigned Index = Offset; Index < Call->getNumArgs(); ++Index) {
-      const ParmVarDecl *Param = FunDecl->getParamDecl(Index - Offset);
-      QualType ParamType = Param->getType();
-      
-      InfoEntry Entry = PropagationMap.find(Call->getArg(Index));
-      
-      if (Entry == PropagationMap.end() || Entry->second.isTest())
-        continue;
-      
-      PropagationInfo PInfo = Entry->second;
-      
-      // Check that the parameter is in the correct state.
-      
-      if (Param->hasAttr<ParamTypestateAttr>()) {
-        ConsumedState ParamState = PInfo.getAsState(StateMap);
-        
-        ConsumedState ExpectedState =
-          mapParamTypestateAttrState(Param->getAttr<ParamTypestateAttr>());
-        
-        if (ParamState != ExpectedState)
-          Analyzer.WarningsHandler.warnParamTypestateMismatch(
-            Call->getArg(Index - Offset)->getExprLoc(),
-            stateToString(ExpectedState), stateToString(ParamState));
-      }
-      
-      if (!(Entry->second.isVar() || Entry->second.isTmp()))
-        continue;
-      
-      // Adjust state on the caller side.
-      
-      if (isRValueRefish(ParamType)) {
-        setStateForVarOrTmp(StateMap, PInfo, consumed::CS_Consumed);
-        
-      } else if (Param->hasAttr<ReturnTypestateAttr>()) {
-        setStateForVarOrTmp(StateMap, PInfo,
-          mapReturnTypestateAttrState(Param->getAttr<ReturnTypestateAttr>()));
-        
-      } else if (!isValueType(ParamType) &&
-                 !ParamType->getPointeeType().isConstQualified()) {
-        
-        setStateForVarOrTmp(StateMap, PInfo, consumed::CS_Unknown);
-      }
-    }
-    
-    QualType RetType = FunDecl->getCallResultType();
-    if (RetType->isReferenceType())
-      RetType = RetType->getPointeeType();
-    
-    propagateReturnType(Call, FunDecl, RetType);
+  const FunctionDecl *FunDecl = Call->getDirectCallee();
+  if (!FunDecl)
+    return;
+
+  // Special case for the std::move function.
+  // TODO: Make this more specific. (Deferred)
+  if (Call->getNumArgs() == 1 &&
+      FunDecl->getNameAsString() == "move" &&
+      isStdNamespace(FunDecl->getDeclContext())) {
+    copyInfo(Call->getArg(0), Call, CS_Consumed);
+    return;
   }
+
+  handleCall(Call, 0, FunDecl);
+  propagateReturnType(Call, FunDecl);
 }
 
 void ConsumedStmtVisitor::VisitCastExpr(const CastExpr *Cast) {
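
The std::move special case in VisitCallExpr above is now gated on more than the bare name: the callee must take exactly one argument and live inside namespace std (isStdNamespace collapses nested and inline namespaces onto the outermost one). Restated outside the visitor as a sketch:

    bool IsStdMove = Call->getNumArgs() == 1 &&
                     FunDecl->getNameAsString() == "move" &&
                     isStdNamespace(FunDecl->getDeclContext());
    // Only then is the argument's state copied onto the call expression and
    // the argument itself marked CS_Consumed (copyInfo(..., CS_Consumed)).
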
@@ -677,7 +780,7 @@
 void ConsumedStmtVisitor::VisitCXXBindTemporaryExpr(
   const CXXBindTemporaryExpr *Temp) {
   
-  InfoEntry Entry = PropagationMap.find(Temp->getSubExpr());
+  InfoEntry Entry = findInfo(Temp->getSubExpr());
   
   if (Entry != PropagationMap.end() && !Entry->second.isTest()) {
     StateMap->setState(Temp, Entry->second.getAsState(StateMap));
@@ -695,168 +798,60 @@
     return;
   
   // FIXME: What should happen if someone annotates the move constructor?
-  if (Constructor->hasAttr<ReturnTypestateAttr>()) {
-    // TODO: Adjust state of args appropriately.
-    
-    ReturnTypestateAttr *RTAttr = Constructor->getAttr<ReturnTypestateAttr>();
-    ConsumedState RetState = mapReturnTypestateAttrState(RTAttr);
+  if (ReturnTypestateAttr *RTA = Constructor->getAttr<ReturnTypestateAttr>()) {
+    // TODO: Adjust state of args appropriately.    
+    ConsumedState RetState = mapReturnTypestateAttrState(RTA);
     PropagationMap.insert(PairType(Call, PropagationInfo(RetState)));
-    
   } else if (Constructor->isDefaultConstructor()) {
-    
     PropagationMap.insert(PairType(Call,
       PropagationInfo(consumed::CS_Consumed)));
-    
   } else if (Constructor->isMoveConstructor()) {
-    
-    InfoEntry Entry = PropagationMap.find(Call->getArg(0));
-    
-    if (Entry != PropagationMap.end()) {
-      PropagationInfo PInfo = Entry->second;
-      
-      if (PInfo.isVar()) {
-        const VarDecl* Var = PInfo.getVar();
-        
-        PropagationMap.insert(PairType(Call,
-          PropagationInfo(StateMap->getState(Var))));
-        
-        StateMap->setState(Var, consumed::CS_Consumed);
-        
-      } else if (PInfo.isTmp()) {
-        const CXXBindTemporaryExpr *Tmp = PInfo.getTmp();
-        
-        PropagationMap.insert(PairType(Call,
-          PropagationInfo(StateMap->getState(Tmp))));
-        
-        StateMap->setState(Tmp, consumed::CS_Consumed);
-        
-      } else {
-        PropagationMap.insert(PairType(Call, PInfo));
-      }
-    }
+    copyInfo(Call->getArg(0), Call, CS_Consumed);
   } else if (Constructor->isCopyConstructor()) {
-    forwardInfo(Call->getArg(0), Call);
-    
+    // Copy state from arg.  If setStateOnRead then set arg to CS_Unknown.
+    ConsumedState NS =
+      isSetOnReadPtrType(Constructor->getThisType(CurrContext)) ?
+      CS_Unknown : CS_None;
+    copyInfo(Call->getArg(0), Call, NS);
   } else {
     // TODO: Adjust state of args appropriately.
-    
     ConsumedState RetState = mapConsumableAttrState(ThisType);
     PropagationMap.insert(PairType(Call, PropagationInfo(RetState)));
   }
 }
 
+
 void ConsumedStmtVisitor::VisitCXXMemberCallExpr(
-  const CXXMemberCallExpr *Call) {
-  
-  VisitCallExpr(Call);
-  
-  InfoEntry Entry = PropagationMap.find(Call->getCallee()->IgnoreParens());
-  
-  if (Entry != PropagationMap.end()) {
-    PropagationInfo PInfo = Entry->second;
-    const CXXMethodDecl *MethodDecl = Call->getMethodDecl();
-    
-    checkCallability(PInfo, MethodDecl, Call->getExprLoc());
-    
-    if (PInfo.isVar()) {
-      if (isTestingFunction(MethodDecl))
-        PropagationMap.insert(PairType(Call,
-          PropagationInfo(PInfo.getVar(), testsFor(MethodDecl))));
-      else if (MethodDecl->hasAttr<SetTypestateAttr>())
-        StateMap->setState(PInfo.getVar(),
-          mapSetTypestateAttrState(MethodDecl->getAttr<SetTypestateAttr>()));
-    } else if (PInfo.isTmp() && MethodDecl->hasAttr<SetTypestateAttr>()) {
-      StateMap->setState(PInfo.getTmp(),
-        mapSetTypestateAttrState(MethodDecl->getAttr<SetTypestateAttr>()));
-    }
-  }
+    const CXXMemberCallExpr *Call) {
+  CXXMethodDecl* MD = Call->getMethodDecl();
+  if (!MD)
+    return;
+
+  handleCall(Call, Call->getImplicitObjectArgument(), MD);
+  propagateReturnType(Call, MD);
 }
 
+
 void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
-  const CXXOperatorCallExpr *Call) {
-  
+    const CXXOperatorCallExpr *Call) {
+
   const FunctionDecl *FunDecl =
     dyn_cast_or_null<FunctionDecl>(Call->getDirectCallee());
-  
   if (!FunDecl) return;
-    
-  if (isa<CXXMethodDecl>(FunDecl) &&
-      isLikeMoveAssignment(cast<CXXMethodDecl>(FunDecl))) {
-    
-    InfoEntry LEntry = PropagationMap.find(Call->getArg(0));
-    InfoEntry REntry = PropagationMap.find(Call->getArg(1));
-    
-    PropagationInfo LPInfo, RPInfo;
-    
-    if (LEntry != PropagationMap.end() &&
-        REntry != PropagationMap.end()) {
-      
-      LPInfo = LEntry->second;
-      RPInfo = REntry->second;
-      
-      if (LPInfo.isPointerToValue() && RPInfo.isPointerToValue()) {
-        setStateForVarOrTmp(StateMap, LPInfo, RPInfo.getAsState(StateMap));
-        PropagationMap.insert(PairType(Call, LPInfo));
-        setStateForVarOrTmp(StateMap, RPInfo, consumed::CS_Consumed);
-        
-      } else if (RPInfo.isState()) {
-        setStateForVarOrTmp(StateMap, LPInfo, RPInfo.getState());
-        PropagationMap.insert(PairType(Call, LPInfo));
-        
-      } else {
-        setStateForVarOrTmp(StateMap, RPInfo, consumed::CS_Consumed);
-      }
-      
-    } else if (LEntry != PropagationMap.end() &&
-               REntry == PropagationMap.end()) {
-      
-      LPInfo = LEntry->second;
-      
-      assert(!LPInfo.isTest());
-      
-      if (LPInfo.isPointerToValue()) {
-        setStateForVarOrTmp(StateMap, LPInfo, consumed::CS_Unknown);
-        PropagationMap.insert(PairType(Call, LPInfo));
-        
-      } else {
-        PropagationMap.insert(PairType(Call,
-          PropagationInfo(consumed::CS_Unknown)));
-      }
-      
-    } else if (LEntry == PropagationMap.end() &&
-               REntry != PropagationMap.end()) {
-      
-      RPInfo = REntry->second;
-      
-      if (RPInfo.isPointerToValue())
-        setStateForVarOrTmp(StateMap, RPInfo, consumed::CS_Consumed);
-    }
-    
-  } else {
-    
-    VisitCallExpr(Call);
-    
-    InfoEntry Entry = PropagationMap.find(Call->getArg(0));
-    
-    if (Entry != PropagationMap.end()) {
-      PropagationInfo PInfo = Entry->second;
-      
-      checkCallability(PInfo, FunDecl, Call->getExprLoc());
-      
-      if (PInfo.isVar()) {
-        if (isTestingFunction(FunDecl))
-          PropagationMap.insert(PairType(Call,
-            PropagationInfo(PInfo.getVar(), testsFor(FunDecl))));
-        else if (FunDecl->hasAttr<SetTypestateAttr>())
-          StateMap->setState(PInfo.getVar(),
-            mapSetTypestateAttrState(FunDecl->getAttr<SetTypestateAttr>()));
-        
-      } else if (PInfo.isTmp() && FunDecl->hasAttr<SetTypestateAttr>()) {
-        StateMap->setState(PInfo.getTmp(),
-          mapSetTypestateAttrState(FunDecl->getAttr<SetTypestateAttr>()));
-    }
-    }
+
+  if (Call->getOperator() == OO_Equal) {
+    ConsumedState CS = getInfo(Call->getArg(1));
+    if (!handleCall(Call, Call->getArg(0), FunDecl))
+      setInfo(Call->getArg(0), CS);
+    return;
   }
+
+  if (const CXXMemberCallExpr *MCall = dyn_cast<CXXMemberCallExpr>(Call))
+    handleCall(MCall, MCall->getImplicitObjectArgument(), FunDecl);
+  else
+    handleCall(Call, Call->getArg(0), FunDecl);
+
+  propagateReturnType(Call, FunDecl);
 }
 
 void ConsumedStmtVisitor::VisitDeclRefExpr(const DeclRefExpr *DeclRef) {
@@ -866,11 +861,9 @@
 }
 
 void ConsumedStmtVisitor::VisitDeclStmt(const DeclStmt *DeclS) {
-  for (DeclStmt::const_decl_iterator DI = DeclS->decl_begin(),
-       DE = DeclS->decl_end(); DI != DE; ++DI) {
-    
-    if (isa<VarDecl>(*DI)) VisitVarDecl(cast<VarDecl>(*DI));
-  }
+  for (const auto *DI : DeclS->decls())    
+    if (isa<VarDecl>(DI))
+      VisitVarDecl(cast<VarDecl>(DI));
   
   if (DeclS->isSingleDecl())
     if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl()))
@@ -892,22 +885,16 @@
   QualType ParamType = Param->getType();
   ConsumedState ParamState = consumed::CS_None;
   
-  if (Param->hasAttr<ParamTypestateAttr>()) {
-    const ParamTypestateAttr *PTAttr = Param->getAttr<ParamTypestateAttr>();
-    ParamState = mapParamTypestateAttrState(PTAttr);
-    
-  } else if (isConsumableType(ParamType)) {
-    ParamState = mapConsumableAttrState(ParamType);
-    
-  } else if (isRValueRefish(ParamType) &&
-             isConsumableType(ParamType->getPointeeType())) {
-    
-    ParamState = mapConsumableAttrState(ParamType->getPointeeType());
-    
-  } else if (ParamType->isReferenceType() &&
-             isConsumableType(ParamType->getPointeeType())) {
+  if (const ParamTypestateAttr *PTA = Param->getAttr<ParamTypestateAttr>())
+    ParamState = mapParamTypestateAttrState(PTA);    
+  else if (isConsumableType(ParamType))
+    ParamState = mapConsumableAttrState(ParamType);    
+  else if (isRValueRef(ParamType) &&
+           isConsumableType(ParamType->getPointeeType()))
+    ParamState = mapConsumableAttrState(ParamType->getPointeeType());    
+  else if (ParamType->isReferenceType() &&
+           isConsumableType(ParamType->getPointeeType()))
     ParamState = consumed::CS_Unknown;
-  }
   
   if (ParamState != CS_None)
     StateMap->setState(Param, ParamState);
@@ -917,7 +904,7 @@
   ConsumedState ExpectedState = Analyzer.getExpectedReturnState();
   
   if (ExpectedState != CS_None) {
-    InfoEntry Entry = PropagationMap.find(Ret->getRetValue());
+    InfoEntry Entry = findInfo(Ret->getRetValue());
     
     if (Entry != PropagationMap.end()) {
       ConsumedState RetState = Entry->second.getAsState(StateMap);
@@ -934,7 +921,7 @@
 }
 
 void ConsumedStmtVisitor::VisitUnaryOperator(const UnaryOperator *UOp) {
-  InfoEntry Entry = PropagationMap.find(UOp->getSubExpr()->IgnoreParens());
+  InfoEntry Entry = findInfo(UOp->getSubExpr());
   if (Entry == PropagationMap.end()) return;
   
   switch (UOp->getOpcode()) {
@@ -956,8 +943,7 @@
 void ConsumedStmtVisitor::VisitVarDecl(const VarDecl *Var) {
   if (isConsumableType(Var->getType())) {
     if (Var->hasInit()) {
-      MapType::iterator VIT = PropagationMap.find(
-        Var->getInit()->IgnoreImplicit());
+      MapType::iterator VIT = findInfo(Var->getInit()->IgnoreImplicit());
       if (VIT != PropagationMap.end()) {
         PropagationInfo PInfo = VIT->second;
         ConsumedState St = PInfo.getAsState(StateMap);
@@ -1158,24 +1144,21 @@
 void ConsumedStateMap::checkParamsForReturnTypestate(SourceLocation BlameLoc,
   ConsumedWarningsHandlerBase &WarningsHandler) const {
   
-  ConsumedState ExpectedState;
-  
   for (VarMapType::const_iterator DMI = VarMap.begin(), DME = VarMap.end();
        DMI != DME; ++DMI) {
     
     if (isa<ParmVarDecl>(DMI->first)) {
       const ParmVarDecl *Param = cast<ParmVarDecl>(DMI->first);
+      const ReturnTypestateAttr *RTA = Param->getAttr<ReturnTypestateAttr>();
       
-      if (!Param->hasAttr<ReturnTypestateAttr>()) continue;
+      if (!RTA)
+        continue;
       
-      ExpectedState =
-        mapReturnTypestateAttrState(Param->getAttr<ReturnTypestateAttr>());
-      
-      if (DMI->second != ExpectedState) {
+      ConsumedState ExpectedState = mapReturnTypestateAttrState(RTA);      
+      if (DMI->second != ExpectedState)
         WarningsHandler.warnParamReturnTypestateMismatch(BlameLoc,
           Param->getNameAsString(), stateToString(ExpectedState),
           stateToString(DMI->second));
-      }
     }
   }
 }
@@ -1286,9 +1269,7 @@
   } else
     ReturnType = D->getCallResultType();
 
-  if (D->hasAttr<ReturnTypestateAttr>()) {
-    const ReturnTypestateAttr *RTSAttr = D->getAttr<ReturnTypestateAttr>();
-
+  if (const ReturnTypestateAttr *RTSAttr = D->getAttr<ReturnTypestateAttr>()) {
     const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl();
     if (!RD || !RD->hasAttr<ConsumableAttr>()) {
       // FIXME: This should be removed when template instantiation propagates
@@ -1300,22 +1281,27 @@
       ExpectedReturnState = CS_None;
     } else
       ExpectedReturnState = mapReturnTypestateAttrState(RTSAttr);
-  } else if (isConsumableType(ReturnType))
-    ExpectedReturnState = mapConsumableAttrState(ReturnType);
+  } else if (isConsumableType(ReturnType)) {
+    if (isAutoCastType(ReturnType))   // We can auto-cast the state to the
+      ExpectedReturnState = CS_None;  // expected state.
+    else
+      ExpectedReturnState = mapConsumableAttrState(ReturnType);
+  }
   else
     ExpectedReturnState = CS_None;
 }
 
 bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
                                   const ConsumedStmtVisitor &Visitor) {
-  
-  OwningPtr<ConsumedStateMap> FalseStates(new ConsumedStateMap(*CurrStates));
+
+  std::unique_ptr<ConsumedStateMap> FalseStates(
+      new ConsumedStateMap(*CurrStates));
   PropagationInfo PInfo;
   
   if (const IfStmt *IfNode =
     dyn_cast_or_null<IfStmt>(CurrBlock->getTerminator().getStmt())) {
     
-    const Stmt *Cond = IfNode->getCond();
+    const Expr *Cond = IfNode->getCond();
     
     PInfo = Visitor.getInfo(Cond);
     if (!PInfo.isValid() && isa<BinaryOperator>(Cond))
@@ -1384,8 +1370,8 @@
     delete CurrStates;
     
   if (*++SI)
-    BlockInfo.addInfo(*SI, FalseStates.take());
-  
+    BlockInfo.addInfo(*SI, FalseStates.release());
+
   CurrStates = NULL;
   return true;
 }
@@ -1410,9 +1396,8 @@
   ConsumedStmtVisitor Visitor(AC, *this, CurrStates);
   
   // Add all trackable parameters to the state map.
-  for (FunctionDecl::param_const_iterator PI = D->param_begin(),
-       PE = D->param_end(); PI != PE; ++PI) {
-    Visitor.VisitParmVarDecl(*PI);
+  for (auto PI : D->params()) {
+    Visitor.VisitParmVarDecl(PI);
   }
   
   // Visit all of the function's basic blocks.
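
The pointer and tuple idioms switched in splitState above recur throughout this patch: OwningPtr becomes std::unique_ptr (with take() mapped to release()), llvm_move becomes std::move, and llvm::tie becomes std::tie. A self-contained sketch of the replacement idioms, independent of the analysis code:

    #include <memory>
    #include <tuple>
    #include <utility>

    std::pair<int, int> makePair() { return std::make_pair(1, 2); }

    void idioms() {
      std::unique_ptr<int> P(new int(42));    // was OwningPtr<int>
      int *Raw = P.release();                 // was P.take(): give up ownership
      delete Raw;

      int A, B;
      std::tie(A, B) = makePair();            // was llvm::tie(A, B)

      std::unique_ptr<int> Q(new int(A + B));
      std::unique_ptr<int> R = std::move(Q);  // was llvm_move(Q)
      (void)R;
    }
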
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 9e5ec55..b26d4fb 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -37,7 +37,6 @@
       POV(Ctx.getAnalysis<PostOrderCFGView>()) {}
   
   void enqueueBlock(const CFGBlock *block);
-  void enqueueSuccessors(const CFGBlock *block);
   void enqueuePredecessors(const CFGBlock *block);
 
   const CFGBlock *dequeue();
@@ -53,19 +52,6 @@
     worklist.push_back(block);
   }
 }
-  
-void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
-  const unsigned OldWorklistSize = worklist.size();
-  for (CFGBlock::const_succ_iterator I = block->succ_begin(),
-       E = block->succ_end(); I != E; ++I) {
-    enqueueBlock(*I);
-  }
-
-  if (OldWorklistSize == 0 || OldWorklistSize == worklist.size())
-    return;
-
-  sortWorklist();
-}
 
 void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
   const unsigned OldWorklistSize = worklist.size();
@@ -372,7 +358,7 @@
 
 void TransferFunctions::VisitBlockExpr(BlockExpr *BE) {
   AnalysisDeclContext::referenced_decls_iterator I, E;
-  llvm::tie(I, E) =
+  std::tie(I, E) =
     LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl());
   for ( ; I != E ; ++I) {
     const VarDecl *VD = *I;
@@ -389,9 +375,8 @@
 }
 
 void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
-  for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE = DS->decl_end();
-       DI != DE; ++DI)
-    if (VarDecl *VD = dyn_cast<VarDecl>(*DI)) {
+  for (const auto *DI : DS->decls())
+    if (const auto *VD = dyn_cast<VarDecl>(DI)) {
       if (!isAlwaysAlive(VD))
         val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
     }
@@ -581,16 +566,6 @@
   return new LiveVariables(LV);
 }
 
-static bool compare_entries(const CFGBlock *A, const CFGBlock *B) {
-  return A->getBlockID() < B->getBlockID();
-}
-
-static bool compare_vd_entries(const Decl *A, const Decl *B) {
-  SourceLocation ALoc = A->getLocStart();
-  SourceLocation BLoc = B->getLocStart();
-  return ALoc.getRawEncoding() < BLoc.getRawEncoding();
-}
-
 void LiveVariables::dumpBlockLiveness(const SourceManager &M) {
   getImpl(impl).dumpBlockLiveness(M);
 }
@@ -602,7 +577,9 @@
        it != ei; ++it) {
     vec.push_back(it->first);    
   }
-  std::sort(vec.begin(), vec.end(), compare_entries);
+  std::sort(vec.begin(), vec.end(), [](const CFGBlock *A, const CFGBlock *B) {
+    return A->getBlockID() < B->getBlockID();
+  });
 
   std::vector<const VarDecl*> declVec;
 
@@ -619,9 +596,11 @@
           se = vals.liveDecls.end(); si != se; ++si) {
       declVec.push_back(*si);      
     }
-    
-    std::sort(declVec.begin(), declVec.end(), compare_vd_entries);
-    
+
+    std::sort(declVec.begin(), declVec.end(), [](const Decl *A, const Decl *B) {
+      return A->getLocStart() < B->getLocStart();
+    });
+
     for (std::vector<const VarDecl*>::iterator di = declVec.begin(),
          de = declVec.end(); di != de; ++di) {
       llvm::errs() << " " << (*di)->getDeclName().getAsString()
diff --git a/lib/Analysis/NOTICE b/lib/Analysis/NOTICE
deleted file mode 100644
index 369204e..0000000
--- a/lib/Analysis/NOTICE
+++ /dev/null
@@ -1,62 +0,0 @@
-==============================================================================
-LLVM Release License
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2007-2011 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
-    LLVM Team
-
-    University of Illinois at Urbana-Champaign
-
-    http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-    * Redistributions of source code must retain the above copyright notice,
-      this list of conditions and the following disclaimers.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimers in the
-      documentation and/or other materials provided with the distribution.
-
-    * Neither the names of the LLVM Team, University of Illinois at
-      Urbana-Champaign, nor the names of its contributors may be used to
-      endorse or promote products derived from this Software without specific
-      prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-==============================================================================
-The LLVM software contains code written by third parties.  Such software will
-have its own individual LICENSE.TXT file in the directory in which it appears.
-This file will describe the copyrights, license, and restrictions which apply
-to that code.
-
-The disclaimer of warranty in the University of Illinois Open Source License
-applies to all code in the LLVM Distribution, and nothing in any of the
-other licenses gives permission to use the names of the LLVM Team or the
-University of Illinois to endorse or promote products derived from this
-Software.
-
-The following pieces of software have additional or alternate copyrights,
-licenses, and/or restrictions:
-
-Program             Directory
--------             ---------
-<none yet>
diff --git a/lib/Analysis/ProgramPoint.cpp b/lib/Analysis/ProgramPoint.cpp
index 7d67e8a..26b59bb 100644
--- a/lib/Analysis/ProgramPoint.cpp
+++ b/lib/Analysis/ProgramPoint.cpp
@@ -43,9 +43,10 @@
   }
 }
 
-SimpleProgramPointTag::SimpleProgramPointTag(StringRef description)
-  : desc(description) {}
+SimpleProgramPointTag::SimpleProgramPointTag(StringRef MsgProvider, 
+                                             StringRef Msg)
+  : Desc((MsgProvider + " : " + Msg).str()) {}
 
 StringRef SimpleProgramPointTag::getTagDescription() const {
-  return desc;
+  return Desc;
 }
diff --git a/lib/Analysis/PseudoConstantAnalysis.cpp b/lib/Analysis/PseudoConstantAnalysis.cpp
index 5d659ce..314ce7c 100644
--- a/lib/Analysis/PseudoConstantAnalysis.cpp
+++ b/lib/Analysis/PseudoConstantAnalysis.cpp
@@ -171,10 +171,9 @@
     case Stmt::DeclStmtClass: {
       const DeclStmt *DS = cast<DeclStmt>(Head);
       // Iterate over each decl and see if any of them contain reference decls
-      for (DeclStmt::const_decl_iterator I = DS->decl_begin(),
-          E = DS->decl_end(); I != E; ++I) {
+      for (const auto *I : DS->decls()) {
         // We only care about VarDecls
-        const VarDecl *VD = dyn_cast<VarDecl>(*I);
+        const VarDecl *VD = dyn_cast<VarDecl>(I);
         if (!VD)
           continue;
 
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index a2d19c0..3cc8ae4 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -13,10 +13,12 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/Lex/Preprocessor.h"
 #include "clang/AST/Expr.h"
 #include "clang/AST/ExprCXX.h"
 #include "clang/AST/ExprObjC.h"
 #include "clang/AST/StmtCXX.h"
+#include "clang/AST/ParentMap.h"
 #include "clang/Analysis/AnalysisContext.h"
 #include "clang/Analysis/CFG.h"
 #include "clang/Basic/SourceManager.h"
@@ -25,36 +27,324 @@
 
 using namespace clang;
 
-namespace {
-class DeadCodeScan {
-  llvm::BitVector Visited;
-  llvm::BitVector &Reachable;
-  SmallVector<const CFGBlock *, 10> WorkList;
-  
-  typedef SmallVector<std::pair<const CFGBlock *, const Stmt *>, 12>
-      DeferredLocsTy;
-  
-  DeferredLocsTy DeferredLocs;
-  
-public:
-  DeadCodeScan(llvm::BitVector &reachable)
-    : Visited(reachable.size()),
-      Reachable(reachable) {}
-  
-  void enqueue(const CFGBlock *block);  
-  unsigned scanBackwards(const CFGBlock *Start,
-                         clang::reachable_code::Callback &CB);
-  
-  bool isDeadCodeRoot(const CFGBlock *Block);
-  
-  const Stmt *findDeadCode(const CFGBlock *Block);
-  
-  void reportDeadCode(const Stmt *S,
-                      clang::reachable_code::Callback &CB);
-};
+//===----------------------------------------------------------------------===//
+// Core Reachability Analysis routines.
+//===----------------------------------------------------------------------===//
+
+static bool isEnumConstant(const Expr *Ex) {
+  const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex);
+  if (!DR)
+    return false;
+  return isa<EnumConstantDecl>(DR->getDecl());
 }
 
-void DeadCodeScan::enqueue(const CFGBlock *block) {  
+static bool isTrivialExpression(const Expr *Ex) {
+  Ex = Ex->IgnoreParenCasts();
+  return isa<IntegerLiteral>(Ex) || isa<StringLiteral>(Ex) ||
+         isa<CXXBoolLiteralExpr>(Ex) || isa<ObjCBoolLiteralExpr>(Ex) ||
+         isa<CharacterLiteral>(Ex) ||
+         isEnumConstant(Ex);
+}
+
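+/// Returns true if 'S' is the trivial (literal) condition of a do-while,
+/// e.g. the '0' in "do { ... } while (0)", which is not worth reporting
+/// as unreachable.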
+static bool isTrivialDoWhile(const CFGBlock *B, const Stmt *S) {
+  // Check if the block ends with a do...while() and see if 'S' is the
+  // condition.
+  if (const Stmt *Term = B->getTerminator()) {
+    if (const DoStmt *DS = dyn_cast<DoStmt>(Term)) {
+      const Expr *Cond = DS->getCond()->IgnoreParenCasts();
+      return Cond == S && isTrivialExpression(Cond);
+    }
+  }
+  return false;
+}
+
+static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
+  // Look to see if the block ends with a 'return', and see if 'S'
+  // is a substatement.  The 'return' may not be the last element in
+  // the block because of destructors.
+  for (CFGBlock::const_reverse_iterator I = B->rbegin(), E = B->rend();
+       I != E; ++I) {
+    if (Optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
+      if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(CS->getStmt())) {
+        if (RS == S)
+          return true;
+        if (const Expr *RE = RS->getRetValue()) {
+          RE = RE->IgnoreParenCasts();
+          if (RE == S)
+            return true;
+          ParentMap PM(const_cast<Expr*>(RE));
+          // If 'S' is in the ParentMap, it is a subexpression of
+          // the return statement.  Note also that we are restricting
+          // to looking at return statements in the same CFGBlock,
+          // so this will intentionally not catch cases where the
+          // return statement contains nested control-flow.
+          return PM.getParent(S);
+        }
+      }
+      break;
+    }
+  }
+  return false;
+}
+
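+/// Walk up the chain of macro expansions and return the location of the
+/// top-most macro that ultimately produced 'Loc'.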
+static SourceLocation getTopMostMacro(SourceLocation Loc, SourceManager &SM) {
+  assert(Loc.isMacroID());
+  SourceLocation Last;
+  while (Loc.isMacroID()) {
+    Last = Loc;
+    Loc = SM.getImmediateMacroCallerLoc(Loc);
+  }
+  return Last;
+}
+
+/// Returns true if the statement is expanded from a configuration macro.
+static bool isExpandedFromConfigurationMacro(const Stmt *S,
+                                             Preprocessor &PP,
+                                             bool IgnoreYES_NO = false) {
+  // FIXME: This is not very precise.  Here we just check to see if the
+  // value comes from a macro, but we can do much better.  This is likely
+  // to be overly conservative.  This logic is factored into a separate function
+  // so that we can refine it later.
+  SourceLocation L = S->getLocStart();
+  if (L.isMacroID()) {
+    if (IgnoreYES_NO) {
+      // The Objective-C constants 'YES' and 'NO'
+      // are defined as macros.  Do not treat them
+      // as configuration values.
+      SourceManager &SM = PP.getSourceManager();
+      SourceLocation TopL = getTopMostMacro(L, SM);
+      StringRef MacroName = PP.getImmediateMacroName(TopL);
+      if (MacroName == "YES" || MacroName == "NO")
+        return false;
+    }
+    return true;
+  }
+  return false;
+}
+
+static bool isConfigurationValue(const ValueDecl *D, Preprocessor &PP);
+
+/// Returns true if the statement represents a configuration value.
+///
+/// A configuration value is a value, usually determined at compile time,
+/// that makes some branch always (or never) execute.  Such guards are for
+/// "sometimes unreachable" code.  Such code is usually not interesting
+/// to report as unreachable, and may mask truly unreachable code within
+/// those blocks.
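+///
+/// For example, in
+///   #define ENABLE_LOGGING 0
+///   if (ENABLE_LOGGING) { ... }
+/// the condition is a configuration value; the guarded block is only
+/// "sometimes unreachable" and is not reported.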
+static bool isConfigurationValue(const Stmt *S,
+                                 Preprocessor &PP,
+                                 SourceRange *SilenceableCondVal = nullptr,
+                                 bool IncludeIntegers = true,
+                                 bool WrappedInParens = false) {
+  if (!S)
+    return false;
+
+  // Special case looking for the sigil '()' around an integer literal.
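+  // Writing the literal in parentheses, e.g. 'if ((0))', is treated as an
+  // explicit request to suppress the unreachable-code warning on that branch.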
+  if (const ParenExpr *PE = dyn_cast<ParenExpr>(S))
+    if (!PE->getLocStart().isMacroID())
+      return isConfigurationValue(PE->getSubExpr(), PP, SilenceableCondVal, 
+                                  IncludeIntegers, true);
+
+  if (const Expr *Ex = dyn_cast<Expr>(S))
+    S = Ex->IgnoreParenCasts();
+
+  bool IgnoreYES_NO = false;
+
+  switch (S->getStmtClass()) {
+    case Stmt::CallExprClass: {
+      const FunctionDecl *Callee =
+        dyn_cast_or_null<FunctionDecl>(cast<CallExpr>(S)->getCalleeDecl());
+      return Callee ? Callee->isConstexpr() : false;
+    }
+    case Stmt::DeclRefExprClass:
+      return isConfigurationValue(cast<DeclRefExpr>(S)->getDecl(), PP);
+    case Stmt::ObjCBoolLiteralExprClass:
+      IgnoreYES_NO = true;
+      // Fallthrough.
+    case Stmt::CXXBoolLiteralExprClass:
+    case Stmt::IntegerLiteralClass: {
+      const Expr *E = cast<Expr>(S);
+      if (IncludeIntegers) {
+        if (SilenceableCondVal && !SilenceableCondVal->getBegin().isValid())
+          *SilenceableCondVal = E->getSourceRange();
+        return WrappedInParens || isExpandedFromConfigurationMacro(E, PP, IgnoreYES_NO);
+      }
+      return false;
+    }
+    case Stmt::MemberExprClass:
+      return isConfigurationValue(cast<MemberExpr>(S)->getMemberDecl(), PP);
+    case Stmt::UnaryExprOrTypeTraitExprClass:
+      return true;
+    case Stmt::BinaryOperatorClass: {
+      const BinaryOperator *B = cast<BinaryOperator>(S);
+      // Only include raw integers (not enums) as configuration
+      // values if they are used in a logical or comparison operator
+      // (not arithmetic).
+      IncludeIntegers &= (B->isLogicalOp() || B->isComparisonOp());
+      return isConfigurationValue(B->getLHS(), PP, SilenceableCondVal,
+                                  IncludeIntegers) ||
+             isConfigurationValue(B->getRHS(), PP, SilenceableCondVal,
+                                  IncludeIntegers);
+    }
+    case Stmt::UnaryOperatorClass: {
+      const UnaryOperator *UO = cast<UnaryOperator>(S);
+      if (SilenceableCondVal) 
+        *SilenceableCondVal = UO->getSourceRange();      
+      return UO->getOpcode() == UO_LNot &&
+             isConfigurationValue(UO->getSubExpr(), PP, SilenceableCondVal,
+                                  IncludeIntegers, WrappedInParens);
+    }
+    default:
+      return false;
+  }
+}
+
+static bool isConfigurationValue(const ValueDecl *D, Preprocessor &PP) {
+  if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D))
+    return isConfigurationValue(ED->getInitExpr(), PP);
+  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+    // As a heuristic, treat globals as configuration values.  Note
+    // that we only will get here if Sema evaluated this
+    // condition to a constant expression, which means the global
+    // had to be declared in a way to be a truly constant value.
+    // We could generalize this to local variables, but it isn't
+    // clear if those truly represent configuration values that
+    // gate unreachable code.
+    if (!VD->hasLocalStorage())
+      return true;
+
+    // As a heuristic, locals that have been marked 'const' explicitly
+    // can be treated as configuration values as well.
+    return VD->getType().isLocalConstQualified();
+  }
+  return false;
+}
+
+/// Returns true if we should always explore all successors of a block.
+static bool shouldTreatSuccessorsAsReachable(const CFGBlock *B,
+                                             Preprocessor &PP) {
+  if (const Stmt *Term = B->getTerminator()) {
+    if (isa<SwitchStmt>(Term))
+      return true;
+    // Specially handle '||' and '&&'.
+    if (isa<BinaryOperator>(Term)) {
+      return isConfigurationValue(Term, PP);
+    }
+  }
+
+  const Stmt *Cond = B->getTerminatorCondition(/* stripParens */ false);
+  return isConfigurationValue(Cond, PP);
+}
+
+static unsigned scanFromBlock(const CFGBlock *Start,
+                              llvm::BitVector &Reachable,
+                              Preprocessor *PP,
+                              bool IncludeSometimesUnreachableEdges) {
+  unsigned count = 0;
+  
+  // Prep work queue
+  SmallVector<const CFGBlock*, 32> WL;
+  
+  // The entry block may have already been marked reachable
+  // by the caller.
+  if (!Reachable[Start->getBlockID()]) {
+    ++count;
+    Reachable[Start->getBlockID()] = true;
+  }
+  
+  WL.push_back(Start);
+  
+  // Find the reachable blocks from 'Start'.
+  while (!WL.empty()) {
+    const CFGBlock *item = WL.pop_back_val();
+
+    // There are cases where we want to treat all successors as reachable.
+    // The idea is that some "sometimes unreachable" code is not interesting,
+    // and that we should forge ahead and explore those branches anyway.
+    // This allows us to potentially uncover some "always unreachable" code
+    // within the "sometimes unreachable" code.
+    // Look at the successors and mark them reachable.
+    Optional<bool> TreatAllSuccessorsAsReachable;
+    if (!IncludeSometimesUnreachableEdges)
+      TreatAllSuccessorsAsReachable = false;
+
+    for (CFGBlock::const_succ_iterator I = item->succ_begin(), 
+         E = item->succ_end(); I != E; ++I) {
+      const CFGBlock *B = *I;
+      if (!B) do {
+        const CFGBlock *UB = I->getPossiblyUnreachableBlock();
+        if (!UB)
+          break;
+
+        if (!TreatAllSuccessorsAsReachable.hasValue()) {
+          assert(PP);
+          TreatAllSuccessorsAsReachable =
+            shouldTreatSuccessorsAsReachable(item, *PP);
+        }
+
+        if (TreatAllSuccessorsAsReachable.getValue()) {
+          B = UB;
+          break;
+        }
+      }
+      while (false);
+
+      if (B) {
+        unsigned blockID = B->getBlockID();
+        if (!Reachable[blockID]) {
+          Reachable.set(blockID);
+          WL.push_back(B);
+          ++count;
+        }
+      }
+    }
+  }
+  return count;
+}
+
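+/// Scan for reachable blocks, additionally following edges that are only
+/// "sometimes unreachable" (e.g. guarded by a configuration value).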
+static unsigned scanMaybeReachableFromBlock(const CFGBlock *Start,
+                                            Preprocessor &PP,
+                                            llvm::BitVector &Reachable) {
+  return scanFromBlock(Start, Reachable, &PP, true);
+}
+
+//===----------------------------------------------------------------------===//
+// Dead Code Scanner.
+//===----------------------------------------------------------------------===//
+
+namespace {
+  class DeadCodeScan {
+    llvm::BitVector Visited;
+    llvm::BitVector &Reachable;
+    SmallVector<const CFGBlock *, 10> WorkList;
+    Preprocessor &PP;
+
+    typedef SmallVector<std::pair<const CFGBlock *, const Stmt *>, 12>
+    DeferredLocsTy;
+
+    DeferredLocsTy DeferredLocs;
+
+  public:
+    DeadCodeScan(llvm::BitVector &reachable, Preprocessor &PP)
+    : Visited(reachable.size()),
+      Reachable(reachable),
+      PP(PP) {}
+
+    void enqueue(const CFGBlock *block);
+    unsigned scanBackwards(const CFGBlock *Start,
+                           clang::reachable_code::Callback &CB);
+
+    bool isDeadCodeRoot(const CFGBlock *Block);
+
+    const Stmt *findDeadCode(const CFGBlock *Block);
+
+    void reportDeadCode(const CFGBlock *B,
+                        const Stmt *S,
+                        clang::reachable_code::Callback &CB);
+  };
+}
+
+void DeadCodeScan::enqueue(const CFGBlock *block) {
   unsigned blockID = block->getBlockID();
   if (Reachable[blockID] || Visited[blockID])
     return;
@@ -64,9 +354,9 @@
 
 bool DeadCodeScan::isDeadCodeRoot(const clang::CFGBlock *Block) {
   bool isDeadRoot = true;
-  
+
   for (CFGBlock::const_pred_iterator I = Block->pred_begin(),
-        E = Block->pred_end(); I != E; ++I) {
+       E = Block->pred_end(); I != E; ++I) {
     if (const CFGBlock *PredBlock = *I) {
       unsigned blockID = PredBlock->getBlockID();
       if (Visited[blockID]) {
@@ -81,7 +371,7 @@
       }
     }
   }
-  
+
   return isDeadRoot;
 }
 
@@ -100,11 +390,13 @@
       if (isValidDeadStmt(S))
         return S;
     }
-  
+
   if (CFGTerminator T = Block->getTerminator()) {
-    const Stmt *S = T.getStmt();
-    if (isValidDeadStmt(S))
-      return S;    
+    if (!T.isTemporaryDtorsBranch()) {
+      const Stmt *S = T.getStmt();
+      if (isValidDeadStmt(S))
+        return S;
+    }
   }
 
   return 0;
@@ -124,7 +416,7 @@
 
   unsigned count = 0;
   enqueue(Start);
-  
+
   while (!WorkList.empty()) {
     const CFGBlock *Block = WorkList.pop_back_val();
 
@@ -135,7 +427,7 @@
 
     // Look for any dead code within the block.
     const Stmt *S = findDeadCode(Block);
-    
+
     if (!S) {
       // No dead code.  Possibly an empty block.  Look at dead predecessors.
       for (CFGBlock::const_pred_iterator I = Block->pred_begin(),
@@ -145,16 +437,16 @@
       }
       continue;
     }
-    
+
     // Specially handle macro-expanded code.
     if (S->getLocStart().isMacroID()) {
-      count += clang::reachable_code::ScanReachableFromBlock(Block, Reachable);
+      count += scanMaybeReachableFromBlock(Block, PP, Reachable);
       continue;
     }
 
     if (isDeadCodeRoot(Block)) {
-      reportDeadCode(S, CB);
-      count += clang::reachable_code::ScanReachableFromBlock(Block, Reachable);
+      reportDeadCode(Block, S, CB);
+      count += scanMaybeReachableFromBlock(Block, PP, Reachable);
     }
     else {
       // Record this statement as the possibly best location in a
@@ -169,15 +461,15 @@
   if (!DeferredLocs.empty()) {
     llvm::array_pod_sort(DeferredLocs.begin(), DeferredLocs.end(), SrcCmp);
     for (DeferredLocsTy::iterator I = DeferredLocs.begin(),
-          E = DeferredLocs.end(); I != E; ++I) {
-      const CFGBlock *block = I->first;
-      if (Reachable[block->getBlockID()])
+         E = DeferredLocs.end(); I != E; ++I) {
+      const CFGBlock *Block = I->first;
+      if (Reachable[Block->getBlockID()])
         continue;
-      reportDeadCode(I->second, CB);
-      count += clang::reachable_code::ScanReachableFromBlock(block, Reachable);
+      reportDeadCode(Block, I->second, CB);
+      count += scanMaybeReachableFromBlock(Block, PP, Reachable);
     }
   }
-    
+
   return count;
 }
 
@@ -208,7 +500,7 @@
     case Expr::BinaryConditionalOperatorClass:
     case Expr::ConditionalOperatorClass: {
       const AbstractConditionalOperator *CO =
-        cast<AbstractConditionalOperator>(S);
+      cast<AbstractConditionalOperator>(S);
       return CO->getQuestionLoc();
     }
     case Expr::MemberExprClass: {
@@ -246,61 +538,86 @@
   return S->getLocStart();
 }
 
-void DeadCodeScan::reportDeadCode(const Stmt *S,
+void DeadCodeScan::reportDeadCode(const CFGBlock *B,
+                                  const Stmt *S,
                                   clang::reachable_code::Callback &CB) {
+  // Classify the unreachable code found, or suppress it in some cases.
+  reachable_code::UnreachableKind UK = reachable_code::UK_Other;
+
+  if (isa<BreakStmt>(S)) {
+    UK = reachable_code::UK_Break;
+  }
+  else if (isTrivialDoWhile(B, S)) {
+    return;
+  }
+  else if (isDeadReturn(B, S)) {
+    UK = reachable_code::UK_Return;
+  }
+
+  SourceRange SilenceableCondVal;
+
+  if (UK == reachable_code::UK_Other) {
+    // Check if the dead code is part of the "loop target" of
+    // a for/for-range loop.  This is the block that contains
+    // the increment code.
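+    // e.g. the '++i' in "for (...; ...; ++i)" when the loop body never falls
+    // through; such code gets its own UK_Loop_Increment classification.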
+    if (const Stmt *LoopTarget = B->getLoopTarget()) {
+      SourceLocation Loc = LoopTarget->getLocStart();
+      SourceRange R1(Loc, Loc), R2;
+
+      if (const ForStmt *FS = dyn_cast<ForStmt>(LoopTarget)) {
+        const Expr *Inc = FS->getInc();
+        Loc = Inc->getLocStart();
+        R2 = Inc->getSourceRange();
+      }
+
+      CB.HandleUnreachable(reachable_code::UK_Loop_Increment,
+                           Loc, SourceRange(), SourceRange(Loc, Loc), R2);
+      return;
+    }
+    
+    // Check if the dead block has a predecessor whose branch has
+    // a configuration value that *could* be modified to
+    // silence the warning.
+    CFGBlock::const_pred_iterator PI = B->pred_begin();
+    if (PI != B->pred_end()) {
+      if (const CFGBlock *PredBlock = PI->getPossiblyUnreachableBlock()) {
+        const Stmt *TermCond =
+            PredBlock->getTerminatorCondition(/* strip parens */ false);
+        isConfigurationValue(TermCond, PP, &SilenceableCondVal);
+      }
+    }
+  }
+
   SourceRange R1, R2;
   SourceLocation Loc = GetUnreachableLoc(S, R1, R2);
-  CB.HandleUnreachable(Loc, R1, R2);
+  CB.HandleUnreachable(UK, Loc, SilenceableCondVal, R1, R2);
 }
 
+//===----------------------------------------------------------------------===//
+// Reachability APIs.
+//===----------------------------------------------------------------------===//
+
 namespace clang { namespace reachable_code {
 
-void Callback::anchor() { }  
+void Callback::anchor() { }
 
 unsigned ScanReachableFromBlock(const CFGBlock *Start,
                                 llvm::BitVector &Reachable) {
-  unsigned count = 0;
-  
-  // Prep work queue
-  SmallVector<const CFGBlock*, 32> WL;
-  
-  // The entry block may have already been marked reachable
-  // by the caller.
-  if (!Reachable[Start->getBlockID()]) {
-    ++count;
-    Reachable[Start->getBlockID()] = true;
-  }
-  
-  WL.push_back(Start);
-  
-  // Find the reachable blocks from 'Start'.
-  while (!WL.empty()) {
-    const CFGBlock *item = WL.pop_back_val();
-    
-    // Look at the successors and mark then reachable.
-    for (CFGBlock::const_succ_iterator I = item->succ_begin(), 
-         E = item->succ_end(); I != E; ++I)
-      if (const CFGBlock *B = *I) {
-        unsigned blockID = B->getBlockID();
-        if (!Reachable[blockID]) {
-          Reachable.set(blockID);
-          WL.push_back(B);
-          ++count;
-        }
-      }
-  }
-  return count;
+  return scanFromBlock(Start, Reachable, /* SourceManager* */ 0, false);
 }
-  
-void FindUnreachableCode(AnalysisDeclContext &AC, Callback &CB) {
+
+void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
+                         Callback &CB) {
+
   CFG *cfg = AC.getCFG();
   if (!cfg)
     return;
 
-  // Scan for reachable blocks from the entrance of the CFG.  
+  // Scan for reachable blocks from the entrance of the CFG.
   // If there are no unreachable blocks, we're done.
   llvm::BitVector reachable(cfg->getNumBlockIDs());
-  unsigned numReachable = ScanReachableFromBlock(&cfg->getEntry(), reachable);
+  unsigned numReachable =
+    scanMaybeReachableFromBlock(&cfg->getEntry(), PP, reachable);
   if (numReachable == cfg->getNumBlockIDs())
     return;
   
@@ -309,7 +626,7 @@
   if (!AC.getCFGBuildOptions().AddEHEdges) {
     for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
          E = cfg->try_blocks_end() ; I != E; ++I) {
-      numReachable += ScanReachableFromBlock(*I, reachable);
+      numReachable += scanMaybeReachableFromBlock(*I, PP, reachable);
     }
     if (numReachable == cfg->getNumBlockIDs())
       return;
@@ -323,7 +640,7 @@
     if (reachable[block->getBlockID()])
       continue;
     
-    DeadCodeScan DS(reachable);
+    DeadCodeScan DS(reachable, PP);
     numReachable += DS.scanBackwards(block, CB);
     
     if (numReachable == cfg->getNumBlockIDs())
diff --git a/lib/Analysis/ScanfFormatString.cpp b/lib/Analysis/ScanfFormatString.cpp
index f5ce84f..3ff7f0a 100644
--- a/lib/Analysis/ScanfFormatString.cpp
+++ b/lib/Analysis/ScanfFormatString.cpp
@@ -379,21 +379,23 @@
   return ArgType();
 }
 
-bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
+bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
+                             const LangOptions &LangOpt,
                              ASTContext &Ctx) {
-  if (!QT->isPointerType())
-    return false;
 
   // %n is different from other conversion specifiers; don't try to fix it.
   if (CS.getKind() == ConversionSpecifier::nArg)
     return false;
 
+  if (!QT->isPointerType())
+    return false;
+
   QualType PT = QT->getPointeeType();
 
   // If it's an enum, get its underlying type.
-  if (const EnumType *ETy = QT->getAs<EnumType>())
-    QT = ETy->getDecl()->getIntegerType();
-  
+  if (const EnumType *ETy = PT->getAs<EnumType>())
+    PT = ETy->getDecl()->getIntegerType();
+
   const BuiltinType *BT = PT->getAs<BuiltinType>();
   if (!BT)
     return false;
@@ -405,6 +407,15 @@
       LM.setKind(LengthModifier::AsWideChar);
     else
       LM.setKind(LengthModifier::None);
+
+    // If we know the target array length, we can use it as a field width.
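+    // e.g. scanning with %s into 'char buf[10]' yields the suggestion '%9s',
+    // leaving room for the null terminator.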
+    if (const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(RawQT)) {
+      if (CAT->getSizeModifier() == ArrayType::Normal)
+        FieldWidth = OptionalAmount(OptionalAmount::Constant,
+                                    CAT->getSize().getZExtValue() - 1,
+                                    "", 0, false);
+
+    }
     return true;
   }
 
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index 6e0e173..fd804eb 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -172,12 +172,9 @@
     const Expr* const* FunArgs;    // Function arguments
     CallingContext*    PrevCtx;    // The previous context; or 0 if none.
 
-    CallingContext(const NamedDecl *D = 0, const Expr *S = 0,
-                   unsigned N = 0, const Expr* const *A = 0,
-                   CallingContext *P = 0)
-      : AttrDecl(D), SelfArg(S), SelfArrow(false),
-        NumArgs(N), FunArgs(A), PrevCtx(P)
-    { }
+    CallingContext(const NamedDecl *D)
+        : AttrDecl(D), SelfArg(0), SelfArrow(false), NumArgs(0), FunArgs(0),
+          PrevCtx(0) {}
   };
 
   typedef SmallVector<SExprNode, 4> NodeVector;
@@ -188,44 +185,41 @@
   NodeVector NodeVec;
 
 private:
+  unsigned make(ExprOp O, unsigned F = 0, const void *D = 0) {
+    NodeVec.push_back(SExprNode(O, F, D));
+    return NodeVec.size() - 1;
+  }
+
   unsigned makeNop() {
-    NodeVec.push_back(SExprNode(EOP_Nop, 0, 0));
-    return NodeVec.size()-1;
+    return make(EOP_Nop);
   }
 
   unsigned makeWildcard() {
-    NodeVec.push_back(SExprNode(EOP_Wildcard, 0, 0));
-    return NodeVec.size()-1;
+    return make(EOP_Wildcard);
   }
 
   unsigned makeUniversal() {
-    NodeVec.push_back(SExprNode(EOP_Universal, 0, 0));
-    return NodeVec.size()-1;
+    return make(EOP_Universal);
   }
 
   unsigned makeNamedVar(const NamedDecl *D) {
-    NodeVec.push_back(SExprNode(EOP_NVar, 0, D));
-    return NodeVec.size()-1;
+    return make(EOP_NVar, 0, D);
   }
 
   unsigned makeLocalVar(const NamedDecl *D) {
-    NodeVec.push_back(SExprNode(EOP_LVar, 0, D));
-    return NodeVec.size()-1;
+    return make(EOP_LVar, 0, D);
   }
 
   unsigned makeThis() {
-    NodeVec.push_back(SExprNode(EOP_This, 0, 0));
-    return NodeVec.size()-1;
+    return make(EOP_This);
   }
 
   unsigned makeDot(const NamedDecl *D, bool Arrow) {
-    NodeVec.push_back(SExprNode(EOP_Dot, Arrow ? 1 : 0, D));
-    return NodeVec.size()-1;
+    return make(EOP_Dot, Arrow ? 1 : 0, D);
   }
 
   unsigned makeCall(unsigned NumArgs, const NamedDecl *D) {
-    NodeVec.push_back(SExprNode(EOP_Call, NumArgs, D));
-    return NodeVec.size()-1;
+    return make(EOP_Call, NumArgs, D);
   }
 
   // Grab the very first declaration of virtual method D
@@ -242,28 +236,23 @@
   }
 
   unsigned makeMCall(unsigned NumArgs, const CXXMethodDecl *D) {
-    NodeVec.push_back(SExprNode(EOP_MCall, NumArgs, getFirstVirtualDecl(D)));
-    return NodeVec.size()-1;
+    return make(EOP_MCall, NumArgs, getFirstVirtualDecl(D));
   }
 
   unsigned makeIndex() {
-    NodeVec.push_back(SExprNode(EOP_Index, 0, 0));
-    return NodeVec.size()-1;
+    return make(EOP_Index);
   }
 
   unsigned makeUnary() {
-    NodeVec.push_back(SExprNode(EOP_Unary, 0, 0));
-    return NodeVec.size()-1;
+    return make(EOP_Unary);
   }
 
   unsigned makeBinary() {
-    NodeVec.push_back(SExprNode(EOP_Binary, 0, 0));
-    return NodeVec.size()-1;
+    return make(EOP_Binary);
   }
 
   unsigned makeUnknown(unsigned Arity) {
-    NodeVec.push_back(SExprNode(EOP_Unknown, Arity, 0));
-    return NodeVec.size()-1;
+    return make(EOP_Unknown, Arity);
   }
 
   inline bool isCalleeArrow(const Expr *E) {
@@ -277,7 +266,7 @@
   /// ensure that the original expression is a valid mutex expression.
   ///
   /// NDeref returns the number of Derefence and AddressOf operations
-  /// preceeding the Expr; this is used to decide whether to pretty-print
+  /// preceding the Expr; this is used to decide whether to pretty-print
   /// SExprs with . or ->.
   unsigned buildSExpr(const Expr *Exp, CallingContext* CallCtx,
                       int* NDeref = 0) {
@@ -575,15 +564,15 @@
 
   /// Issue a warning about an invalid lock expression
   static void warnInvalidLock(ThreadSafetyHandler &Handler,
-                              const Expr *MutexExp,
-                              const Expr *DeclExp, const NamedDecl* D) {
+                              const Expr *MutexExp, const Expr *DeclExp,
+                              const NamedDecl *D, StringRef Kind) {
     SourceLocation Loc;
     if (DeclExp)
       Loc = DeclExp->getExprLoc();
 
     // FIXME: add a note about the attribute location in MutexExp or D
     if (Loc.isValid())
-      Handler.handleInvalidLockExp(Loc);
+      Handler.handleInvalidLockExp(Kind, Loc);
   }
 
   bool operator==(const SExpr &other) const {
@@ -716,27 +705,16 @@
   }
 };
 
-
-
 /// \brief A short list of SExprs
 class MutexIDList : public SmallVector<SExpr, 3> {
 public:
-  /// \brief Return true if the list contains the specified SExpr
-  /// Performs a linear search, because these lists are almost always very small.
-  bool contains(const SExpr& M) {
-    for (iterator I=begin(),E=end(); I != E; ++I)
-      if ((*I) == M) return true;
-    return false;
-  }
-
-  /// \brief Push M onto list, bud discard duplicates
+  /// \brief Push M onto list, but discard duplicates.
   void push_back_nodup(const SExpr& M) {
-    if (!contains(M)) push_back(M);
+    if (end() == std::find(begin(), end(), M))
+      push_back(M);
   }
 };
 
-
-
 /// \brief This is a helper class that stores info about the most recent
 /// acquire of a Lock.
 ///
@@ -1450,9 +1428,10 @@
 public:
   ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}
 
-  void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat);
-  void removeLock(FactSet &FSet, const SExpr &Mutex,
-                  SourceLocation UnlockLoc, bool FullyRemove=false);
+  void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat,
+               StringRef DiagKind);
+  void removeLock(FactSet &FSet, const SExpr &Mutex, SourceLocation UnlockLoc,
+                  bool FullyRemove, LockKind Kind, StringRef DiagKind);
 
   template <typename AttrType>
   void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
@@ -1485,12 +1464,89 @@
   void runAnalysis(AnalysisDeclContext &AC);
 };
 
+/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs.
+static const ValueDecl *getValueDecl(const Expr *Exp) {
+  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
+    return getValueDecl(CE->getSubExpr());
+
+  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
+    return DR->getDecl();
+
+  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
+    return ME->getMemberDecl();
+
+  return nullptr;
+}
+
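+/// SFINAE helper: detects whether an attribute type exposes args_begin(),
+/// i.e. whether it carries a list of capability arguments.  Used to pick the
+/// proper ClassifyDiagnostic overload below.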
+template <typename Ty>
+class has_arg_iterator {
+  typedef char yes[1];
+  typedef char no[2];
+
+  template <typename Inner>
+  static yes& test(Inner *I, decltype(I->args_begin()) * = nullptr);
+
+  template <typename>
+  static no& test(...);
+
+public:
+  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
+};
+
+static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
+  return A->getName();
+}
+
+static StringRef ClassifyDiagnostic(QualType VDT) {
+  // We need to look at the declaration of the type of the value to determine
+  // which it is. The type should either be a record or a typedef, or a pointer
+  // or reference thereof.
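+  // e.g. a type annotated with capability("role") makes later diagnostics
+  // refer to a "role" rather than the default "mutex".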
+  if (const auto *RT = VDT->getAs<RecordType>()) {
+    if (const auto *RD = RT->getDecl())
+      if (const auto *CA = RD->getAttr<CapabilityAttr>())
+        return ClassifyDiagnostic(CA);
+  } else if (const auto *TT = VDT->getAs<TypedefType>()) {
+    if (const auto *TD = TT->getDecl())
+      if (const auto *CA = TD->getAttr<CapabilityAttr>())
+        return ClassifyDiagnostic(CA);
+  } else if (VDT->isPointerType() || VDT->isReferenceType())
+    return ClassifyDiagnostic(VDT->getPointeeType());
+
+  return "mutex";
+}
+
+static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
+  assert(VD && "No ValueDecl passed");
+
+  // The ValueDecl is the declaration of a mutex or role (hopefully).
+  return ClassifyDiagnostic(VD->getType());
+}
+
+template <typename AttrTy>
+static typename std::enable_if<!has_arg_iterator<AttrTy>::value,
+                               StringRef>::type
+ClassifyDiagnostic(const AttrTy *A) {
+  if (const ValueDecl *VD = getValueDecl(A->getArg()))
+    return ClassifyDiagnostic(VD);
+  return "mutex";
+}
+
+template <typename AttrTy>
+static typename std::enable_if<has_arg_iterator<AttrTy>::value,
+                               StringRef>::type
+ClassifyDiagnostic(const AttrTy *A) {
+  for (auto I = A->args_begin(), E = A->args_end(); I != E; ++I) {
+    if (const ValueDecl *VD = getValueDecl(*I))
+      return ClassifyDiagnostic(VD);
+  }
+  return "mutex";
+}
 
 /// \brief Add a new lock to the lockset, warning if the lock is already there.
 /// \param Mutex -- the Mutex expression for the lock
 /// \param LDat  -- the LockData for the lock
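+/// \param DiagKind -- the capability kind used when wording diagnostics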
 void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
-                                   const LockData &LDat) {
+                                   const LockData &LDat, StringRef DiagKind) {
   // FIXME: deal with acquired before/after annotations.
   // FIXME: Don't always warn when we have support for reentrant locks.
   if (Mutex.shouldIgnore())
@@ -1498,7 +1554,7 @@
 
   if (FSet.findLock(FactMan, Mutex)) {
     if (!LDat.Asserted)
-      Handler.handleDoubleLock(Mutex.toString(), LDat.AcquireLoc);
+      Handler.handleDoubleLock(DiagKind, Mutex.toString(), LDat.AcquireLoc);
   } else {
     FSet.addLock(FactMan, Mutex, LDat);
   }
@@ -1508,16 +1564,24 @@
 /// \brief Remove a lock from the lockset, warning if the lock is not there.
 /// \param Mutex The lock expression corresponding to the lock to be removed
 /// \param UnlockLoc The source location of the unlock (only used in error msg)
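+/// \param ReceivedKind The kind of unlock performed (exclusive, shared, or
+///        generic); a mismatch with the held kind is diagnosed
+/// \param DiagKind The capability kind used when wording diagnostics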
-void ThreadSafetyAnalyzer::removeLock(FactSet &FSet,
-                                      const SExpr &Mutex,
+void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const SExpr &Mutex,
                                       SourceLocation UnlockLoc,
-                                      bool FullyRemove) {
+                                      bool FullyRemove, LockKind ReceivedKind,
+                                      StringRef DiagKind) {
   if (Mutex.shouldIgnore())
     return;
 
   const LockData *LDat = FSet.findLock(FactMan, Mutex);
   if (!LDat) {
-    Handler.handleUnmatchedUnlock(Mutex.toString(), UnlockLoc);
+    Handler.handleUnmatchedUnlock(DiagKind, Mutex.toString(), UnlockLoc);
+    return;
+  }
+
+  // Generic lock removal doesn't care about lock kind mismatches, but
+  // otherwise diagnose when the lock kinds are mismatched.
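+  // e.g. releasing a lock as shared when it is currently held exclusively.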
+  if (ReceivedKind != LK_Generic && LDat->LKind != ReceivedKind) {
+    Handler.handleIncorrectUnlockKind(DiagKind, Mutex.toString(), LDat->LKind,
+                                      ReceivedKind, UnlockLoc);
     return;
   }
 
@@ -1532,8 +1596,8 @@
       // We're releasing the underlying mutex, but not destroying the
       // managing object.  Warn on dual release.
       if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
-        Handler.handleUnmatchedUnlock(LDat->UnderlyingMutex.toString(),
-                                      UnlockLoc);
+        Handler.handleUnmatchedUnlock(
+            DiagKind, LDat->UnderlyingMutex.toString(), UnlockLoc);
       }
       FSet.removeLock(FactMan, LDat->UnderlyingMutex);
       return;
@@ -1555,7 +1619,7 @@
     // The mutex held is the "this" object.
     SExpr Mu(0, Exp, D, SelfDecl);
     if (!Mu.isValid())
-      SExpr::warnInvalidLock(Handler, 0, Exp, D);
+      SExpr::warnInvalidLock(Handler, 0, Exp, D, ClassifyDiagnostic(Attr));
     else
       Mtxs.push_back_nodup(Mu);
     return;
@@ -1564,7 +1628,7 @@
   for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
     SExpr Mu(*I, Exp, D, SelfDecl);
     if (!Mu.isValid())
-      SExpr::warnInvalidLock(Handler, *I, Exp, D);
+      SExpr::warnInvalidLock(Handler, *I, Exp, D, ClassifyDiagnostic(Attr));
     else
       Mtxs.push_back_nodup(Mu);
   }
@@ -1697,6 +1761,7 @@
   bool Negate = false;
   const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
   const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
+  StringRef CapDiagKind = "mutex";
 
   CallExpr *Exp =
     const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
@@ -1720,6 +1785,7 @@
           cast<ExclusiveTrylockFunctionAttr>(Attr);
         getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
                     PredBlock, CurrBlock, A->getSuccessValue(), Negate);
+        CapDiagKind = ClassifyDiagnostic(A);
         break;
       }
       case attr::SharedTrylockFunction: {
@@ -1727,6 +1793,7 @@
           cast<SharedTrylockFunctionAttr>(Attr);
         getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
                     PredBlock, CurrBlock, A->getSuccessValue(), Negate);
+        CapDiagKind = ClassifyDiagnostic(A);
         break;
       }
       default:
@@ -1736,17 +1803,13 @@
 
   // Add and remove locks.
   SourceLocation Loc = Exp->getExprLoc();
-  for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
-    addLock(Result, ExclusiveLocksToAdd[i],
-            LockData(Loc, LK_Exclusive));
-  }
-  for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
-    addLock(Result, SharedLocksToAdd[i],
-            LockData(Loc, LK_Shared));
-  }
+  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
+    addLock(Result, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive),
+            CapDiagKind);
+  for (const auto &SharedLockToAdd : SharedLocksToAdd)
+    addLock(Result, SharedLockToAdd, LockData(Loc, LK_Shared), CapDiagKind);
 }
 
-
 /// \brief We use this class to visit different types of expressions in
 /// CFGBlocks, and build up the lockset.
 /// An expression may cause us to add or remove locks from the lockset, or else
@@ -1761,11 +1824,12 @@
   unsigned CtxIndex;
 
   // Helper functions
-  const ValueDecl *getValueDecl(const Expr *Exp);
 
   void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
-                          Expr *MutexExp, ProtectedOperationKind POK);
-  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp);
+                          Expr *MutexExp, ProtectedOperationKind POK,
+                          StringRef DiagKind);
+  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
+                       StringRef DiagKind);
 
   void checkAccess(const Expr *Exp, AccessKind AK);
   void checkPtAccess(const Expr *Exp, AccessKind AK);
@@ -1789,31 +1853,17 @@
   void VisitDeclStmt(DeclStmt *S);
 };
 
-
-/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
-const ValueDecl *BuildLockset::getValueDecl(const Expr *Exp) {
-  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Exp))
-    return getValueDecl(CE->getSubExpr());
-
-  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
-    return DR->getDecl();
-
-  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
-    return ME->getMemberDecl();
-
-  return 0;
-}
-
 /// \brief Warn if the LSet does not contain a lock sufficient to protect access
 /// of at least the passed in AccessKind.
 void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                       AccessKind AK, Expr *MutexExp,
-                                      ProtectedOperationKind POK) {
+                                      ProtectedOperationKind POK,
+                                      StringRef DiagKind) {
   LockKind LK = getLockKindFromAccessKind(AK);
 
   SExpr Mutex(MutexExp, Exp, D);
   if (!Mutex.isValid()) {
-    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
+    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
     return;
   } else if (Mutex.shouldIgnore()) {
     return;
@@ -1829,40 +1879,38 @@
       LDat = &FEntry->LDat;
       std::string PartMatchStr = FEntry->MutID.toString();
       StringRef   PartMatchName(PartMatchStr);
-      Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
-                                           Exp->getExprLoc(), &PartMatchName);
+      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
+                                           LK, Exp->getExprLoc(),
+                                           &PartMatchName);
     } else {
       // Warn that there's no match at all.
-      Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
-                                           Exp->getExprLoc());
+      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
+                                           LK, Exp->getExprLoc());
     }
     NoError = false;
   }
   // Make sure the mutex we found is the right kind.
   if (NoError && LDat && !LDat->isAtLeast(LK))
-    Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
+    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(), LK,
                                          Exp->getExprLoc());
 }
 
 /// \brief Warn if the LSet contains the given lock.
-void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr* Exp,
-                                   Expr *MutexExp) {
+void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
+                                   Expr *MutexExp,
+                                   StringRef DiagKind) {
   SExpr Mutex(MutexExp, Exp, D);
   if (!Mutex.isValid()) {
-    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
+    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
     return;
   }
 
   LockData* LDat = FSet.findLock(Analyzer->FactMan, Mutex);
-  if (LDat) {
-    std::string DeclName = D->getNameAsString();
-    StringRef   DeclNameSR (DeclName);
-    Analyzer->Handler.handleFunExcludesLock(DeclNameSR, Mutex.toString(),
-                                            Exp->getExprLoc());
-  }
+  if (LDat)
+    Analyzer->Handler.handleFunExcludesLock(
+        DiagKind, D->getNameAsString(), Mutex.toString(), Exp->getExprLoc());
 }
 
-
 /// \brief Checks guarded_by and pt_guarded_by attributes.
 /// Whenever we identify an access (read or write) to a DeclRefExpr that is
 /// marked with guarded_by, we must ensure the appropriate mutexes are held.
@@ -1879,10 +1927,8 @@
   }
 
   if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
-    if (Analyzer->Handler.issueBetaWarnings()) {
-      checkPtAccess(AE->getLHS(), AK);
-      return;
-    }
+    checkPtAccess(AE->getLHS(), AK);
+    return;
   }
 
   if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
@@ -1896,55 +1942,48 @@
   if (!D || !D->hasAttrs())
     return;
 
-  if (D->getAttr<GuardedVarAttr>() && FSet.isEmpty())
-    Analyzer->Handler.handleNoMutexHeld(D, POK_VarAccess, AK,
+  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty())
+    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarAccess, AK,
                                         Exp->getExprLoc());
 
-  const AttrVec &ArgAttrs = D->getAttrs();
-  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
-    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
-      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
+  for (const auto *I : D->specific_attrs<GuardedByAttr>())
+    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarAccess,
+                       ClassifyDiagnostic(I));
 }
 
 /// \brief Checks pt_guarded_by and pt_guarded_var attributes.
 void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
-  if (Analyzer->Handler.issueBetaWarnings()) {
-    while (true) {
-      if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
-        Exp = PE->getSubExpr();
-        continue;
-      }
-      if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
-        if (CE->getCastKind() == CK_ArrayToPointerDecay) {
-          // If it's an actual array, and not a pointer, then it's elements
-          // are protected by GUARDED_BY, not PT_GUARDED_BY;
-          checkAccess(CE->getSubExpr(), AK);
-          return;
-        }
-        Exp = CE->getSubExpr();
-        continue;
-      }
-      break;
+  while (true) {
+    if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
+      Exp = PE->getSubExpr();
+      continue;
     }
+    if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
+      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
+        // If it's an actual array, and not a pointer, then its elements
+        // are protected by GUARDED_BY, not PT_GUARDED_BY.
+        checkAccess(CE->getSubExpr(), AK);
+        return;
+      }
+      Exp = CE->getSubExpr();
+      continue;
+    }
+    break;
   }
-  else
-    Exp = Exp->IgnoreParenCasts();
 
   const ValueDecl *D = getValueDecl(Exp);
   if (!D || !D->hasAttrs())
     return;
 
-  if (D->getAttr<PtGuardedVarAttr>() && FSet.isEmpty())
-    Analyzer->Handler.handleNoMutexHeld(D, POK_VarDereference, AK,
+  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty())
+    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarDereference, AK,
                                         Exp->getExprLoc());
 
-  const AttrVec &ArgAttrs = D->getAttrs();
-  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
-    if (PtGuardedByAttr *GBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
-      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarDereference);
+  for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
+    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarDereference,
+                       ClassifyDiagnostic(I));
 }
 
-
 /// \brief Process a function call, method call, constructor call,
 /// or destructor call.  This involves looking at the attributes on the
 /// corresponding function/method/constructor/destructor, issuing warnings,
@@ -1958,26 +1997,22 @@
 void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
   SourceLocation Loc = Exp->getExprLoc();
   const AttrVec &ArgAttrs = D->getAttrs();
-  MutexIDList ExclusiveLocksToAdd;
-  MutexIDList SharedLocksToAdd;
-  MutexIDList LocksToRemove;
+  MutexIDList ExclusiveLocksToAdd, SharedLocksToAdd;
+  MutexIDList ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
+  StringRef CapDiagKind = "mutex";
 
   for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
     Attr *At = const_cast<Attr*>(ArgAttrs[i]);
     switch (At->getKind()) {
-      // When we encounter an exclusive lock function, we need to add the lock
-      // to our lockset with kind exclusive.
-      case attr::ExclusiveLockFunction: {
-        ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(At);
-        Analyzer->getMutexIDs(ExclusiveLocksToAdd, A, Exp, D, VD);
-        break;
-      }
+      // When we encounter a lock function, we need to add the lock to our
+      // lockset.
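+      // AcquireCapabilityAttr subsumes the old exclusive and shared lock
+      // function attributes; isShared() tells them apart.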
+      case attr::AcquireCapability: {
+        auto *A = cast<AcquireCapabilityAttr>(At);
+        Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
+                                            : ExclusiveLocksToAdd,
+                              A, Exp, D, VD);
 
-      // When we encounter a shared lock function, we need to add the lock
-      // to our lockset with kind shared.
-      case attr::SharedLockFunction: {
-        SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(At);
-        Analyzer->getMutexIDs(SharedLocksToAdd, A, Exp, D, VD);
+        CapDiagKind = ClassifyDiagnostic(A);
         break;
       }
 
@@ -1989,10 +2024,10 @@
 
         MutexIDList AssertLocks;
         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
-        for (unsigned i=0,n=AssertLocks.size(); i<n; ++i) {
-          Analyzer->addLock(FSet, AssertLocks[i],
-                            LockData(Loc, LK_Exclusive, false, true));
-        }
+        for (const auto &AssertLock : AssertLocks)
+          Analyzer->addLock(FSet, AssertLock,
+                            LockData(Loc, LK_Exclusive, false, true),
+                            ClassifyDiagnostic(A));
         break;
       }
       case attr::AssertSharedLock: {
@@ -2000,36 +2035,35 @@
 
         MutexIDList AssertLocks;
         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
-        for (unsigned i=0,n=AssertLocks.size(); i<n; ++i) {
-          Analyzer->addLock(FSet, AssertLocks[i],
-                            LockData(Loc, LK_Shared, false, true));
-        }
+        for (const auto &AssertLock : AssertLocks)
+          Analyzer->addLock(FSet, AssertLock,
+                            LockData(Loc, LK_Shared, false, true),
+                            ClassifyDiagnostic(A));
         break;
       }
 
       // When we encounter an unlock function, we need to remove unlocked
       // mutexes from the lockset, and flag a warning if they are not there.
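+      // ReleaseCapabilityAttr similarly subsumes the old unlock attribute;
+      // isGeneric()/isShared() select the unlock kind to check.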
-      case attr::UnlockFunction: {
-        UnlockFunctionAttr *A = cast<UnlockFunctionAttr>(At);
-        Analyzer->getMutexIDs(LocksToRemove, A, Exp, D, VD);
+      case attr::ReleaseCapability: {
+        auto *A = cast<ReleaseCapabilityAttr>(At);
+        if (A->isGeneric())
+          Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
+        else if (A->isShared())
+          Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
+        else
+          Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);
+
+        CapDiagKind = ClassifyDiagnostic(A);
         break;
       }
 
-      case attr::ExclusiveLocksRequired: {
-        ExclusiveLocksRequiredAttr *A = cast<ExclusiveLocksRequiredAttr>(At);
+      case attr::RequiresCapability: {
+        RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
 
-        for (ExclusiveLocksRequiredAttr::args_iterator
-             I = A->args_begin(), E = A->args_end(); I != E; ++I)
-          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
-        break;
-      }
-
-      case attr::SharedLocksRequired: {
-        SharedLocksRequiredAttr *A = cast<SharedLocksRequiredAttr>(At);
-
-        for (SharedLocksRequiredAttr::args_iterator I = A->args_begin(),
+        for (RequiresCapabilityAttr::args_iterator I = A->args_begin(),
              E = A->args_end(); I != E; ++I)
-          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
+          warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, *I,
+                             POK_FunctionCall, ClassifyDiagnostic(A));
         break;
       }
 
@@ -2038,12 +2072,12 @@
 
         for (LocksExcludedAttr::args_iterator I = A->args_begin(),
             E = A->args_end(); I != E; ++I) {
-          warnIfMutexHeld(D, Exp, *I);
+          warnIfMutexHeld(D, Exp, *I, ClassifyDiagnostic(A));
         }
         break;
       }
 
-      // Ignore other (non thread-safety) attributes
+      // Ignore attributes unrelated to thread-safety
       default:
         break;
     }
@@ -2054,20 +2088,18 @@
   if (VD) {
     if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
       const CXXRecordDecl* PD = CD->getParent();
-      if (PD && PD->getAttr<ScopedLockableAttr>())
+      if (PD && PD->hasAttr<ScopedLockableAttr>())
         isScopedVar = true;
     }
   }
 
   // Add locks.
-  for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
-    Analyzer->addLock(FSet, ExclusiveLocksToAdd[i],
-                            LockData(Loc, LK_Exclusive, isScopedVar));
-  }
-  for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
-    Analyzer->addLock(FSet, SharedLocksToAdd[i],
-                            LockData(Loc, LK_Shared, isScopedVar));
-  }
+  for (const auto &M : ExclusiveLocksToAdd)
+    Analyzer->addLock(FSet, M, LockData(Loc, LK_Exclusive, isScopedVar),
+                      CapDiagKind);
+  for (const auto &M : SharedLocksToAdd)
+    Analyzer->addLock(FSet, M, LockData(Loc, LK_Shared, isScopedVar),
+                      CapDiagKind);
 
   // Add the managing object as a dummy mutex, mapped to the underlying mutex.
   // FIXME -- this doesn't work if we acquire multiple locks.
@@ -2076,22 +2108,23 @@
     DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
     SExpr SMutex(&DRE, 0, 0);
 
-    for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
-      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive,
-                                               ExclusiveLocksToAdd[i]));
-    }
-    for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
-      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared,
-                                               SharedLocksToAdd[i]));
-    }
+    for (const auto &M : ExclusiveLocksToAdd)
+      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive, M),
+                        CapDiagKind);
+    for (const auto &M : SharedLocksToAdd)
+      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared, M),
+                        CapDiagKind);
   }
 
   // Remove locks.
   // FIXME -- should only fully remove if the attribute refers to 'this'.
   bool Dtor = isa<CXXDestructorDecl>(D);
-  for (unsigned i=0,n=LocksToRemove.size(); i<n; ++i) {
-    Analyzer->removeLock(FSet, LocksToRemove[i], Loc, Dtor);
-  }
+  for (const auto &M : ExclusiveLocksToRemove)
+    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
+  for (const auto &M : SharedLocksToRemove)
+    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
+  for (const auto &M : GenericLocksToRemove)
+    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
 }
 
 
@@ -2168,11 +2201,9 @@
       case OO_Star:
       case OO_Arrow:
       case OO_Subscript: {
-        if (Analyzer->Handler.issueBetaWarnings()) {
-          const Expr *Obj = OE->getArg(0);
-          checkAccess(Obj, AK_Read);
-          checkPtAccess(Obj, AK_Read);
-        }
+        const Expr *Obj = OE->getArg(0);
+        checkAccess(Obj, AK_Read);
+        checkPtAccess(Obj, AK_Read);
         break;
       }
       default: {
@@ -2254,9 +2285,8 @@
     if (I1 != FSet1.end()) {
       const LockData* LDat1 = &FactMan[*I1].LDat;
       if (LDat1->LKind != LDat2.LKind) {
-        Handler.handleExclusiveAndShared(FSet2Mutex.toString(),
-                                         LDat2.AcquireLoc,
-                                         LDat1->AcquireLoc);
+        Handler.handleExclusiveAndShared("mutex", FSet2Mutex.toString(),
+                                         LDat2.AcquireLoc, LDat1->AcquireLoc);
         if (Modify && LDat1->LKind != LK_Exclusive) {
           // Take the exclusive lock, which is the one in FSet2.
           *I1 = *I;
@@ -2272,15 +2302,14 @@
           // If this is a scoped lock that manages another mutex, and if the
           // underlying mutex is still held, then warn about the underlying
           // mutex.
-          Handler.handleMutexHeldEndOfScope(LDat2.UnderlyingMutex.toString(),
-                                            LDat2.AcquireLoc,
-                                            JoinLoc, LEK1);
+          Handler.handleMutexHeldEndOfScope("mutex",
+                                            LDat2.UnderlyingMutex.toString(),
+                                            LDat2.AcquireLoc, JoinLoc, LEK1);
         }
       }
       else if (!LDat2.Managed && !FSet2Mutex.isUniversal() && !LDat2.Asserted)
-        Handler.handleMutexHeldEndOfScope(FSet2Mutex.toString(),
-                                          LDat2.AcquireLoc,
-                                          JoinLoc, LEK1);
+        Handler.handleMutexHeldEndOfScope("mutex", FSet2Mutex.toString(),
+                                          LDat2.AcquireLoc, JoinLoc, LEK1);
     }
   }
 
@@ -2296,15 +2325,14 @@
           // If this is a scoped lock that manages another mutex, and if the
           // underlying mutex is still held, then warn about the underlying
           // mutex.
-          Handler.handleMutexHeldEndOfScope(LDat1.UnderlyingMutex.toString(),
-                                            LDat1.AcquireLoc,
-                                            JoinLoc, LEK1);
+          Handler.handleMutexHeldEndOfScope("mutex",
+                                            LDat1.UnderlyingMutex.toString(),
+                                            LDat1.AcquireLoc, JoinLoc, LEK1);
         }
       }
       else if (!LDat1.Managed && !FSet1Mutex.isUniversal() && !LDat1.Asserted)
-        Handler.handleMutexHeldEndOfScope(FSet1Mutex.toString(),
-                                          LDat1.AcquireLoc,
-                                          JoinLoc, LEK2);
+        Handler.handleMutexHeldEndOfScope("mutex", FSet1Mutex.toString(),
+                                          LDat1.AcquireLoc, JoinLoc, LEK2);
       if (Modify)
         FSet1.removeLock(FactMan, FSet1Mutex);
     }
@@ -2342,7 +2370,7 @@
 
   if (!D)
     return;  // Ignore anonymous functions for now.
-  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
+  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
     return;
   // FIXME: Do something a bit more intelligent inside constructor and
   // destructor code.  Constructors and destructors must assume unique access
@@ -2385,18 +2413,17 @@
 
     MutexIDList ExclusiveLocksToAdd;
     MutexIDList SharedLocksToAdd;
+    StringRef CapDiagKind = "mutex";
 
     SourceLocation Loc = D->getLocation();
     for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
       Attr *Attr = ArgAttrs[i];
       Loc = Attr->getLocation();
-      if (ExclusiveLocksRequiredAttr *A
-            = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
-        getMutexIDs(ExclusiveLocksToAdd, A, (Expr*) 0, D);
-      } else if (SharedLocksRequiredAttr *A
-                   = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
-        getMutexIDs(SharedLocksToAdd, A, (Expr*) 0, D);
-      } else if (UnlockFunctionAttr *A = dyn_cast<UnlockFunctionAttr>(Attr)) {
+      if (RequiresCapabilityAttr *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
+        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
+                    0, D);
+        CapDiagKind = ClassifyDiagnostic(A);
+      } else if (auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
         // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
         // We must ignore such methods.
         if (A->args_size() == 0)
@@ -2404,16 +2431,14 @@
         // FIXME -- deal with exclusive vs. shared unlock functions?
         getMutexIDs(ExclusiveLocksToAdd, A, (Expr*) 0, D);
         getMutexIDs(LocksReleased, A, (Expr*) 0, D);
-      } else if (ExclusiveLockFunctionAttr *A
-                   = dyn_cast<ExclusiveLockFunctionAttr>(Attr)) {
+        CapDiagKind = ClassifyDiagnostic(A);
+      } else if (auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
         if (A->args_size() == 0)
           return;
-        getMutexIDs(ExclusiveLocksAcquired, A, (Expr*) 0, D);
-      } else if (SharedLockFunctionAttr *A
-                   = dyn_cast<SharedLockFunctionAttr>(Attr)) {
-        if (A->args_size() == 0)
-          return;
-        getMutexIDs(SharedLocksAcquired, A, (Expr*) 0, D);
+        getMutexIDs(A->isShared() ? SharedLocksAcquired
+                                  : ExclusiveLocksAcquired,
+                    A, nullptr, D);
+        CapDiagKind = ClassifyDiagnostic(A);
       } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
         // Don't try to check trylock functions for now
         return;
@@ -2424,14 +2449,12 @@
     }
 
     // FIXME -- Loc can be wrong here.
-    for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
-      addLock(InitialLockset, ExclusiveLocksToAdd[i],
-              LockData(Loc, LK_Exclusive));
-    }
-    for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
-      addLock(InitialLockset, SharedLocksToAdd[i],
-              LockData(Loc, LK_Shared));
-    }
+    for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
+      addLock(InitialLockset, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive),
+              CapDiagKind);
+    for (const auto &SharedLockToAdd : SharedLocksToAdd)
+      addLock(InitialLockset, SharedLockToAdd, LockData(Loc, LK_Shared),
+              CapDiagKind);
   }
 
   for (PostOrderCFGView::iterator I = SortedGraph->begin(),
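The thread-safety hunks above fold the old UnlockFunction, ExclusiveLocksRequired and SharedLocksRequired attributes into the generic ReleaseCapability, RequiresCapability and AcquireCapability forms, and thread a diagnostic kind ("mutex" by default) through addLock/removeLock. As a reference point, here is a minimal sketch of user code these attributes are meant to check; the Mutex wrapper and macro names are illustrative, only the capability attribute spellings come from Clang's -Wthread-safety support.

// Illustrative only: annotated code checked by -Wthread-safety. The Mutex
// class and macro names are assumptions; the attribute spellings are the
// capability forms this patch handles.
#define CAPABILITY(x)  __attribute__((capability(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x)  __attribute__((guarded_by(x)))

struct CAPABILITY("mutex") Mutex {
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

struct Counter {
  Mutex Mu;
  int Value GUARDED_BY(Mu);

  void Increment() REQUIRES(Mu) { ++Value; }  // caller must hold Mu

  void Tick() {
    Mu.Lock();
    Increment();     // OK, Mu is held here
    Mu.Unlock();
    // Increment();  // would warn: requires holding 'Mu'
  }
};

int main() {
  Counter C;
  C.Tick();
  return 0;
}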
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index 332c02c..29c17c3 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -373,9 +373,8 @@
 }
 
 void ClassifyRefs::VisitDeclStmt(DeclStmt *DS) {
-  for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
-       DI != DE; ++DI) {
-    VarDecl *VD = dyn_cast<VarDecl>(*DI);
+  for (auto *DI : DS->decls()) {
+    VarDecl *VD = dyn_cast<VarDecl>(DI);
     if (VD && isTrackedVar(VD))
       if (const DeclRefExpr *DRE = getSelfInitExpr(VD))
         Classification[DRE] = SelfInit;
@@ -535,6 +534,9 @@
       for (CFGBlock::const_pred_iterator I = B->pred_begin(), E = B->pred_end();
            I != E; ++I) {
         const CFGBlock *Pred = *I;
+        if (!Pred)
+          continue;
+
         Value AtPredExit = vals.getValue(Pred, B, vd);
         if (AtPredExit == Initialized)
           // This block initializes the variable.
@@ -630,12 +632,11 @@
 
 void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
   const BlockDecl *bd = be->getBlockDecl();
-  for (BlockDecl::capture_const_iterator i = bd->capture_begin(),
-        e = bd->capture_end() ; i != e; ++i) {
-    const VarDecl *vd = i->getVariable();
+  for (const auto &I : bd->captures()) {
+    const VarDecl *vd = I.getVariable();
     if (!isTrackedVar(vd))
       continue;
-    if (i->isByRef()) {
+    if (I.isByRef()) {
       vals[vd] = Initialized;
       continue;
     }
@@ -691,9 +692,8 @@
 }
 
 void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
-  for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
-       DI != DE; ++DI) {
-    VarDecl *VD = dyn_cast<VarDecl>(*DI);
+  for (auto *DI : DS->decls()) {
+    VarDecl *VD = dyn_cast<VarDecl>(DI);
     if (VD && isTrackedVar(VD)) {
       if (getSelfInitExpr(VD)) {
         // If the initializer consists solely of a reference to itself, we
@@ -751,6 +751,8 @@
   for (CFGBlock::const_pred_iterator I = block->pred_begin(),
        E = block->pred_end(); I != E; ++I) {
     const CFGBlock *pred = *I;
+    if (!pred)
+      continue;
     if (wasAnalyzed[pred->getBlockID()]) {
       vals.mergeIntoScratch(vals.getValueVector(pred), isFirst);
       isFirst = false;
@@ -787,8 +789,8 @@
   /// The current block to scribble use information.
   unsigned currentBlock;
 
-  virtual void handleUseOfUninitVariable(const VarDecl *vd,
-                                         const UninitUse &use) {
+  void handleUseOfUninitVariable(const VarDecl *vd,
+                                 const UninitUse &use) override {
     hadUse[currentBlock] = true;
     hadAnyUse = true;
   }
@@ -796,7 +798,7 @@
   /// Called when the uninitialized variable analysis detects the
   /// idiom 'int x = x'.  All other uses of 'x' within the initializer
   /// are handled by handleUseOfUninitVariable.
-  virtual void handleSelfInit(const VarDecl *vd) {
+  void handleSelfInit(const VarDecl *vd) override {
     hadUse[currentBlock] = true;
     hadAnyUse = true;
   }
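The UninitializedValues changes above are mostly mechanical (range-based loops, skipping null CFG predecessors), but the handleSelfInit/handleUseOfUninitVariable callbacks they touch correspond to concrete source patterns. A short sketch of the inputs the analysis distinguishes, using nothing beyond standard C++; compile with -Wuninitialized to see the diagnostics.

// Illustrative only: source patterns classified by the uninitialized-values
// analysis.
int selfInit() {
  int x = x;   // the 'int x = x' idiom routed through handleSelfInit()
  return x;    // x is still uninitialized here
}

int maybeUninit(bool b) {
  int y;       // no initializer
  if (b)
    y = 1;     // initialized on only one CFG path
  return y;    // may be read uninitialized when b is false
}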
diff --git a/lib/Basic/Android.mk b/lib/Basic/Android.mk
index 2407a00..54e61c2 100644
--- a/lib/Basic/Android.mk
+++ b/lib/Basic/Android.mk
@@ -6,6 +6,7 @@
 include $(CLEAR_TBLGEN_VARS)
 
 TBLGEN_TABLES := \
+  AttrHasAttributeImpl.inc \
   DiagnosticASTKinds.inc \
   DiagnosticAnalysisKinds.inc \
   DiagnosticCommentKinds.inc \
@@ -21,6 +22,7 @@
   arm_neon.inc
 
 clang_basic_SRC_FILES := \
+  Attributes.cpp \
   Builtins.cpp \
   CharInfo.cpp \
   Diagnostic.cpp \
@@ -39,7 +41,8 @@
   Targets.cpp \
   TokenKinds.cpp \
   Version.cpp \
-  VersionTuple.cpp
+  VersionTuple.cpp \
+  VirtualFileSystem.cpp
 
 LOCAL_SRC_FILES := $(clang_basic_SRC_FILES)
 
diff --git a/lib/Basic/Attributes.cpp b/lib/Basic/Attributes.cpp
new file mode 100644
index 0000000..a05ad05
--- /dev/null
+++ b/lib/Basic/Attributes.cpp
@@ -0,0 +1,17 @@
+#include "clang/Basic/Attributes.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+bool clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
+                         const IdentifierInfo *Attr, const llvm::Triple &T,
+                         const LangOptions &LangOpts) {
+  StringRef Name = Attr->getName();
+  // Normalize the attribute name, __foo__ becomes foo.
+  if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
+    Name = Name.substr(2, Name.size() - 4);
+
+#include "clang/Basic/AttrHasAttributeImpl.inc"
+
+  return false;
+}
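The new Basic/Attributes.cpp (together with the AttrHasAttributeImpl.inc table added to the build files above) implements clang::hasAttribute(), the lookup used for attribute-presence queries such as __has_attribute. A minimal sketch of that query from the user side; the NODISCARD macro and the checksum function are made-up names.

// Illustrative only: the preprocessor-level query answered through
// hasAttribute(). NODISCARD and computeChecksum are placeholders.
#ifndef __has_attribute
#  define __has_attribute(x) 0   // fallback for compilers without the query
#endif

#if __has_attribute(warn_unused_result)
#  define NODISCARD __attribute__((warn_unused_result))
#else
#  define NODISCARD
#endif

NODISCARD static int computeChecksum() { return 42; }

int main() {
  computeChecksum();          // clang warns about the ignored result
  return computeChecksum();
}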
diff --git a/lib/Basic/Builtins.cpp b/lib/Basic/Builtins.cpp
index c84dd6d..2fd00dd0 100644
--- a/lib/Basic/Builtins.cpp
+++ b/lib/Basic/Builtins.cpp
@@ -97,40 +97,35 @@
   Table.get(GetRecord(ID).Name).setBuiltinID(0);
 }
 
-bool
-Builtin::Context::isPrintfLike(unsigned ID, unsigned &FormatIdx,
-                               bool &HasVAListArg) {
-  const char *Printf = strpbrk(GetRecord(ID).Attributes, "pP");
-  if (!Printf)
+bool Builtin::Context::isLike(unsigned ID, unsigned &FormatIdx,
+                              bool &HasVAListArg, const char *Fmt) const {
+  assert(Fmt && "Not passed a format string");
+  assert(::strlen(Fmt) == 2 &&
+         "Format string needs to be two characters long");
+  assert(::toupper(Fmt[0]) == Fmt[1] &&
+         "Format string is not in the form \"xX\"");
+
+  const char *Like = ::strpbrk(GetRecord(ID).Attributes, Fmt);
+  if (!Like)
     return false;
 
-  HasVAListArg = (*Printf == 'P');
+  HasVAListArg = (*Like == Fmt[1]);
 
-  ++Printf;
-  assert(*Printf == ':' && "p or P specifier must have be followed by a ':'");
-  ++Printf;
+  ++Like;
+  assert(*Like == ':' && "Format specifier must be followed by a ':'");
+  ++Like;
 
-  assert(strchr(Printf, ':') && "printf specifier must end with a ':'");
-  FormatIdx = strtol(Printf, 0, 10);
+  assert(::strchr(Like, ':') && "Format specifier must end with a ':'");
+  FormatIdx = ::strtol(Like, 0, 10);
   return true;
 }
 
-// FIXME: Refactor with isPrintfLike.
-bool
-Builtin::Context::isScanfLike(unsigned ID, unsigned &FormatIdx,
-                              bool &HasVAListArg) {
-  const char *Scanf = strpbrk(GetRecord(ID).Attributes, "sS");
-  if (!Scanf)
-    return false;
-
-  HasVAListArg = (*Scanf == 'S');
-
-  ++Scanf;
-  assert(*Scanf == ':' && "s or S specifier must have be followed by a ':'");
-  ++Scanf;
-
-  assert(strchr(Scanf, ':') && "printf specifier must end with a ':'");
-  FormatIdx = strtol(Scanf, 0, 10);
-  return true;
+bool Builtin::Context::isPrintfLike(unsigned ID, unsigned &FormatIdx,
+                                    bool &HasVAListArg) {
+  return isLike(ID, FormatIdx, HasVAListArg, "pP");
 }
 
+bool Builtin::Context::isScanfLike(unsigned ID, unsigned &FormatIdx,
+                                   bool &HasVAListArg) {
+  return isLike(ID, FormatIdx, HasVAListArg, "sS");
+}
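The Builtins change above collapses two nearly identical scanners into one isLike() helper parameterized by the attribute letters ("pP" or "sS"). A standalone sketch of the same pattern, assuming only that the attribute string encodes the format argument as '<letter>:<index>:' with the uppercase letter marking the va_list variant, as the new asserts suggest; this is not Clang's API.

// Illustrative only: the shape of the isLike() refactoring.
#include <cassert>
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Attrs encodes e.g. "P:1:" (printf-like, va_list form, format arg index 1).
static bool isLike(const char *Attrs, unsigned &FormatIdx, bool &HasVAListArg,
                   const char *Fmt) {
  assert(std::strlen(Fmt) == 2 && std::toupper(Fmt[0]) == Fmt[1]);
  const char *Like = std::strpbrk(Attrs, Fmt);
  if (!Like)
    return false;
  HasVAListArg = (*Like == Fmt[1]);  // uppercase letter => takes a va_list
  ++Like;
  assert(*Like == ':' && "format specifier must be followed by a ':'");
  FormatIdx = static_cast<unsigned>(std::strtol(Like + 1, nullptr, 10));
  return true;
}

static bool isPrintfLike(const char *Attrs, unsigned &Idx, bool &VA) {
  return isLike(Attrs, Idx, VA, "pP");
}
static bool isScanfLike(const char *Attrs, unsigned &Idx, bool &VA) {
  return isLike(Attrs, Idx, VA, "sS");
}

int main() {
  unsigned Idx; bool VA;
  if (isPrintfLike("fP:1:", Idx, VA))
    std::printf("printf-like, format arg %u, va_list=%d\n", Idx, VA);
  return 0;
}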
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
index 3411169..0448fdb 100644
--- a/lib/Basic/CMakeLists.txt
+++ b/lib/Basic/CMakeLists.txt
@@ -1,6 +1,10 @@
-set(LLVM_LINK_COMPONENTS mc)
+set(LLVM_LINK_COMPONENTS
+  MC
+  Support
+  )
 
 add_clang_library(clangBasic
+  Attributes.cpp
   Builtins.cpp
   CharInfo.cpp
   Diagnostic.cpp
@@ -20,6 +24,7 @@
   TokenKinds.cpp
   Version.cpp
   VersionTuple.cpp
+  VirtualFileSystem.cpp
   )
 
 # Determine Subversion revision.
@@ -29,17 +34,15 @@
   find_package(Subversion)
 endif()
 if (Subversion_FOUND AND EXISTS "${CLANG_SOURCE_DIR}/.svn")
-  # Create custom target to generate the Subversion version include.
-  add_custom_target(clang_revision_tag  ALL
-    COMMAND ${CMAKE_COMMAND} -DFIRST_SOURCE_DIR=${LLVM_MAIN_SRC_DIR}
-                             -DFIRST_REPOSITORY=LLVM_REPOSITORY
-                             -DSECOND_SOURCE_DIR=${CLANG_SOURCE_DIR}
-                             -DSECOND_REPOSITORY=SVN_REPOSITORY
-                             -DHEADER_FILE=${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc
-   -P ${LLVM_MAIN_SRC_DIR}/cmake/modules/GetSVN.cmake)
+  set(FIRST_SOURCE_DIR ${LLVM_MAIN_SRC_DIR})
+  set(FIRST_REPOSITORY LLVM_REPOSITORY)
+  set(SECOND_SOURCE_DIR ${CLANG_SOURCE_DIR})
+  set(SECOND_REPOSITORY SVN_REPOSITORY)
+  set(HEADER_FILE ${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc)
+  include(GetSVN)
 
   # Mark the generated header as being generated.
-message(STATUS "Expecting header to go in ${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc")
+  message(STATUS "Expecting header to go in ${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc")
   set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc
     PROPERTIES GENERATED TRUE
                HEADER_FILE_ONLY TRUE)
@@ -49,25 +52,3 @@
     PROPERTIES COMPILE_DEFINITIONS "HAVE_SVN_VERSION_INC")
 
 endif()
-
-add_dependencies(clangBasic
-  ClangARMNeon
-  ClangAttrList
-  ClangDiagnosticAnalysis
-  ClangDiagnosticAST
-  ClangDiagnosticComment
-  ClangDiagnosticCommon
-  ClangDiagnosticDriver
-  ClangDiagnosticFrontend
-  ClangDiagnosticGroups
-  ClangDiagnosticIndexName
-  ClangDiagnosticLex
-  ClangDiagnosticParse
-  ClangDiagnosticSema
-  ClangDiagnosticSerialization
-  )
-
-# clangBasic depends on the version.
-if (Subversion_FOUND AND EXISTS "${CLANG_SOURCE_DIR}/.svn")
-  add_dependencies(clangBasic clang_revision_tag)
-endif()
\ No newline at end of file
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 45d4b53..f507548 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -222,7 +222,7 @@
 
   // Create a new state/point and fit it into the vector of DiagStatePoints
   // so that the vector is always ordered according to location.
-  Pos->Loc.isBeforeInTranslationUnitThan(Loc);
+  assert(Pos->Loc.isBeforeInTranslationUnitThan(Loc));
   DiagStates.push_back(*Pos->State);
   DiagState *NewState = &DiagStates.back();
   GetCurDiagState()->setMappingInfo(Diag, MappingInfo);
@@ -245,24 +245,6 @@
   return false;
 }
 
-void DiagnosticsEngine::setDiagnosticWarningAsError(diag::kind Diag,
-                                                    bool Enabled) {
-  // If we are enabling this feature, just set the diagnostic mappings to map to
-  // errors.
-  if (Enabled) 
-    setDiagnosticMapping(Diag, diag::MAP_ERROR, SourceLocation());
-
-  // Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and
-  // potentially downgrade anything already mapped to be a warning.
-  DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(Diag);
-
-  if (Info.getMapping() == diag::MAP_ERROR ||
-      Info.getMapping() == diag::MAP_FATAL)
-    Info.setMapping(diag::MAP_WARNING);
-
-  Info.setNoWarningAsError(true);
-}
-
 bool DiagnosticsEngine::setDiagnosticGroupWarningAsError(StringRef Group,
                                                          bool Enabled) {
   // If we are enabling this feature, just set the diagnostic mappings to map to
@@ -293,23 +275,6 @@
   return false;
 }
 
-void DiagnosticsEngine::setDiagnosticErrorAsFatal(diag::kind Diag,
-                                                  bool Enabled) {
-  // If we are enabling this feature, just set the diagnostic mappings to map to
-  // errors.
-  if (Enabled)
-    setDiagnosticMapping(Diag, diag::MAP_FATAL, SourceLocation());
-  
-  // Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and
-  // potentially downgrade anything already mapped to be a warning.
-  DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(Diag);
-  
-  if (Info.getMapping() == diag::MAP_FATAL)
-    Info.setMapping(diag::MAP_ERROR);
-  
-  Info.setNoErrorAsFatal(true);
-}
-
 bool DiagnosticsEngine::setDiagnosticGroupErrorAsFatal(StringRef Group,
                                                        bool Enabled) {
   // If we are enabling this feature, just set the diagnostic mappings to map to
@@ -639,6 +604,17 @@
   }
 }
 
+/// \brief Returns the friendly description for a token kind that will appear
+/// without quotes in diagnostic messages. These strings may be translatable in
+/// future.
+static const char *getTokenDescForDiagnostic(tok::TokenKind Kind) {
+  switch (Kind) {
+  case tok::identifier:
+    return "identifier";
+  default:
+    return 0;
+  }
+}
 
 /// FormatDiagnostic - Format this diagnostic into a string, substituting the
 /// formal arguments into the %0 slots.  The result is appended onto the Str
@@ -812,6 +788,28 @@
       }
       break;
     }
+    // ---- TOKEN SPELLINGS ----
+    case DiagnosticsEngine::ak_tokenkind: {
+      tok::TokenKind Kind = static_cast<tok::TokenKind>(getRawArg(ArgNo));
+      assert(ModifierLen == 0 && "No modifiers for token kinds yet");
+
+      llvm::raw_svector_ostream Out(OutStr);
+      if (const char *S = tok::getPunctuatorSpelling(Kind))
+        // Quoted token spelling for punctuators.
+        Out << '\'' << S << '\'';
+      else if (const char *S = tok::getKeywordSpelling(Kind))
+        // Unquoted token spelling for keywords.
+        Out << S;
+      else if (const char *S = getTokenDescForDiagnostic(Kind))
+        // Unquoted translatable token name.
+        Out << S;
+      else if (const char *S = tok::getTokenName(Kind))
+        // Debug name, shouldn't appear in user-facing diagnostics.
+        Out << '<' << S << '>';
+      else
+        Out << "(null)";
+      break;
+    }
     // ---- NAMES and TYPES ----
     case DiagnosticsEngine::ak_identifierinfo: {
       const IdentifierInfo *II = getArgIdentifier(ArgNo);
@@ -832,6 +830,7 @@
     case DiagnosticsEngine::ak_nameddecl:
     case DiagnosticsEngine::ak_nestednamespec:
     case DiagnosticsEngine::ak_declcontext:
+    case DiagnosticsEngine::ak_attr:
       getDiags()->ConvertArgToString(Kind, getRawArg(ArgNo),
                                      Modifier, ModifierLen,
                                      Argument, ArgumentLen,
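The new ak_tokenkind case above prints a token-kind diagnostic argument using the first human-readable spelling available: quoted punctuator, then keyword, then friendly description, then the debug name. A standalone sketch of that fallback order with stubbed lookups; the real lookups are the clang::tok helpers and getTokenDescForDiagnostic.

// Illustrative only: the spelling-fallback order for token-kind arguments.
#include <iostream>
#include <string>

enum Tok { Arrow, KwReturn, Identifier };

static const char *getPunctuatorSpelling(Tok K) { return K == Arrow ? "->" : nullptr; }
static const char *getKeywordSpelling(Tok K) { return K == KwReturn ? "return" : nullptr; }
static const char *getTokenDesc(Tok K) { return K == Identifier ? "identifier" : nullptr; }
static const char *getTokenName(Tok) { return "raw_identifier"; }

static std::string describeToken(Tok K) {
  if (const char *S = getPunctuatorSpelling(K))
    return std::string("'") + S + "'";            // punctuators are quoted
  if (const char *S = getKeywordSpelling(K))
    return S;                                     // keywords appear unquoted
  if (const char *S = getTokenDesc(K))
    return S;                                     // friendly description
  return std::string("<") + getTokenName(K) + ">"; // debug-name fallback
}

int main() {
  std::cout << describeToken(Arrow) << " " << describeToken(KwReturn) << " "
            << describeToken(Identifier) << "\n";  // prints: '->' return identifier
}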
diff --git a/lib/Basic/DiagnosticIDs.cpp b/lib/Basic/DiagnosticIDs.cpp
index 9d99fbe..cf6933a 100644
--- a/lib/Basic/DiagnosticIDs.cpp
+++ b/lib/Basic/DiagnosticIDs.cpp
@@ -15,8 +15,8 @@
 #include "clang/Basic/AllDiagnostics.h"
 #include "clang/Basic/DiagnosticCategories.h"
 #include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <map>
 using namespace clang;
@@ -30,9 +30,10 @@
 // Diagnostic classes.
 enum {
   CLASS_NOTE       = 0x01,
-  CLASS_WARNING    = 0x02,
-  CLASS_EXTENSION  = 0x03,
-  CLASS_ERROR      = 0x04
+  CLASS_REMARK     = 0x02,
+  CLASS_WARNING    = 0x03,
+  CLASS_EXTENSION  = 0x04,
+  CLASS_ERROR      = 0x05
 };
 
 struct StaticDiagInfoRec {
@@ -111,7 +112,7 @@
     return 0;
 
   // Compute the index of the requested diagnostic in the static table.
-  // 1. Add the number of diagnostics in each category preceeding the
+  // 1. Add the number of diagnostics in each category preceding the
   //    diagnostic and of the category the diagnostic is in. This gives us
   //    the offset of the category in the table.
   // 2. Subtract the number of IDs in each category from our ID. This gives us
@@ -311,10 +312,13 @@
 /// getCustomDiagID - Return an ID for a diagnostic with the specified message
 /// and level.  If this is the first request for this diagnostic, it is
 /// registered and created, otherwise the existing ID is returned.
-unsigned DiagnosticIDs::getCustomDiagID(Level L, StringRef Message) {
+///
+/// \param FormatString A fixed diagnostic format string that will be hashed and
+/// mapped to a unique DiagID.
+unsigned DiagnosticIDs::getCustomDiagID(Level L, StringRef FormatString) {
   if (CustomDiagInfo == 0)
     CustomDiagInfo = new diag::CustomDiagInfo();
-  return CustomDiagInfo->getOrCreateDiagID(L, Message, *this);
+  return CustomDiagInfo->getOrCreateDiagID(L, FormatString, *this);
 }
 
 
@@ -406,6 +410,9 @@
   case diag::MAP_IGNORE:
     Result = DiagnosticIDs::Ignored;
     break;
+  case diag::MAP_REMARK:
+    Result = DiagnosticIDs::Remark;
+    break;
   case diag::MAP_WARNING:
     Result = DiagnosticIDs::Warning;
     break;
@@ -422,6 +429,11 @@
       !MappingInfo.isUser())
     Result = DiagnosticIDs::Warning;
 
+  // Diagnostics of class REMARK are either printed as remarks or in case they
+  // have been added to -Werror they are printed as errors.
+  if (DiagClass == CLASS_REMARK && Result == DiagnosticIDs::Warning)
+    Result = DiagnosticIDs::Remark;
+
   // Ignore -pedantic diagnostics inside __extension__ blocks.
   // (The diagnostics controlled by -pedantic are the extension diagnostics
   // that are not enabled by default.)
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index af9b266..579c818 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -30,19 +30,6 @@
 #include <set>
 #include <string>
 
-// FIXME: This is terrible, we need this for ::close.
-#if !defined(_MSC_VER) && !defined(__MINGW32__)
-#include <unistd.h>
-#include <sys/uio.h>
-#else
-#include <io.h>
-#ifndef S_ISFIFO
-#define S_ISFIFO(x) (0)
-#endif
-#endif
-#if defined(LLVM_ON_UNIX)
-#include <limits.h>
-#endif
 using namespace clang;
 
 // FIXME: Enhance libsystem to support inode and other fields.
@@ -56,63 +43,24 @@
 /// represent a filename that doesn't exist on the disk.
 #define NON_EXISTENT_FILE reinterpret_cast<FileEntry*>((intptr_t)-1)
 
-
-FileEntry::~FileEntry() {
-  // If this FileEntry owns an open file descriptor that never got used, close
-  // it.
-  if (FD != -1) ::close(FD);
-}
-
-class FileManager::UniqueDirContainer {
-  /// UniqueDirs - Cache from ID's to existing directories/files.
-  std::map<llvm::sys::fs::UniqueID, DirectoryEntry> UniqueDirs;
-
-public:
-  /// getDirectory - Return an existing DirectoryEntry with the given
-  /// ID's if there is already one; otherwise create and return a
-  /// default-constructed DirectoryEntry.
-  DirectoryEntry &getDirectory(const llvm::sys::fs::UniqueID &UniqueID) {
-    return UniqueDirs[UniqueID];
-  }
-
-  size_t size() const { return UniqueDirs.size(); }
-};
-
-class FileManager::UniqueFileContainer {
-  /// UniqueFiles - Cache from ID's to existing directories/files.
-  std::set<FileEntry> UniqueFiles;
-
-public:
-  /// getFile - Return an existing FileEntry with the given ID's if
-  /// there is already one; otherwise create and return a
-  /// default-constructed FileEntry.
-  FileEntry &getFile(llvm::sys::fs::UniqueID UniqueID, bool IsNamedPipe,
-                     bool InPCH) {
-    return const_cast<FileEntry &>(
-        *UniqueFiles.insert(FileEntry(UniqueID, IsNamedPipe, InPCH)).first);
-  }
-
-  size_t size() const { return UniqueFiles.size(); }
-
-  void erase(const FileEntry *Entry) { UniqueFiles.erase(*Entry); }
-};
-
 //===----------------------------------------------------------------------===//
 // Common logic.
 //===----------------------------------------------------------------------===//
 
-FileManager::FileManager(const FileSystemOptions &FSO)
-  : FileSystemOpts(FSO),
-    UniqueRealDirs(*new UniqueDirContainer()),
-    UniqueRealFiles(*new UniqueFileContainer()),
+FileManager::FileManager(const FileSystemOptions &FSO,
+                         IntrusiveRefCntPtr<vfs::FileSystem> FS)
+  : FS(FS), FileSystemOpts(FSO),
     SeenDirEntries(64), SeenFileEntries(64), NextFileUID(0) {
   NumDirLookups = NumFileLookups = 0;
   NumDirCacheMisses = NumFileCacheMisses = 0;
+
+  // If the caller doesn't provide a virtual file system, just grab the real
+  // file system.
+  if (!FS)
+    this->FS = vfs::getRealFileSystem();
 }
 
 FileManager::~FileManager() {
-  delete &UniqueRealDirs;
-  delete &UniqueRealFiles;
   for (unsigned i = 0, e = VirtualFileEntries.size(); i != e; ++i)
     delete VirtualFileEntries[i];
   for (unsigned i = 0, e = VirtualDirectoryEntries.size(); i != e; ++i)
@@ -123,7 +71,7 @@
                                bool AtBeginning) {
   assert(statCache && "No stat cache provided?");
   if (AtBeginning || StatCache.get() == 0) {
-    statCache->setNextStatCache(StatCache.take());
+    statCache->setNextStatCache(StatCache.release());
     StatCache.reset(statCache);
     return;
   }
@@ -256,8 +204,7 @@
   // same inode (this occurs on Unix-like systems when one dir is
   // symlinked to another, for example) or the same path (on
   // Windows).
-  DirectoryEntry &UDE =
-      UniqueRealDirs.getDirectory(Data.UniqueID);
+  DirectoryEntry &UDE = UniqueRealDirs[Data.UniqueID];
 
   NamedDirEnt.setValue(&UDE);
   if (!UDE.getName()) {
@@ -309,10 +256,9 @@
   // FIXME: This will reduce the # syscalls.
 
   // Nope, there isn't.  Check to see if the file exists.
-  int FileDescriptor = -1;
+  vfs::File *F = 0;
   FileData Data;
-  if (getStatValue(InterndFileName, Data, true,
-                   openFile ? &FileDescriptor : 0)) {
+  if (getStatValue(InterndFileName, Data, true, openFile ? &F : 0)) {
     // There's no real file at the given path.
     if (!CacheFailure)
       SeenFileEntries.erase(Filename);
@@ -320,34 +266,32 @@
     return 0;
   }
 
-  if (FileDescriptor != -1 && !openFile) {
-    close(FileDescriptor);
-    FileDescriptor = -1;
-  }
+  assert((openFile || !F) && "undesired open file");
 
   // It exists.  See if we have already opened a file with the same inode.
   // This occurs when one dir is symlinked to another, for example.
-  FileEntry &UFE =
-      UniqueRealFiles.getFile(Data.UniqueID, Data.IsNamedPipe, Data.InPCH);
+  FileEntry &UFE = UniqueRealFiles[Data.UniqueID];
 
   NamedFileEnt.setValue(&UFE);
-  if (UFE.getName()) { // Already have an entry with this inode, return it.
+  if (UFE.isValid()) { // Already have an entry with this inode, return it.
     // If the stat process opened the file, close it to avoid a FD leak.
-    if (FileDescriptor != -1)
-      close(FileDescriptor);
+    if (F)
+      delete F;
 
     return &UFE;
   }
 
-  // Otherwise, we don't have this directory yet, add it.
-  // FIXME: Change the name to be a char* that points back to the
-  // 'SeenFileEntries' key.
-  UFE.Name    = InterndFileName;
+  // Otherwise, we don't have this file yet, add it.
+  UFE.Name    = Data.Name;
   UFE.Size = Data.Size;
   UFE.ModTime = Data.ModTime;
   UFE.Dir     = DirInfo;
   UFE.UID     = NextFileUID++;
-  UFE.FD      = FileDescriptor;
+  UFE.UniqueID = Data.UniqueID;
+  UFE.IsNamedPipe = Data.IsNamedPipe;
+  UFE.InPCH = Data.InPCH;
+  UFE.File.reset(F);
+  UFE.IsValid = true;
   return &UFE;
 }
 
@@ -386,21 +330,23 @@
   if (getStatValue(InterndFileName, Data, true, 0) == 0) {
     Data.Size = Size;
     Data.ModTime = ModificationTime;
-    UFE = &UniqueRealFiles.getFile(Data.UniqueID, Data.IsNamedPipe, Data.InPCH);
+    UFE = &UniqueRealFiles[Data.UniqueID];
 
     NamedFileEnt.setValue(UFE);
 
     // If we had already opened this file, close it now so we don't
     // leak the descriptor. We're not going to use the file
     // descriptor anyway, since this is a virtual file.
-    if (UFE->FD != -1) {
-      close(UFE->FD);
-      UFE->FD = -1;
-    }
+    if (UFE->File)
+      UFE->closeFile();
 
     // If we already have an entry with this inode, return it.
-    if (UFE->getName())
+    if (UFE->isValid())
       return UFE;
+
+    UFE->UniqueID = Data.UniqueID;
+    UFE->IsNamedPipe = Data.IsNamedPipe;
+    UFE->InPCH = Data.InPCH;
   }
 
   if (!UFE) {
@@ -414,7 +360,7 @@
   UFE->ModTime = ModificationTime;
   UFE->Dir     = DirInfo;
   UFE->UID     = NextFileUID++;
-  UFE->FD      = -1;
+  UFE->File.reset();
   return UFE;
 }
 
@@ -433,7 +379,7 @@
 llvm::MemoryBuffer *FileManager::
 getBufferForFile(const FileEntry *Entry, std::string *ErrorStr,
                  bool isVolatile) {
-  OwningPtr<llvm::MemoryBuffer> Result;
+  std::unique_ptr<llvm::MemoryBuffer> Result;
   llvm::error_code ec;
 
   uint64_t FileSize = Entry->getSize();
@@ -444,50 +390,48 @@
 
   const char *Filename = Entry->getName();
   // If the file is already open, use the open file descriptor.
-  if (Entry->FD != -1) {
-    ec = llvm::MemoryBuffer::getOpenFile(Entry->FD, Filename, Result, FileSize);
+  if (Entry->File) {
+    ec = Entry->File->getBuffer(Filename, Result, FileSize);
     if (ErrorStr)
       *ErrorStr = ec.message();
-
-    close(Entry->FD);
-    Entry->FD = -1;
-    return Result.take();
+    Entry->closeFile();
+    return Result.release();
   }
 
   // Otherwise, open the file.
 
   if (FileSystemOpts.WorkingDir.empty()) {
-    ec = llvm::MemoryBuffer::getFile(Filename, Result, FileSize);
+    ec = FS->getBufferForFile(Filename, Result, FileSize);
     if (ec && ErrorStr)
       *ErrorStr = ec.message();
-    return Result.take();
+    return Result.release();
   }
 
   SmallString<128> FilePath(Entry->getName());
   FixupRelativePath(FilePath);
-  ec = llvm::MemoryBuffer::getFile(FilePath.str(), Result, FileSize);
+  ec = FS->getBufferForFile(FilePath.str(), Result, FileSize);
   if (ec && ErrorStr)
     *ErrorStr = ec.message();
-  return Result.take();
+  return Result.release();
 }
 
 llvm::MemoryBuffer *FileManager::
 getBufferForFile(StringRef Filename, std::string *ErrorStr) {
-  OwningPtr<llvm::MemoryBuffer> Result;
+  std::unique_ptr<llvm::MemoryBuffer> Result;
   llvm::error_code ec;
   if (FileSystemOpts.WorkingDir.empty()) {
-    ec = llvm::MemoryBuffer::getFile(Filename, Result);
+    ec = FS->getBufferForFile(Filename, Result);
     if (ec && ErrorStr)
       *ErrorStr = ec.message();
-    return Result.take();
+    return Result.release();
   }
 
   SmallString<128> FilePath(Filename);
   FixupRelativePath(FilePath);
-  ec = llvm::MemoryBuffer::getFile(FilePath.c_str(), Result);
+  ec = FS->getBufferForFile(FilePath.c_str(), Result);
   if (ec && ErrorStr)
     *ErrorStr = ec.message();
-  return Result.take();
+  return Result.release();
 }
 
 /// getStatValue - Get the 'stat' information for the specified path,
@@ -496,26 +440,29 @@
 /// false if it's an existent real file.  If FileDescriptor is NULL,
 /// do directory look-up instead of file look-up.
 bool FileManager::getStatValue(const char *Path, FileData &Data, bool isFile,
-                               int *FileDescriptor) {
+                               vfs::File **F) {
   // FIXME: FileSystemOpts shouldn't be passed in here, all paths should be
   // absolute!
   if (FileSystemOpts.WorkingDir.empty())
-    return FileSystemStatCache::get(Path, Data, isFile, FileDescriptor,
-                                    StatCache.get());
+    return FileSystemStatCache::get(Path, Data, isFile, F, StatCache.get(), *FS);
 
   SmallString<128> FilePath(Path);
   FixupRelativePath(FilePath);
 
-  return FileSystemStatCache::get(FilePath.c_str(), Data, isFile,
-                                  FileDescriptor, StatCache.get());
+  return FileSystemStatCache::get(FilePath.c_str(), Data, isFile, F,
+                                  StatCache.get(), *FS);
 }
 
 bool FileManager::getNoncachedStatValue(StringRef Path,
-                                        llvm::sys::fs::file_status &Result) {
+                                        vfs::Status &Result) {
   SmallString<128> FilePath(Path);
   FixupRelativePath(FilePath);
 
-  return llvm::sys::fs::status(FilePath.c_str(), Result);
+  llvm::ErrorOr<vfs::Status> S = FS->status(FilePath.c_str());
+  if (!S)
+    return true;
+  Result = *S;
+  return false;
 }
 
 void FileManager::invalidateCache(const FileEntry *Entry) {
@@ -526,7 +473,7 @@
   // FileEntry invalidation should not block future optimizations in the file
   // caches. Possible alternatives are cache truncation (invalidate last N) or
   // invalidation of the whole cache.
-  UniqueRealFiles.erase(Entry);
+  UniqueRealFiles.erase(Entry->getUniqueID());
 }
 
 
diff --git a/lib/Basic/FileSystemStatCache.cpp b/lib/Basic/FileSystemStatCache.cpp
index 7a01bff..9d87999 100644
--- a/lib/Basic/FileSystemStatCache.cpp
+++ b/lib/Basic/FileSystemStatCache.cpp
@@ -12,7 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Basic/FileSystemStatCache.h"
-#include "llvm/Support/FileSystem.h"
+#include "clang/Basic/VirtualFileSystem.h"
 #include "llvm/Support/Path.h"
 
 // FIXME: This is terrible, we need this for ::close.
@@ -30,13 +30,14 @@
 
 void FileSystemStatCache::anchor() { }
 
-static void copyStatusToFileData(const llvm::sys::fs::file_status &Status,
+static void copyStatusToFileData(const vfs::Status &Status,
                                  FileData &Data) {
+  Data.Name = Status.getName();
   Data.Size = Status.getSize();
   Data.ModTime = Status.getLastModificationTime().toEpochTime();
   Data.UniqueID = Status.getUniqueID();
-  Data.IsDirectory = is_directory(Status);
-  Data.IsNamedPipe = Status.type() == llvm::sys::fs::file_type::fifo_file;
+  Data.IsDirectory = Status.isDirectory();
+  Data.IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
   Data.InPCH = false;
 }
 
@@ -50,22 +51,23 @@
 /// implementation can optionally fill in FileDescriptor with a valid
 /// descriptor and the client guarantees that it will close it.
 bool FileSystemStatCache::get(const char *Path, FileData &Data, bool isFile,
-                              int *FileDescriptor, FileSystemStatCache *Cache) {
+                              vfs::File **F, FileSystemStatCache *Cache,
+                              vfs::FileSystem &FS) {
   LookupResult R;
   bool isForDir = !isFile;
 
   // If we have a cache, use it to resolve the stat query.
   if (Cache)
-    R = Cache->getStat(Path, Data, isFile, FileDescriptor);
-  else if (isForDir || !FileDescriptor) {
+    R = Cache->getStat(Path, Data, isFile, F, FS);
+  else if (isForDir || !F) {
     // If this is a directory or a file descriptor is not needed and we have
     // no cache, just go to the file system.
-    llvm::sys::fs::file_status Status;
-    if (llvm::sys::fs::status(Path, Status)) {
+    llvm::ErrorOr<vfs::Status> Status = FS.status(Path);
+    if (!Status) {
       R = CacheMissing;
     } else {
       R = CacheExists;
-      copyStatusToFileData(Status, Data);
+      copyStatusToFileData(*Status, Data);
     }
   } else {
     // Otherwise, we have to go to the filesystem.  We can always just use
@@ -75,7 +77,8 @@
     //
     // Because of this, check to see if the file exists with 'open'.  If the
     // open succeeds, use fstat to get the stat info.
-    llvm::error_code EC = llvm::sys::fs::openFileForRead(Path, *FileDescriptor);
+    std::unique_ptr<vfs::File> OwnedFile;
+    llvm::error_code EC = FS.openFileForRead(Path, OwnedFile);
 
     if (EC) {
       // If the open fails, our "stat" fails.
@@ -84,16 +87,16 @@
       // Otherwise, the open succeeded.  Do an fstat to get the information
       // about the file.  We'll end up returning the open file descriptor to the
       // client to do what they please with it.
-      llvm::sys::fs::file_status Status;
-      if (!llvm::sys::fs::status(*FileDescriptor, Status)) {
+      llvm::ErrorOr<vfs::Status> Status = OwnedFile->status();
+      if (Status) {
         R = CacheExists;
-        copyStatusToFileData(Status, Data);
+        copyStatusToFileData(*Status, Data);
+        *F = OwnedFile.release();
       } else {
         // fstat rarely fails.  If it does, claim the initial open didn't
         // succeed.
         R = CacheMissing;
-        ::close(*FileDescriptor);
-        *FileDescriptor = -1;
+        *F = 0;
       }
     }
   }
@@ -105,9 +108,9 @@
   // demands.
   if (Data.IsDirectory != isForDir) {
     // If not, close the file if opened.
-    if (FileDescriptor && *FileDescriptor != -1) {
-      ::close(*FileDescriptor);
-      *FileDescriptor = -1;
+    if (F && *F) {
+      (*F)->close();
+      *F = 0;
     }
     
     return true;
@@ -118,8 +121,8 @@
 
 MemorizeStatCalls::LookupResult
 MemorizeStatCalls::getStat(const char *Path, FileData &Data, bool isFile,
-                           int *FileDescriptor) {
-  LookupResult Result = statChained(Path, Data, isFile, FileDescriptor);
+                           vfs::File **F, vfs::FileSystem &FS) {
+  LookupResult Result = statChained(Path, Data, isFile, F, FS);
 
   // Do not cache failed stats, it is easy to construct common inconsistent
   // situations if we do, and they are not important for PCH performance (which
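The FileManager and FileSystemStatCache hunks above route stat and open requests through a vfs::FileSystem (and vfs::File*) instead of raw POSIX file descriptors. A minimal sketch of the caller-side wiring, following the constructor signature introduced in this patch; the path handling and output are placeholders and error handling is omitted.

// Illustrative only: constructing a FileManager on top of a virtual file
// system, per the new constructor in this patch.
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/raw_ostream.h"

static void statThroughVFS(const char *Path) {
  clang::FileSystemOptions Opts;
  llvm::IntrusiveRefCntPtr<clang::vfs::FileSystem> FS =
      clang::vfs::getRealFileSystem();  // same default the constructor picks
  clang::FileManager FileMgr(Opts, FS);

  if (const clang::FileEntry *FE = FileMgr.getFile(Path, /*openFile=*/false))
    llvm::outs() << FE->getName() << ": " << FE->getSize() << " bytes\n";
  else
    llvm::outs() << Path << ": not found\n";
}

int main(int argc, char **argv) {
  statThroughVFS(argc > 1 ? argv[1] : "/etc/hosts");
  return 0;
}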
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index 500e732..3b8aeec 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -13,8 +13,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Basic/IdentifierTable.h"
-#include "clang/Basic/LangOptions.h"
 #include "clang/Basic/CharInfo.h"
+#include "clang/Basic/LangOptions.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/FoldingSet.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -60,7 +60,7 @@
   class EmptyLookupIterator : public IdentifierIterator
   {
   public:
-    virtual StringRef Next() { return StringRef(); }
+    StringRef Next() override { return StringRef(); }
   };
 }
 
@@ -105,6 +105,7 @@
     KEYARC = 0x800,
     KEYNOMS = 0x01000,
     WCHARSUPPORT = 0x02000,
+    HALFSUPPORT = 0x04000,
     KEYALL = (0xffff & ~KEYNOMS) // Because KEYNOMS is used to exclude.
   };
 }
@@ -129,6 +130,7 @@
   else if (LangOpts.MicrosoftExt && (Flags & KEYMS)) AddResult = 1;
   else if (LangOpts.Borland && (Flags & KEYBORLAND)) AddResult = 1;
   else if (LangOpts.Bool && (Flags & BOOLSUPPORT)) AddResult = 2;
+  else if (LangOpts.Half && (Flags & HALFSUPPORT)) AddResult = 2;
   else if (LangOpts.WChar && (Flags & WCHARSUPPORT)) AddResult = 2;
   else if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) AddResult = 2;
   else if (LangOpts.OpenCL && (Flags & KEYOPENCL)) AddResult = 2;
@@ -139,8 +141,8 @@
   else if (LangOpts.ObjC2 && (Flags & KEYARC)) AddResult = 2;
   else if (LangOpts.CPlusPlus && (Flags & KEYCXX11)) AddResult = 3;
 
-  // Don't add this keyword under MicrosoftMode.
-  if (LangOpts.MicrosoftMode && (Flags & KEYNOMS))
+  // Don't add this keyword under MSVCCompat.
+  if (LangOpts.MSVCCompat && (Flags & KEYNOMS))
      return;
   // Don't add this keyword if disabled in this language.
   if (AddResult == 0) return;
@@ -398,6 +400,10 @@
   return getMultiKeywordSelector()->getName();
 }
 
+void Selector::print(llvm::raw_ostream &OS) const {
+  OS << getAsString();
+}
+
 /// Interpreting the given string using the normal CamelCase
 /// conventions, determine whether the given string starts with the
 /// given "word", which is assumed to end in a lowercase letter.
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index d08cef1..237c789 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -24,20 +24,21 @@
 
 using namespace clang;
 
-Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent, 
+Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
                bool IsFramework, bool IsExplicit)
   : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent),
     Umbrella(), ASTFile(0), IsAvailable(true), IsFromModuleFile(false),
     IsFramework(IsFramework), IsExplicit(IsExplicit), IsSystem(false),
-    InferSubmodules(false), InferExplicitSubmodules(false), 
+    IsExternC(false), InferSubmodules(false), InferExplicitSubmodules(false),
     InferExportWildcard(false), ConfigMacrosExhaustive(false),
-    NameVisibility(Hidden)
-{ 
+    NameVisibility(Hidden) {
   if (Parent) {
     if (!Parent->isAvailable())
       IsAvailable = false;
     if (Parent->IsSystem)
       IsSystem = true;
+    if (Parent->IsExternC)
+      IsExternC = true;
     
     Parent->SubModuleIndex[Name] = Parent->SubModules.size();
     Parent->SubModules.push_back(this);
@@ -69,11 +70,15 @@
 
 bool
 Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
-                    Requirement &Req) const {
+                    Requirement &Req, HeaderDirective &MissingHeader) const {
   if (IsAvailable)
     return true;
 
   for (const Module *Current = this; Current; Current = Current->Parent) {
+    if (!Current->MissingHeaders.empty()) {
+      MissingHeader = Current->MissingHeaders.front();
+      return false;
+    }
     for (unsigned I = 0, N = Current->Requirements.size(); I != N; ++I) {
       if (hasFeature(Current->Requirements[I].first, LangOpts, Target) !=
               Current->Requirements[I].second) {
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index 1350934..ec09de1 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -77,9 +77,13 @@
              .Default(OMPC_DEFAULT_unknown);
   case OMPC_unknown:
   case OMPC_threadprivate:
+  case OMPC_if:
+  case OMPC_num_threads:
+  case OMPC_safelen:
   case OMPC_private:
   case OMPC_firstprivate:
   case OMPC_shared:
+  case OMPC_copyin:
   case NUM_OPENMP_CLAUSES:
     break;
   }
@@ -100,9 +104,13 @@
     llvm_unreachable("Invalid OpenMP 'default' clause type");
   case OMPC_unknown:
   case OMPC_threadprivate:
+  case OMPC_if:
+  case OMPC_num_threads:
+  case OMPC_safelen:
   case OMPC_private:
   case OMPC_firstprivate:
   case OMPC_shared:
+  case OMPC_copyin:
   case NUM_OPENMP_CLAUSES:
     break;
   }
@@ -123,6 +131,15 @@
       break;
     }
     break;
+  case OMPD_simd:
+    switch (CKind) {
+#define OPENMP_SIMD_CLAUSE(Name) \
+    case OMPC_##Name: return true;
+#include "clang/Basic/OpenMPKinds.def"
+    default:
+      break;
+    }
+    break;
   case OMPD_unknown:
   case OMPD_threadprivate:
   case OMPD_task:
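The OMPD_simd case above is driven by an OPENMP_SIMD_CLAUSE X-macro list in OpenMPKinds.def rather than a hand-maintained switch. A standalone sketch of the same table-driven technique with a made-up clause list; the real macro and clause set live in clang/Basic/OpenMPKinds.def.

// Illustrative only: the X-macro pattern behind isAllowedClauseForDirective.
#include <iostream>

#define EXAMPLE_SIMD_CLAUSES(X) \
  X(private)                    \
  X(firstprivate)               \
  X(safelen)

enum ClauseKind { CK_private, CK_firstprivate, CK_safelen, CK_shared };

static bool isAllowedForSimd(ClauseKind CK) {
  switch (CK) {
#define CASE(Name) case CK_##Name: return true;
  EXAMPLE_SIMD_CLAUSES(CASE)
#undef CASE
  default:
    return false;
  }
}

int main() {
  std::cout << isAllowedForSimd(CK_safelen) << " "   // 1
            << isAllowedForSimd(CK_shared) << "\n";  // 0
}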
diff --git a/lib/Basic/SourceLocation.cpp b/lib/Basic/SourceLocation.cpp
index 1822091..0c06a48 100644
--- a/lib/Basic/SourceLocation.cpp
+++ b/lib/Basic/SourceLocation.cpp
@@ -61,14 +61,15 @@
   OS << '>';
 }
 
-std::string SourceLocation::printToString(const SourceManager &SM) const {
+LLVM_DUMP_METHOD std::string
+SourceLocation::printToString(const SourceManager &SM) const {
   std::string S;
   llvm::raw_string_ostream OS(S);
   print(OS, SM);
   return OS.str();
 }
 
-void SourceLocation::dump(const SourceManager &SM) const {
+LLVM_DUMP_METHOD void SourceLocation::dump(const SourceManager &SM) const {
   print(llvm::errs(), SM);
 }
 
@@ -122,7 +123,7 @@
   return SrcMgr->isBeforeInTranslationUnit(*this, Loc);
 }
 
-void FullSourceLoc::dump() const {
+LLVM_DUMP_METHOD void FullSourceLoc::dump() const {
   SourceLocation::dump(*SrcMgr);
 }
 
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index 9d79551..b78e9f5 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -404,10 +404,7 @@
   delete FakeBufferForRecovery;
   delete FakeContentCacheForRecovery;
 
-  for (llvm::DenseMap<FileID, MacroArgsMap *>::iterator
-         I = MacroArgsCacheMap.begin(),E = MacroArgsCacheMap.end(); I!=E; ++I) {
-    delete I->second;
-  }
+  llvm::DeleteContainerSeconds(MacroArgsCacheMap);
 }
 
 void SourceManager::clearIDTables() {
@@ -1381,31 +1378,6 @@
     }
   }
 
-  // If the spread is large, do a "radix" test as our initial guess, based on
-  // the assumption that lines average to approximately the same length.
-  // NOTE: This is currently disabled, as it does not appear to be profitable in
-  // initial measurements.
-  if (0 && SourceLineCacheEnd-SourceLineCache > 20) {
-    unsigned FileLen = Content->SourceLineCache[Content->NumLines-1];
-
-    // Take a stab at guessing where it is.
-    unsigned ApproxPos = Content->NumLines*QueriedFilePos / FileLen;
-
-    // Check for -10 and +10 lines.
-    unsigned LowerBound = std::max(int(ApproxPos-10), 0);
-    unsigned UpperBound = std::min(ApproxPos+10, FileLen);
-
-    // If the computed lower bound is less than the query location, move it in.
-    if (SourceLineCache < SourceLineCacheStart+LowerBound &&
-        SourceLineCacheStart[LowerBound] < QueriedFilePos)
-      SourceLineCache = SourceLineCacheStart+LowerBound;
-
-    // If the computed upper bound is greater than the query location, move it.
-    if (SourceLineCacheEnd > SourceLineCacheStart+UpperBound &&
-        SourceLineCacheStart[UpperBound] >= QueriedFilePos)
-      SourceLineCacheEnd = SourceLineCacheStart+UpperBound;
-  }
-
   unsigned *Pos
     = std::lower_bound(SourceLineCache, SourceLineCacheEnd, QueriedFilePos);
   unsigned LineNo = Pos-SourceLineCacheStart;
@@ -1583,7 +1555,7 @@
   return FI.getIncludeLoc().isInvalid();
 }
 
-/// \brief The size of the SLocEnty that \arg FID represents.
+/// \brief The size of the SLocEntry that \p FID represents.
 unsigned SourceManager::getFileIDSize(FileID FID) const {
   bool Invalid = false;
   const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
@@ -1893,7 +1865,7 @@
 
     FileID SpellFID; // Current FileID in the spelling range.
     unsigned SpellRelativeOffs;
-    llvm::tie(SpellFID, SpellRelativeOffs) = getDecomposedLoc(SpellLoc);
+    std::tie(SpellFID, SpellRelativeOffs) = getDecomposedLoc(SpellLoc);
     while (1) {
       const SLocEntry &Entry = getSLocEntry(SpellFID);
       unsigned SpellFIDBeginOffs = Entry.getOffset();
@@ -1972,7 +1944,7 @@
 
   FileID FID;
   unsigned Offset;
-  llvm::tie(FID, Offset) = getDecomposedLoc(Loc);
+  std::tie(FID, Offset) = getDecomposedLoc(Loc);
   if (FID.isInvalid())
     return Loc;
 
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index e993055..01eced2 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -69,8 +69,7 @@
   FloatFormat = &llvm::APFloat::IEEEsingle;
   DoubleFormat = &llvm::APFloat::IEEEdouble;
   LongDoubleFormat = &llvm::APFloat::IEEEdouble;
-  DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                      "i64:64:64-f32:32:32-f64:64:64-n32";
+  DescriptionString = 0;
   UserLabelPrefix = "_";
   MCountName = "mcount";
   RegParmMax = 0;
@@ -83,8 +82,10 @@
   // Default to not using fp2ret for __Complex long double
   ComplexLongDoubleUsesFP2Ret = false;
 
-  // Default to using the Itanium ABI.
-  TheCXXABI.set(TargetCXXABI::GenericItanium);
+  // Set the C++ ABI based on the triple.
+  TheCXXABI.set(Triple.isKnownWindowsMSVCEnvironment()
+                    ? TargetCXXABI::Microsoft
+                    : TargetCXXABI::GenericItanium);
 
   // Default to an empty address space map.
   AddrSpaceMap = &DefaultAddrSpaceMap;
@@ -245,7 +246,14 @@
     LongLongWidth = LongLongAlign = 128;
     HalfWidth = HalfAlign = 16;
     FloatWidth = FloatAlign = 32;
-    DoubleWidth = DoubleAlign = 64;
+
+    // Embedded 32-bit targets (OpenCL EP) might have the double C type
+    // defined as float. Let's not override this, as it might lead to
+    // generating illegal code that uses 64-bit doubles.
+    if (DoubleWidth != FloatWidth) {
+      DoubleWidth = DoubleAlign = 64;
+      DoubleFormat = &llvm::APFloat::IEEEdouble;
+    }
     LongDoubleWidth = LongDoubleAlign = 128;
 
     assert(PointerWidth == 32 || PointerWidth == 64);
@@ -260,7 +268,6 @@
 
     HalfFormat = &llvm::APFloat::IEEEhalf;
     FloatFormat = &llvm::APFloat::IEEEsingle;
-    DoubleFormat = &llvm::APFloat::IEEEdouble;
     LongDoubleFormat = &llvm::APFloat::IEEEquad;
   }
 }
@@ -483,6 +490,9 @@
                                          ConstraintInfo &Info) const {
   const char *Name = Info.ConstraintStr.c_str();
 
+  if (!*Name)
+    return false;
+
   while (*Name) {
     switch (*Name) {
     default:
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 8e76130..fd59c00 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -20,7 +20,6 @@
 #include "clang/Basic/TargetBuiltins.h"
 #include "clang/Basic/TargetOptions.h"
 #include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
@@ -29,6 +28,7 @@
 #include "llvm/MC/MCSectionMachO.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <algorithm>
+#include <memory>
 using namespace clang;
 
 //===----------------------------------------------------------------------===//
@@ -74,8 +74,8 @@
                             MacroBuilder &Builder) const=0;
 public:
   OSTargetInfo(const llvm::Triple &Triple) : TgtInfo(Triple) {}
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     TgtInfo::getTargetDefines(Opts, Builder);
     getOSDefines(Opts, TgtInfo::getTriple(), Builder);
   }
@@ -90,7 +90,6 @@
                              VersionTuple &PlatformMinVersion) {
   Builder.defineMacro("__APPLE_CC__", "6000");
   Builder.defineMacro("__APPLE__");
-  Builder.defineMacro("__MACH__");
   Builder.defineMacro("OBJC_NEW_PROPERTIES");
   // AddressSanitizer doesn't play well with source fortification, which is on
   // by default on Darwin.
@@ -138,39 +137,37 @@
     return;
   }
 
-  // If there's an environment specified in the triple, that means we're dealing
-  // with an embedded variant of some sort and don't want the platform
-  // version-min defines, so only add them if there's not one.
-  if (Triple.getEnvironmentName().empty()) {
-    // Set the appropriate OS version define.
-    if (Triple.isiOS()) {
-      assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
-      char Str[6];
-      Str[0] = '0' + Maj;
-      Str[1] = '0' + (Min / 10);
-      Str[2] = '0' + (Min % 10);
-      Str[3] = '0' + (Rev / 10);
-      Str[4] = '0' + (Rev % 10);
-      Str[5] = '\0';
-      Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__",
-                          Str);
-    } else {
-      // Note that the Driver allows versions which aren't representable in the
-      // define (because we only get a single digit for the minor and micro
-      // revision numbers). So, we limit them to the maximum representable
-      // version.
-      assert(Triple.getEnvironmentName().empty() && "Invalid environment!");
-      assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!");
-      char Str[5];
-      Str[0] = '0' + (Maj / 10);
-      Str[1] = '0' + (Maj % 10);
-      Str[2] = '0' + std::min(Min, 9U);
-      Str[3] = '0' + std::min(Rev, 9U);
-      Str[4] = '\0';
-      Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str);
-    }
+  // Set the appropriate OS version define.
+  if (Triple.isiOS()) {
+    assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
+    char Str[6];
+    Str[0] = '0' + Maj;
+    Str[1] = '0' + (Min / 10);
+    Str[2] = '0' + (Min % 10);
+    Str[3] = '0' + (Rev / 10);
+    Str[4] = '0' + (Rev % 10);
+    Str[5] = '\0';
+    Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__",
+                        Str);
+  } else if (Triple.isMacOSX()) {
+    // Note that the Driver allows versions which aren't representable in the
+    // define (because we only get a single digit for the minor and micro
+    // revision numbers). So, we limit them to the maximum representable
+    // version.
+    assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!");
+    char Str[5];
+    Str[0] = '0' + (Maj / 10);
+    Str[1] = '0' + (Maj % 10);
+    Str[2] = '0' + std::min(Min, 9U);
+    Str[3] = '0' + std::min(Rev, 9U);
+    Str[4] = '\0';
+    Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str);
   }
 
+  // Tell users about the kernel if there is one.
+  if (Triple.isOSDarwin())
+    Builder.defineMacro("__MACH__");
+
   PlatformMinVersion = VersionTuple(Maj, Min, Rev);
 }
 
@@ -178,8 +175,8 @@
 template<typename Target>
 class DarwinTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     getDarwinDefines(Builder, Opts, Triple, this->PlatformName,
                      this->PlatformMinVersion);
   }
@@ -190,7 +187,7 @@
     this->MCountName = "\01mcount";
   }
 
-  virtual std::string isValidSectionSpecifier(StringRef SR) const {
+  std::string isValidSectionSpecifier(StringRef SR) const override {
     // Let MCSectionMachO validate this.
     StringRef Segment, Section;
     unsigned TAA, StubSize;
@@ -199,7 +196,7 @@
                                                        TAA, HasTAA, StubSize);
   }
 
-  virtual const char *getStaticInitSectionSpecifier() const {
+  const char *getStaticInitSectionSpecifier() const override {
     // FIXME: We should return 0 when building kexts.
     return "__TEXT,__StaticInit,regular,pure_instructions";
   }
@@ -207,7 +204,7 @@
   /// Darwin does not support protected visibility.  Darwin's "default"
   /// is very similar to ELF's "protected";  Darwin requires a "weak"
   /// attribute on declarations that can be dynamically replaced.
-  virtual bool hasProtectedVisibility() const {
+  bool hasProtectedVisibility() const override {
     return false;
   }
 };
@@ -217,8 +214,8 @@
 template<typename Target>
 class DragonFlyBSDTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // DragonFly defines; list based off of gcc output
     Builder.defineMacro("__DragonFly__");
     Builder.defineMacro("__DragonFly_cc_version", "100001");
@@ -246,8 +243,8 @@
 template<typename Target>
 class FreeBSDTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // FreeBSD defines; list based off of gcc output
 
     unsigned Release = Triple.getOSMajorVersion();
@@ -293,8 +290,8 @@
 template<typename Target>
 class KFreeBSDTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // GNU/kFreeBSD defines; list based off of gcc output
 
     DefineStd(Builder, "unix", Opts);
@@ -316,8 +313,8 @@
 template<typename Target>
 class MinixTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // Minix defines
 
     Builder.defineMacro("__minix", "3");
@@ -340,8 +337,8 @@
 template<typename Target>
 class LinuxTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // Linux defines; list based off of gcc output
     DefineStd(Builder, "unix", Opts);
     DefineStd(Builder, "linux", Opts);
@@ -358,9 +355,19 @@
   LinuxTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
     this->UserLabelPrefix = "";
     this->WIntType = TargetInfo::UnsignedInt;
+
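+    // On PowerPC Linux the profiling entry point is named "_mcount".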
+    switch (Triple.getArch()) {
+    default:
+      break;
+    case llvm::Triple::ppc:
+    case llvm::Triple::ppc64:
+    case llvm::Triple::ppc64le:
+      this->MCountName = "_mcount";
+      break;
+    }
   }
 
-  virtual const char *getStaticInitSectionSpecifier() const {
+  const char *getStaticInitSectionSpecifier() const override {
     return ".text.startup";
   }
 };
@@ -369,8 +376,8 @@
 template<typename Target>
 class NetBSDTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // NetBSD defines; list based off of gcc output
     Builder.defineMacro("__NetBSD__");
     Builder.defineMacro("__unix__");
@@ -388,8 +395,8 @@
 template<typename Target>
 class OpenBSDTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // OpenBSD defines; list based off of gcc output
 
     Builder.defineMacro("__OpenBSD__");
@@ -425,8 +432,8 @@
 template<typename Target>
 class BitrigTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // Bitrig defines; list based off of gcc output
 
     Builder.defineMacro("__Bitrig__");
@@ -447,8 +454,8 @@
 template<typename Target>
 class PSPTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // PSP defines; list based on the output of the pspdev gcc toolchain.
     Builder.defineMacro("PSP");
     Builder.defineMacro("_PSP");
@@ -465,8 +472,8 @@
 template<typename Target>
 class PS3PPUTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // PS3 PPU defines.
     Builder.defineMacro("__PPC__");
     Builder.defineMacro("__PPU__");
@@ -485,25 +492,7 @@
     this->UIntMaxType = TargetInfo::UnsignedLongLong;
     this->Int64Type = TargetInfo::SignedLongLong;
     this->SizeType = TargetInfo::UnsignedInt;
-    this->DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                              "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
-  }
-};
-
-// FIXME: Need a real SPU target.
-// PS3 SPU Target
-template<typename Target>
-class PS3SPUTargetInfo : public OSTargetInfo<Target> {
-protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
-    // PS3 PPU defines.
-    Builder.defineMacro("__SPU__");
-    Builder.defineMacro("__ELF__");
-  }
-public:
-  PS3SPUTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
-    this->UserLabelPrefix = "";
+    this->DescriptionString = "E-m:e-p:32:32-i64:64-n32:64";
   }
 };
 
@@ -511,8 +500,8 @@
 template<typename Target>
 class AuroraUXTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     DefineStd(Builder, "sun", Opts);
     DefineStd(Builder, "unix", Opts);
     Builder.defineMacro("__ELF__");
@@ -532,8 +521,8 @@
 template<typename Target>
 class SolarisTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     DefineStd(Builder, "sun", Opts);
     DefineStd(Builder, "unix", Opts);
     Builder.defineMacro("__ELF__");
@@ -566,8 +555,8 @@
 template<typename Target>
 class WindowsTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     Builder.defineMacro("_WIN32");
   }
   void getVisualStudioDefines(const LangOptions &Opts,
@@ -612,8 +601,8 @@
 template <typename Target>
 class NaClTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     if (Opts.POSIXThreads)
       Builder.defineMacro("_REENTRANT");
     if (Opts.CPlusPlus)
@@ -637,16 +626,28 @@
     this->DoubleAlign = 64;
     this->LongDoubleWidth = 64;
     this->LongDoubleAlign = 64;
+    this->LongLongWidth = 64;
+    this->LongLongAlign = 64;
     this->SizeType = TargetInfo::UnsignedInt;
     this->PtrDiffType = TargetInfo::SignedInt;
     this->IntPtrType = TargetInfo::SignedInt;
     // RegParmMax is inherited from the underlying architecture
     this->LongDoubleFormat = &llvm::APFloat::IEEEdouble;
-    this->DescriptionString = "e-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
-                              "f32:32:32-f64:64:64-p:32:32:32-v128:32:32";
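+    // Pick the data layout string matching the NaCl variant of each arch.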
+    if (Triple.getArch() == llvm::Triple::arm) {
+      this->DescriptionString = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S128";
+    } else if (Triple.getArch() == llvm::Triple::x86) {
+      this->DescriptionString = "e-m:e-p:32:32-i64:64-n8:16:32-S128";
+    } else if (Triple.getArch() == llvm::Triple::x86_64) {
+      this->DescriptionString = "e-m:e-p:32:32-i64:64-n8:16:32:64-S128";
+    } else if (Triple.getArch() == llvm::Triple::mipsel) {
+      // Handled in the MIPS target's setDescriptionString().
+    } else {
+      assert(Triple.getArch() == llvm::Triple::le32);
+      this->DescriptionString = "e-p:32:32-i64:64";
+    }
   }
-  virtual typename Target::CallingConvCheckResult checkCallingConvention(
-      CallingConv CC) const {
+  typename Target::CallingConvCheckResult checkCallingConvention(
+      CallingConv CC) const override {
     return CC == CC_PnaclCall ? Target::CCCR_OK :
         Target::checkCallingConvention(CC);
   }
@@ -699,7 +700,7 @@
   //  401, 403, 405, 405fp, 440fp, 464, 464fp, 476, 476fp, 505, 740, 801,
   //  821, 823, 8540, 8548, e300c2, e300c3, e500mc64, e6500, 860, cell,
   //  titan, rs64.
-  virtual bool setCPU(const std::string &Name) {
+  bool setCPU(const std::string &Name) override {
     bool CPUKnown = llvm::StringSwitch<bool>(Name)
       .Case("generic", true)
       .Case("440", true)
@@ -753,29 +754,29 @@
     return CPUKnown;
   }
 
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     Records = BuiltinInfo;
     NumRecords = clang::PPC::LastTSBuiltin-Builtin::FirstTSBuiltin;
   }
 
-  virtual bool isCLZForZeroUndef() const { return false; }
+  bool isCLZForZeroUndef() const override { return false; }
 
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const;
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override;
 
-  virtual void getDefaultFeatures(llvm::StringMap<bool> &Features) const;
+  void getDefaultFeatures(llvm::StringMap<bool> &Features) const override;
 
-  virtual bool handleTargetFeatures(std::vector<std::string> &Features,
-                                    DiagnosticsEngine &Diags);
-  virtual bool hasFeature(StringRef Feature) const;
-  
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &NumNames) const;
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const;
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &Info) const {
+  bool handleTargetFeatures(std::vector<std::string> &Features,
+                            DiagnosticsEngine &Diags) override;
+  bool hasFeature(StringRef Feature) const override;
+
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &NumNames) const override;
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override;
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &Info) const override {
     switch (*Name) {
     default: return false;
     case 'O': // Zero
@@ -797,6 +798,7 @@
         case 'f':// VSX vector register to hold vector float data
         case 's':// VSX vector register to hold scalar float data
         case 'a':// Any VSX register
+        case 'c':// An individual CR bit
           break;
         default:
           return false;
@@ -872,10 +874,24 @@
     }
     return true;
   }
-  virtual const char *getClobbers() const {
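+  // Constraints beginning with 'e' or 'w' are two-letter PowerPC constraints;
+  // mark them with a leading '^' so the generic parser keeps both characters.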
+  std::string convertConstraint(const char *&Constraint) const override {
+    std::string R;
+    switch (*Constraint) {
+    case 'e':
+    case 'w':
+      // Two-character constraint; add "^" hint for later parsing.
+      R = std::string("^") + std::string(Constraint, 2);
+      Constraint++;
+      break;
+    default:
+      return TargetInfo::convertConstraint(Constraint);
+    }
+    return R;
+  }
+  const char *getClobbers() const override {
     return "";
   }
-  int getEHDataRegisterNumber(unsigned RegNo) const {
+  int getEHDataRegisterNumber(unsigned RegNo) const override {
     if (RegNo == 0) return 3;
     if (RegNo == 1) return 4;
     return -1;
@@ -933,12 +949,11 @@
   // Target properties.
   if (getTriple().getArch() == llvm::Triple::ppc64le) {
     Builder.defineMacro("_LITTLE_ENDIAN");
-    Builder.defineMacro("__LITTLE_ENDIAN__");
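+    // ppc64le uses the ELFv2 ABI, which is identified by _CALL_ELF == 2.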
+    Builder.defineMacro("_CALL_ELF","2");
   } else {
     if (getTriple().getOS() != llvm::Triple::NetBSD &&
         getTriple().getOS() != llvm::Triple::OpenBSD)
       Builder.defineMacro("_BIG_ENDIAN");
-    Builder.defineMacro("__BIG_ENDIAN__");
   }
 
   // Subtarget options.
@@ -1196,8 +1211,7 @@
 class PPC32TargetInfo : public PPCTargetInfo {
 public:
   PPC32TargetInfo(const llvm::Triple &Triple) : PPCTargetInfo(Triple) {
-    DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
+    DescriptionString = "E-m:e-p:32:32-i64:64-n32";
 
     switch (getTriple().getOS()) {
     case llvm::Triple::Linux:
@@ -1220,7 +1234,7 @@
     MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
   }
 
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     // This is the ELF definition, and is overridden by the Darwin sub-target
     return TargetInfo::PowerABIBuiltinVaList;
   }
@@ -1241,18 +1255,19 @@
     if (getTriple().getOS() == llvm::Triple::FreeBSD) {
       LongDoubleWidth = LongDoubleAlign = 64;
       LongDoubleFormat = &llvm::APFloat::IEEEdouble;
-      DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                          "i64:64:64-f32:32:32-f64:64:64-"
-                          "v128:128:128-n32:64";
-    } else
-      DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                          "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
-                          "v128:128:128-n32:64";
+      DescriptionString = "E-m:e-i64:64-n32:64";
+    } else if (Triple.getArch() == llvm::Triple::ppc64le) {
+      DescriptionString = "e-m:e-i64:64-n32:64";
+    } else {
+      DescriptionString = "E-m:e-i64:64-n32:64";
+    }
 
     // PPC64 supports atomics up to 8 bytes.
     MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::CharPtrBuiltinVaList;
   }
 };
@@ -1270,10 +1285,9 @@
     PtrDiffType = SignedInt;    // for http://llvm.org/bugs/show_bug.cgi?id=15726
     LongLongAlign = 32;
     SuitableAlign = 128;
-    DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:32:64-f32:32:32-f64:64:64-v128:128:128-n32";
+    DescriptionString = "E-m:o-p:32:32-f64:32:64-n32";
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::CharPtrBuiltinVaList;
   }
 };
@@ -1285,8 +1299,7 @@
       : DarwinTargetInfo<PPC64TargetInfo>(Triple) {
     HasAlignMac68kSupport = true;
     SuitableAlign = 128;
-    DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64";
+    DescriptionString = "E-m:o-i64:64-n32:64";
   }
 };
 } // end anonymous namespace.
@@ -1314,30 +1327,30 @@
       // These must be defined in sorted order!
       NoAsmVariants = true;
     }
-    virtual void getTargetDefines(const LangOptions &Opts,
-                                  MacroBuilder &Builder) const {
+    void getTargetDefines(const LangOptions &Opts,
+                          MacroBuilder &Builder) const override {
       Builder.defineMacro("__PTX__");
       Builder.defineMacro("__NVPTX__");
     }
-    virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                   unsigned &NumRecords) const {
+    void getTargetBuiltins(const Builtin::Info *&Records,
+                           unsigned &NumRecords) const override {
       Records = BuiltinInfo;
       NumRecords = clang::NVPTX::LastTSBuiltin-Builtin::FirstTSBuiltin;
     }
-    virtual bool hasFeature(StringRef Feature) const {
+    bool hasFeature(StringRef Feature) const override {
       return Feature == "ptx" || Feature == "nvptx";
     }
-    
-    virtual void getGCCRegNames(const char * const *&Names,
-                                unsigned &NumNames) const;
-    virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                  unsigned &NumAliases) const {
+
+    void getGCCRegNames(const char * const *&Names,
+                        unsigned &NumNames) const override;
+    void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                          unsigned &NumAliases) const override {
       // No aliases.
       Aliases = 0;
       NumAliases = 0;
     }
-    virtual bool validateAsmConstraint(const char *&Name,
-                                       TargetInfo::ConstraintInfo &Info) const {
+    bool validateAsmConstraint(const char *&Name,
+                               TargetInfo::ConstraintInfo &Info) const override {
       switch (*Name) {
       default: return false;
       case 'c':
@@ -1350,15 +1363,15 @@
         return true;
       }
     }
-    virtual const char *getClobbers() const {
+    const char *getClobbers() const override {
       // FIXME: Is this really right?
       return "";
     }
-    virtual BuiltinVaListKind getBuiltinVaListKind() const {
+    BuiltinVaListKind getBuiltinVaListKind() const override {
       // FIXME: implement
       return TargetInfo::CharPtrBuiltinVaList;
     }
-    virtual bool setCPU(const std::string &Name) {
+    bool setCPU(const std::string &Name) override {
       bool Valid = llvm::StringSwitch<bool>(Name)
         .Case("sm_20", true)
         .Case("sm_21", true)
@@ -1392,10 +1405,7 @@
     NVPTX32TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) {
       PointerWidth = PointerAlign = 32;
       SizeType     = PtrDiffType = IntPtrType = TargetInfo::UnsignedInt;
-      DescriptionString
-        = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
-          "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
-          "n16:32:64";
+      DescriptionString = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64";
   }
   };
 
@@ -1404,10 +1414,7 @@
     NVPTX64TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) {
       PointerWidth = PointerAlign = 64;
       SizeType     = PtrDiffType = IntPtrType = TargetInfo::UnsignedLongLong;
-      DescriptionString
-        = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
-          "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
-          "n16:32:64";
+      DescriptionString = "e-i64:64-v16:16-v32:32-n16:32:64";
   }
   };
 }
@@ -1424,29 +1431,17 @@
 };
 
 static const char *DescriptionStringR600 =
-  "e"
-  "-p:32:32:32"
-  "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32"
-  "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128"
-  "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048"
-  "-n32:64";
+  "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+  "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
 
 static const char *DescriptionStringR600DoubleOps =
-  "e"
-  "-p:32:32:32"
-  "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64"
-  "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128"
-  "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048"
-  "-n32:64";
+  "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+  "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
 
 static const char *DescriptionStringSI =
-  "e"
-  "-p:64:64:64"
-  "-p3:32:32:32"
-  "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64"
-  "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128"
-  "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048"
-  "-n32:64";
+  "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64"
+  "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+  "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
 
 class R600TargetInfo : public TargetInfo {
   /// \brief The GPU profiles supported by the R600 target.
@@ -1472,44 +1467,44 @@
     UseAddrSpaceMapMangling = true;
   }
 
-  virtual const char * getClobbers() const {
+  const char * getClobbers() const override {
     return "";
   }
 
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &numNames) const  {
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &numNames) const override {
     Names = NULL;
     numNames = 0;
   }
 
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const {
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override {
     Aliases = NULL;
     NumAliases = 0;
   }
 
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &info) const {
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &info) const override {
     return true;
   }
 
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     Records = NULL;
     NumRecords = 0;
   }
 
 
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     Builder.defineMacro("__R600__");
   }
 
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::CharPtrBuiltinVaList;
   }
 
-  virtual bool setCPU(const std::string &Name) {
+  bool setCPU(const std::string &Name) override {
     GPU = llvm::StringSwitch<GPUKind>(Name)
       .Case("r600" ,    GK_R600)
       .Case("rv610",    GK_R600)
@@ -1807,57 +1802,57 @@
     BigEndian = false;
     LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
   }
-  virtual unsigned getFloatEvalMethod() const {
+  unsigned getFloatEvalMethod() const override {
     // X87 evaluates with 80 bits "long double" precision.
     return SSELevel == NoSSE ? 2 : 0;
   }
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     Records = BuiltinInfo;
     NumRecords = clang::X86::LastTSBuiltin-Builtin::FirstTSBuiltin;
   }
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &NumNames) const {
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &NumNames) const override {
     Names = GCCRegNames;
     NumNames = llvm::array_lengthof(GCCRegNames);
   }
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const {
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override {
     Aliases = 0;
     NumAliases = 0;
   }
-  virtual void getGCCAddlRegNames(const AddlRegName *&Names,
-                                  unsigned &NumNames) const {
+  void getGCCAddlRegNames(const AddlRegName *&Names,
+                          unsigned &NumNames) const override {
     Names = AddlRegNames;
     NumNames = llvm::array_lengthof(AddlRegNames);
   }
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &info) const;
-  virtual std::string convertConstraint(const char *&Constraint) const;
-  virtual const char *getClobbers() const {
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &info) const override;
+  std::string convertConstraint(const char *&Constraint) const override;
+  const char *getClobbers() const override {
     return "~{dirflag},~{fpsr},~{flags}";
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const;
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override;
   static void setSSELevel(llvm::StringMap<bool> &Features, X86SSEEnum Level,
                           bool Enabled);
   static void setMMXLevel(llvm::StringMap<bool> &Features, MMX3DNowEnum Level,
                           bool Enabled);
   static void setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
                           bool Enabled);
-  virtual void setFeatureEnabled(llvm::StringMap<bool> &Features,
-                                 StringRef Name, bool Enabled) const {
+  void setFeatureEnabled(llvm::StringMap<bool> &Features,
+                         StringRef Name, bool Enabled) const override {
     setFeatureEnabledImpl(Features, Name, Enabled);
   }
   // This exists purely to cut down on the number of virtual calls in
   // getDefaultFeatures which calls this repeatedly.
   static void setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
                                     StringRef Name, bool Enabled);
-  virtual void getDefaultFeatures(llvm::StringMap<bool> &Features) const;
-  virtual bool hasFeature(StringRef Feature) const;
-  virtual bool handleTargetFeatures(std::vector<std::string> &Features,
-                                    DiagnosticsEngine &Diags);
-  virtual const char* getABI() const {
+  void getDefaultFeatures(llvm::StringMap<bool> &Features) const override;
+  bool hasFeature(StringRef Feature) const override;
+  bool handleTargetFeatures(std::vector<std::string> &Features,
+                            DiagnosticsEngine &Diags) override;
+  const char* getABI() const override {
     if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX)
       return "avx";
     else if (getTriple().getArch() == llvm::Triple::x86 &&
@@ -1865,7 +1860,7 @@
       return "no-mmx";
     return "";
   }
-  virtual bool setCPU(const std::string &Name) {
+  bool setCPU(const std::string &Name) override {
     CPU = llvm::StringSwitch<CPUKind>(Name)
       .Case("i386", CK_i386)
       .Case("i486", CK_i486)
@@ -1992,9 +1987,9 @@
     llvm_unreachable("Unhandled CPU kind");
   }
 
-  virtual bool setFPMath(StringRef Name);
+  bool setFPMath(StringRef Name) override;
 
-  virtual CallingConvCheckResult checkCallingConvention(CallingConv CC) const {
+  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
     // We accept all non-ARM calling conventions
     return (CC == CC_X86ThisCall ||
             CC == CC_X86FastCall ||
@@ -2004,7 +1999,7 @@
             CC == CC_IntelOclBicc) ? CCCR_OK : CCCR_Warning;
   }
 
-  virtual CallingConv getDefaultCallingConv(CallingConvMethodType MT) const {
+  CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
     return MT == CCMT_Member ? CC_X86ThisCall : CC_C;
   }
 };
@@ -2697,7 +2692,6 @@
   }
 
   // Target properties.
-  Builder.defineMacro("__LITTLE_ENDIAN__");
   Builder.defineMacro("__REGISTER_PREFIX__", "");
 
   // Define __NO_MATH_INLINES on linux/x86 so that we don't get inline
@@ -2950,9 +2944,7 @@
     LongDoubleWidth = 96;
     LongDoubleAlign = 32;
     SuitableAlign = 128;
-    DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-"
-                        "a0:0:64-f80:32:32-n8:16:32-S128";
+    DescriptionString = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128";
     SizeType = UnsignedInt;
     PtrDiffType = SignedInt;
     IntPtrType = SignedInt;
@@ -2968,17 +2960,17 @@
     // MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.)
     MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::CharPtrBuiltinVaList;
   }
 
-  int getEHDataRegisterNumber(unsigned RegNo) const {
+  int getEHDataRegisterNumber(unsigned RegNo) const override {
     if (RegNo == 0) return 0;
     if (RegNo == 1) return 2;
     return -1;
   }
-  virtual bool validateInputSize(StringRef Constraint,
-                                 unsigned Size) const {
+  bool validateInputSize(StringRef Constraint,
+                         unsigned Size) const override {
     switch (Constraint[0]) {
     default: break;
     case 'a':
@@ -2999,7 +2991,7 @@
   NetBSDI386TargetInfo(const llvm::Triple &Triple)
       : NetBSDTargetInfo<X86_32TargetInfo>(Triple) {}
 
-  virtual unsigned getFloatEvalMethod() const {
+  unsigned getFloatEvalMethod() const override {
     unsigned Major, Minor, Micro;
     getTriple().getOSVersion(Major, Minor, Micro);
     // New NetBSD uses the default rounding mode.
@@ -3046,9 +3038,7 @@
     MaxVectorAlign = 256;
     SizeType = UnsignedLong;
     IntPtrType = SignedLong;
-    DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-"
-                        "a0:0:64-f80:128:128-n8:16:32-S128";
+    DescriptionString = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128";
     HasAlignMac68kSupport = true;
   }
 
@@ -3064,12 +3054,10 @@
     TLSSupported = false;
     WCharType = UnsignedShort;
     DoubleAlign = LongLongAlign = 64;
-    DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-"
-                        "v128:128:128-a0:0:64-f80:32:32-n8:16:32-S32";
+    DescriptionString = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32";
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     WindowsTargetInfo<X86_32TargetInfo>::getTargetDefines(Opts, Builder);
   }
 };
@@ -3085,8 +3073,8 @@
     LongDoubleWidth = LongDoubleAlign = 64;
     LongDoubleFormat = &llvm::APFloat::IEEEdouble;
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
     WindowsX86_32TargetInfo::getVisualStudioDefines(Opts, Builder);
     // The value of the following reflects processor type.
@@ -3103,8 +3091,8 @@
 public:
   MinGWX86_32TargetInfo(const llvm::Triple &Triple)
       : WindowsX86_32TargetInfo(Triple) {}
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
     DefineStd(Builder, "WIN32", Opts);
     DefineStd(Builder, "WINNT", Opts);
@@ -3133,12 +3121,10 @@
     TLSSupported = false;
     WCharType = UnsignedShort;
     DoubleAlign = LongLongAlign = 64;
-    DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-"
-                        "a0:0:64-f80:32:32-n8:16:32-S32";
+    DescriptionString = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32";
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     X86_32TargetInfo::getTargetDefines(Opts, Builder);
     Builder.defineMacro("_X86_");
     Builder.defineMacro("__CYGWIN__");
@@ -3162,8 +3148,8 @@
     this->UserLabelPrefix = "";
     this->TLSSupported = false;
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     X86_32TargetInfo::getTargetDefines(Opts, Builder);
     Builder.defineMacro("__INTEL__");
     Builder.defineMacro("__HAIKU__");
@@ -3175,8 +3161,8 @@
 template<typename Target>
 class RTEMSTargetInfo : public OSTargetInfo<Target> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     // RTEMS defines; list based off of gcc output
 
     Builder.defineMacro("__rtems__");
@@ -3216,8 +3202,8 @@
     PtrDiffType = SignedLong;
     this->UserLabelPrefix = "";
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     X86_32TargetInfo::getTargetDefines(Opts, Builder);
     Builder.defineMacro("__INTEL__");
     Builder.defineMacro("__rtems__");
@@ -3241,9 +3227,7 @@
     Int64Type = SignedLong;
     RegParmMax = 6;
 
-    DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-"
-                        "a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128";
+    DescriptionString = "e-m:e-i64:64-f80:128-n8:16:32:64-S128";
 
     // Use fpret only for long double.
     RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
@@ -3257,23 +3241,23 @@
     MaxAtomicPromoteWidth = 128;
     MaxAtomicInlineWidth = 64;
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::X86_64ABIBuiltinVaList;
   }
 
-  int getEHDataRegisterNumber(unsigned RegNo) const {
+  int getEHDataRegisterNumber(unsigned RegNo) const override {
     if (RegNo == 0) return 0;
     if (RegNo == 1) return 1;
     return -1;
   }
 
-  virtual CallingConvCheckResult checkCallingConvention(CallingConv CC) const {
+  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
     return (CC == CC_C ||
             CC == CC_IntelOclBicc ||
             CC == CC_X86_64Win64) ? CCCR_OK : CCCR_Warning;
   }
 
-  virtual CallingConv getDefaultCallingConv(CallingConvMethodType MT) const {
+  CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
     return CC_C;
   }
 
@@ -3298,15 +3282,15 @@
     IntPtrType = SignedLongLong;
     this->UserLabelPrefix = "";
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     WindowsTargetInfo<X86_64TargetInfo>::getTargetDefines(Opts, Builder);
     Builder.defineMacro("_WIN64");
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::CharPtrBuiltinVaList;
   }
-  virtual CallingConvCheckResult checkCallingConvention(CallingConv CC) const {
+  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
     return (CC == CC_C ||
             CC == CC_IntelOclBicc ||
             CC == CC_X86_64SysV) ? CCCR_OK : CCCR_Warning;
@@ -3323,8 +3307,8 @@
     LongDoubleWidth = LongDoubleAlign = 64;
     LongDoubleFormat = &llvm::APFloat::IEEEdouble;
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
     WindowsX86_64TargetInfo::getVisualStudioDefines(Opts, Builder);
     Builder.defineMacro("_M_X64");
@@ -3339,8 +3323,8 @@
 public:
   MinGWX86_64TargetInfo(const llvm::Triple &Triple)
       : WindowsX86_64TargetInfo(Triple) {}
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
     DefineStd(Builder, "WIN64", Opts);
     Builder.defineMacro("__MSVCRT__");
@@ -3366,6 +3350,11 @@
       : DarwinTargetInfo<X86_64TargetInfo>(Triple) {
     Int64Type = SignedLongLong;
     MaxVectorAlign = 256;
+    // The 64-bit iOS simulator uses the builtin bool type for Objective-C.
+    if (Triple.getOS() == llvm::Triple::IOS)
+      UseSignedCharForObjCBool = false;
+    DescriptionString = "e-m:o-i64:64-f80:128-n8:16:32:64-S128";
   }
 };
 } // end anonymous namespace
@@ -3396,6 +3385,7 @@
 
 namespace {
 class AArch64TargetInfo : public TargetInfo {
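+  // Implemented by the endianness-specific subclasses; invoked from
+  // handleTargetFeatures() once the final feature set is known.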
+  virtual void setDescriptionString() = 0;
   static const char * const GCCRegNames[];
   static const TargetInfo::GCCRegAlias GCCRegAliases[];
 
@@ -3410,16 +3400,23 @@
 
 public:
   AArch64TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
-    BigEndian = false;
     LongWidth = LongAlign = 64;
     LongDoubleWidth = LongDoubleAlign = 128;
     PointerWidth = PointerAlign = 64;
     SuitableAlign = 128;
-    DescriptionString = "e-p:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:64:64-i128:128:128-f32:32:32-f64:64:64-"
-                        "f128:128:128-n32:64-S128";
 
     WCharType = UnsignedInt;
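+    // NetBSD uses a signed wchar_t and "long long"-based 64-bit integer types;
+    // the generic AArch64 ABI uses an unsigned wchar_t and "long".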
+    if (getTriple().getOS() == llvm::Triple::NetBSD) {
+      WCharType = SignedInt;
+      Int64Type = SignedLongLong;
+      IntMaxType = SignedLongLong;
+      UIntMaxType = UnsignedLongLong;
+    } else {
+      WCharType = UnsignedInt;
+      Int64Type = SignedLong;
+      IntMaxType = SignedLong;
+      UIntMaxType = UnsignedLong;
+    }
     LongDoubleFormat = &llvm::APFloat::IEEEquad;
 
     // AArch64 backend supports 64-bit operations at the moment. In principle
@@ -3428,11 +3425,10 @@
 
     TheCXXABI.set(TargetCXXABI::GenericAArch64);
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     // GCC defines theses currently
     Builder.defineMacro("__aarch64__");
-    Builder.defineMacro("__AARCH64EL__");
 
     // ACLE predefines. Many can only have one possible value on v8 AArch64.
     Builder.defineMacro("__ARM_ACLE",         "200");
@@ -3469,9 +3465,6 @@
     Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM",
                         Opts.ShortEnums ? "1" : "4");
 
-    if (BigEndian)
-      Builder.defineMacro("__AARCH_BIG_ENDIAN");
-
     if (FPU == NeonMode) {
       Builder.defineMacro("__ARM_NEON");
       // 64-bit NEON supports half, single and double precision operations.
@@ -3482,24 +3475,24 @@
       Builder.defineMacro("__ARM_FEATURE_CRYPTO");
     }
   }
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     Records = BuiltinInfo;
     NumRecords = clang::AArch64::LastTSBuiltin-Builtin::FirstTSBuiltin;
   }
-  virtual bool hasFeature(StringRef Feature) const {
+  bool hasFeature(StringRef Feature) const override {
     return Feature == "aarch64" || (Feature == "neon" && FPU == NeonMode);
   }
 
-  virtual bool setCPU(const std::string &Name) {
+  bool setCPU(const std::string &Name) override {
     return llvm::StringSwitch<bool>(Name)
              .Case("generic", true)
              .Cases("cortex-a53", "cortex-a57", true)
              .Default(false);
   }
 
-  virtual bool handleTargetFeatures(std::vector<std::string> &Features,
-                                    DiagnosticsEngine &Diags) {
+  bool handleTargetFeatures(std::vector<std::string> &Features,
+                            DiagnosticsEngine &Diags) override {
     FPU = FPUMode;
     Crypto = 0;
     for (unsigned i = 0, e = Features.size(); i != e; ++i) {
@@ -3508,18 +3501,21 @@
       if (Features[i] == "+crypto")
         Crypto = 1;
     }
+
+    setDescriptionString();
+
     return true;
   }
 
-  virtual void getGCCRegNames(const char *const *&Names,
-                              unsigned &NumNames) const;
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const;
+  void getGCCRegNames(const char *const *&Names,
+                      unsigned &NumNames) const override;
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override;
 
-  virtual bool isCLZForZeroUndef() const { return false; }
+  bool isCLZForZeroUndef() const override { return false; }
 
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &Info) const {
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &Info) const override {
     switch (*Name) {
     default: return false;
     case 'w': // An FP/SIMD vector register
@@ -3549,12 +3545,12 @@
     }
   }
 
-  virtual const char *getClobbers() const {
+  const char *getClobbers() const override {
     // There are no AArch64 clobbers shared by all asm statements.
     return "";
   }
 
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::AArch64ABIBuiltinVaList;
   }
 };
@@ -3620,9 +3616,48 @@
 #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
 #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
                                               ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsNEON.def"
+
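+// The .def files #undef BUILTIN/LIBBUILTIN when they are done, so the macros
+// have to be redefined before including the next list.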
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+                                              ALL_LANGUAGES },
 #include "clang/Basic/BuiltinsAArch64.def"
 };
 
+class AArch64leTargetInfo : public AArch64TargetInfo {
+  void setDescriptionString() override {
+    DescriptionString = "e-m:e-i64:64-i128:128-n32:64-S128";
+  }
+
+public:
+  AArch64leTargetInfo(const llvm::Triple &Triple)
+    : AArch64TargetInfo(Triple) {
+    BigEndian = false;
+  }
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
+    Builder.defineMacro("__AARCH64EL__");
+    AArch64TargetInfo::getTargetDefines(Opts, Builder);
+  }
+};
+
+class AArch64beTargetInfo : public AArch64TargetInfo {
+  void setDescriptionString() override {
+    DescriptionString = "E-m:e-i64:64-i128:128-n32:64-S128";
+  }
+
+public:
+  AArch64beTargetInfo(const llvm::Triple &Triple)
+    : AArch64TargetInfo(Triple) { }
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
+    Builder.defineMacro("__AARCH64EB__");
+    Builder.defineMacro("__AARCH_BIG_ENDIAN");
+    Builder.defineMacro("__ARM_BIG_ENDIAN");
+    AArch64TargetInfo::getTargetDefines(Opts, Builder);
+  }
+};
+
 } // end anonymous namespace
 
 namespace {
@@ -3668,6 +3703,7 @@
   unsigned SoftFloatABI : 1;
 
   unsigned CRC : 1;
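+  // Set when the "+crypto" target feature is enabled.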
+  unsigned Crypto : 1;
 
   static const Builtin::Info BuiltinInfo[];
 
@@ -3683,40 +3719,153 @@
         T.getOS() != llvm::Triple::Bitrig)
       return false;
     StringRef ArchName = T.getArchName();
-    if (T.getArch() == llvm::Triple::arm) {
-      if (!ArchName.startswith("armv"))
+    if (T.getArch() == llvm::Triple::arm ||
+        T.getArch() == llvm::Triple::armeb) {
+      StringRef VersionStr;
+      if (ArchName.startswith("armv"))
+        VersionStr = ArchName.substr(4);
+      else if (ArchName.startswith("armebv"))
+        VersionStr = ArchName.substr(6);
+      else
         return false;
-      StringRef VersionStr = ArchName.substr(4);
       unsigned Version;
       if (VersionStr.getAsInteger(10, Version))
         return false;
       return Version >= 6;
     }
-    assert(T.getArch() == llvm::Triple::thumb);
-    if (!ArchName.startswith("thumbv"))
+    assert(T.getArch() == llvm::Triple::thumb ||
+           T.getArch() == llvm::Triple::thumbeb);
+    StringRef VersionStr;
+    if (ArchName.startswith("thumbv"))
+      VersionStr = ArchName.substr(6);
+    else if (ArchName.startswith("thumbebv"))
+      VersionStr = ArchName.substr(8);
+    else
       return false;
-    StringRef VersionStr = ArchName.substr(6);
     unsigned Version;
     if (VersionStr.getAsInteger(10, Version))
       return false;
     return Version >= 7;
   }
 
-public:
-  ARMTargetInfo(const llvm::Triple &Triple)
-      : TargetInfo(Triple), ABI("aapcs-linux"), CPU("arm1136j-s"),
-        FPMath(FP_Default), IsAAPCS(true) {
-    BigEndian = false;
-    switch (getTriple().getOS()) {
-    case llvm::Triple::NetBSD:
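+  // Apply the type sizes, alignments and data layout required by the AAPCS.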
+  void setABIAAPCS() {
+    IsAAPCS = true;
+
+    DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
+    const llvm::Triple &T = getTriple();
+
+    // size_t is unsigned long on Darwin and NetBSD.
+    if (T.isOSDarwin() || T.getOS() == llvm::Triple::NetBSD)
       SizeType = UnsignedLong;
-      PtrDiffType = SignedLong;
+    else
+      SizeType = UnsignedInt;
+
+    if (T.getOS() == llvm::Triple::NetBSD) {
       WCharType = SignedInt;
-      break;
-    default:
+    } else {
       // AAPCS 7.1.1, ARM-Linux ABI 2.4: type of wchar_t is unsigned int.
       WCharType = UnsignedInt;
+    }
+
+    UseBitFieldTypeAlignment = true;
+
+    ZeroLengthBitfieldBoundary = 0;
+
+    if (IsThumb) {
+      // Thumb1 "add sp, #imm" requires the immediate value to be a multiple
+      // of 4, so set the preferred alignment for the small types to 32.
+      if (T.isOSBinFormatMachO())
+        DescriptionString = BigEndian ?
+                              "E-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-"
+                              "v128:64:128-a:0:32-n32-S64" :
+                              "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-"
+                              "v128:64:128-a:0:32-n32-S64";
+      else
+        DescriptionString = BigEndian ?
+                              "E-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-"
+                              "v128:64:128-a:0:32-n32-S64" :
+                              "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-"
+                              "v128:64:128-a:0:32-n32-S64";
+
+    } else {
+      if (T.isOSBinFormatMachO())
+        DescriptionString = BigEndian ?
+                              "E-m:o-p:32:32-i64:64-v128:64:128-n32-S64" :
+                              "e-m:o-p:32:32-i64:64-v128:64:128-n32-S64";
+      else
+        DescriptionString = BigEndian ?
+                              "E-m:e-p:32:32-i64:64-v128:64:128-n32-S64" :
+                              "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64";
+    }
+
+    // FIXME: Enumerated types are variable width in straight AAPCS.
+  }
+
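+  // Apply the type sizes, alignments and data layout of the older APCS ABI.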
+  void setABIAPCS() {
+    const llvm::Triple &T = getTriple();
+
+    IsAAPCS = false;
+
+    DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
+
+    // size_t is unsigned int on FreeBSD.
+    if (T.getOS() == llvm::Triple::FreeBSD)
       SizeType = UnsignedInt;
+    else
+      SizeType = UnsignedLong;
+
+    // Revert to using SignedInt on apcs-gnu to comply with existing behaviour.
+    WCharType = SignedInt;
+
+    // Do not respect the alignment of bit-field types when laying out
+    // structures. This corresponds to PCC_BITFIELD_TYPE_MATTERS in gcc.
+    UseBitFieldTypeAlignment = false;
+
+    /// gcc forces the alignment to 4 bytes, regardless of the type of the
+    /// zero length bitfield.  This corresponds to EMPTY_FIELD_BOUNDARY in
+    /// gcc.
+    ZeroLengthBitfieldBoundary = 32;
+
+    if (IsThumb) {
+      // Thumb1 "add sp, #imm" requires the immediate value to be a multiple
+      // of 4, so set the preferred alignment for the small types to 32.
+      if (T.isOSBinFormatMachO())
+        DescriptionString = BigEndian ?
+            "E-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64"
+            "-v64:32:64-v128:32:128-a:0:32-n32-S32" :
+            "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64"
+            "-v64:32:64-v128:32:128-a:0:32-n32-S32";
+      else
+        DescriptionString = BigEndian ?
+            "E-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64"
+            "-v64:32:64-v128:32:128-a:0:32-n32-S32" :
+            "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64"
+            "-v64:32:64-v128:32:128-a:0:32-n32-S32";
+    } else {
+      if (T.isOSBinFormatMachO())
+        DescriptionString = BigEndian ?
+            "E-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32" :
+            "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
+      else
+        DescriptionString = BigEndian ?
+            "E-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32" :
+            "e-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
+    }
+
+    // FIXME: Override "preferred align" for double and long long.
+  }
+
+public:
+  ARMTargetInfo(const llvm::Triple &Triple, bool IsBigEndian)
+      : TargetInfo(Triple), CPU("arm1136j-s"), FPMath(FP_Default),
+        IsAAPCS(true) {
+    BigEndian = IsBigEndian;
+
+    switch (getTriple().getOS()) {
+    case llvm::Triple::NetBSD:
+      PtrDiffType = SignedLong;
+      break;
+    default:
       PtrDiffType = SignedInt;
       break;
     }
@@ -3727,17 +3876,8 @@
 
     // FIXME: Should we just treat this as a feature?
     IsThumb = getTriple().getArchName().startswith("thumb");
-    if (IsThumb) {
-      // Thumb1 add sp, #imm requires the immediate value be multiple of 4,
-      // so set preferred for small types to 32.
-      DescriptionString = ("e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-"
-                           "i64:64:64-f32:32:32-f64:64:64-"
-                           "v64:64:64-v128:64:128-a0:0:32-n32-S64");
-    } else {
-      DescriptionString = ("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                           "i64:64:64-f32:32:32-f64:64:64-"
-                           "v64:64:64-v128:64:128-a0:0:64-n32-S64");
-    }
+
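+    // Default to the AAPCS Linux variant; setABI() picks the matching layout.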
+    setABI("aapcs-linux");
 
     // ARM targets default to using the ARM C++ ABI.
     TheCXXABI.set(TargetCXXABI::GenericARM);
@@ -3753,8 +3893,8 @@
     // zero length bitfield.
     UseZeroLengthBitfieldAlignment = true;
   }
-  virtual const char *getABI() const { return ABI.c_str(); }
-  virtual bool setABI(const std::string &Name) {
+  const char *getABI() const override { return ABI.c_str(); }
+  bool setABI(const std::string &Name) override {
     ABI = Name;
 
     // The defaults (above) are for AAPCS, check if we need to change them.
@@ -3762,53 +3902,22 @@
     // FIXME: We need support for -meabi... we could just mangle it into the
     // name.
     if (Name == "apcs-gnu") {
-      DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
-      // size_t is unsigned int on FreeBSD.
-      if (getTriple().getOS() != llvm::Triple::FreeBSD)
-        SizeType = UnsignedLong;
-
-      // Revert to using SignedInt on apcs-gnu to comply with existing behaviour.
-      WCharType = SignedInt;
-
-      // Do not respect the alignment of bit-field types when laying out
-      // structures. This corresponds to PCC_BITFIELD_TYPE_MATTERS in gcc.
-      UseBitFieldTypeAlignment = false;
-
-      /// gcc forces the alignment to 4 bytes, regardless of the type of the
-      /// zero length bitfield.  This corresponds to EMPTY_FIELD_BOUNDARY in
-      /// gcc.
-      ZeroLengthBitfieldBoundary = 32;
-
-      IsAAPCS = false;
-
-      if (IsThumb) {
-        // Thumb1 add sp, #imm requires the immediate value be multiple of 4,
-        // so set preferred for small types to 32.
-        DescriptionString = ("e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-"
-                             "i64:32:64-f32:32:32-f64:32:64-"
-                             "v64:32:64-v128:32:128-a0:0:32-n32-S32");
-      } else {
-        DescriptionString = ("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                             "i64:32:64-f32:32:32-f64:32:64-"
-                             "v64:32:64-v128:32:128-a0:0:32-n32-S32");
-      }
-
-      // FIXME: Override "preferred align" for double and long long.
-    } else if (Name == "aapcs" || Name == "aapcs-vfp") {
-      // size_t is unsigned long on Darwin.
-      if (getTriple().isOSDarwin())
-        SizeType = UnsignedLong;
-      IsAAPCS = true;
-      // FIXME: Enumerated types are variable width in straight AAPCS.
-    } else if (Name == "aapcs-linux") {
-      IsAAPCS = true;
-    } else
-      return false;
-
-    return true;
+      setABIAPCS();
+      return true;
+    }
+    if (Name == "aapcs" || Name == "aapcs-vfp" || Name == "aapcs-linux") {
+      setABIAAPCS();
+      return true;
+    }
+    return false;
   }
 
-  void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+  void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
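+    // Record the ABI family (AAPCS vs. APCS) as a target feature.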
+    if (IsAAPCS)
+      Features["aapcs"] = true;
+    else
+      Features["apcs"] = true;
+
     StringRef ArchName = getTriple().getArchName();
     if (CPU == "arm1136jf-s" || CPU == "arm1176jzf-s" || CPU == "mpcore")
       Features["vfp2"] = true;
@@ -3820,32 +3929,44 @@
     else if (CPU == "cortex-a5") {
       Features["vfp4"] = true;
       Features["neon"] = true;
-    } else if (CPU == "swift" || CPU == "cortex-a7" || CPU == "cortex-a15") {
+    } else if (CPU == "swift" || CPU == "cortex-a7" ||
+               CPU == "cortex-a12" || CPU == "cortex-a15" ||
+               CPU == "krait") {
       Features["vfp4"] = true;
       Features["neon"] = true;
       Features["hwdiv"] = true;
       Features["hwdiv-arm"] = true;
+    } else if (CPU == "cyclone") {
+      Features["v8fp"] = true;
+      Features["neon"] = true;
+      Features["hwdiv"] = true;
+      Features["hwdiv-arm"] = true;
     } else if (CPU == "cortex-a53" || CPU == "cortex-a57") {
       Features["fp-armv8"] = true;
       Features["neon"] = true;
       Features["hwdiv"] = true;
       Features["hwdiv-arm"] = true;
       Features["crc"] = true;
-    } else if (CPU == "cortex-r5" || CPU == "cortex-m3" ||
-               CPU == "cortex-m4" ||
+      Features["crypto"] = true;
+    } else if (CPU == "cortex-r5" ||
                // Enable the hwdiv extension for all v8a AArch32 cores by
                // default.
                ArchName == "armv8a" || ArchName == "armv8" ||
-               ArchName == "thumbv8a" || ArchName == "thumbv8") {
+               ArchName == "armebv8a" || ArchName == "armebv8" ||
+               ArchName == "thumbv8a" || ArchName == "thumbv8" ||
+               ArchName == "thumbebv8a" || ArchName == "thumbebv8") {
       Features["hwdiv"] = true;
       Features["hwdiv-arm"] = true;
+    } else if (CPU == "cortex-m3" || CPU == "cortex-m4") {
+      Features["hwdiv"] = true;
     }
   }
 
-  virtual bool handleTargetFeatures(std::vector<std::string> &Features,
-                                    DiagnosticsEngine &Diags) {
+  bool handleTargetFeatures(std::vector<std::string> &Features,
+                            DiagnosticsEngine &Diags) override {
     FPU = 0;
     CRC = 0;
+    Crypto = 0;
     SoftFloat = SoftFloatABI = false;
     HWDiv = 0;
     for (unsigned i = 0, e = Features.size(); i != e; ++i) {
@@ -3871,6 +3992,8 @@
         HWDiv |= HWDivARM;
       else if (Features[i] == "+crc")
         CRC = 1;
+      else if (Features[i] == "+crypto")
+        Crypto = 1;
     }
 
     if (!(FPU & NeonFPU) && FPMath == FP_Neon) {
@@ -3894,7 +4017,7 @@
     return true;
   }
 
-  virtual bool hasFeature(StringRef Feature) const {
+  bool hasFeature(StringRef Feature) const override {
     return llvm::StringSwitch<bool>(Feature)
         .Case("arm", true)
         .Case("softfloat", SoftFloat)
@@ -3921,11 +4044,11 @@
       .Cases("arm1176jz-s", "arm1176jzf-s", "6ZK")
       .Cases("arm1136jf-s", "mpcorenovfp", "mpcore", "6K")
       .Cases("arm1156t2-s", "arm1156t2f-s", "6T2")
-      .Cases("cortex-a5", "cortex-a7", "cortex-a8", "7A")
-      .Cases("cortex-a9", "cortex-a12", "cortex-a15", "7A")
+      .Cases("cortex-a5", "cortex-a7", "cortex-a8", "cortex-a9-mp", "7A")
+      .Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "7A")
       .Cases("cortex-r4", "cortex-r5", "7R")
-      .Case("cortex-a9-mp", "7F")
       .Case("swift", "7S")
+      .Case("cyclone", "8A")
       .Cases("cortex-m3", "cortex-m4", "7M")
       .Case("cortex-m0", "6M")
       .Cases("cortex-a53", "cortex-a57", "8A")
@@ -3934,29 +4057,27 @@
   static const char *getCPUProfile(StringRef Name) {
     return llvm::StringSwitch<const char*>(Name)
       .Cases("cortex-a5", "cortex-a7", "cortex-a8", "A")
-      .Cases("cortex-a9", "cortex-a12", "cortex-a15", "A")
+      .Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "A")
       .Cases("cortex-a53", "cortex-a57", "A")
       .Cases("cortex-m3", "cortex-m4", "cortex-m0", "M")
       .Cases("cortex-r4", "cortex-r5", "R")
       .Default("");
   }
-  virtual bool setCPU(const std::string &Name) {
+  bool setCPU(const std::string &Name) override {
     if (!getCPUDefineSuffix(Name))
       return false;
 
     CPU = Name;
     return true;
   }
-  virtual bool setFPMath(StringRef Name);
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  bool setFPMath(StringRef Name) override;
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     // Target identification.
     Builder.defineMacro("__arm");
     Builder.defineMacro("__arm__");
 
     // Target properties.
-    Builder.defineMacro("__ARMEL__");
-    Builder.defineMacro("__LITTLE_ENDIAN__");
     Builder.defineMacro("__REGISTER_PREFIX__", "");
 
     StringRef CPUArch = getCPUDefineSuffix(CPU);
@@ -3969,12 +4090,12 @@
     StringRef CPUProfile = getCPUProfile(CPU);
     if (!CPUProfile.empty())
       Builder.defineMacro("__ARM_ARCH_PROFILE", CPUProfile);
-    
+
     // Subtarget options.
 
     // FIXME: It's more complicated than this and we don't really support
     // interworking.
-    if (5 <= CPUArchVer && CPUArchVer <= 7)
+    if (5 <= CPUArchVer && CPUArchVer <= 8)
       Builder.defineMacro("__THUMB_INTERWORK__");
 
     if (ABI == "aapcs" || ABI == "aapcs-linux" || ABI == "aapcs-vfp") {
@@ -3996,7 +4117,11 @@
     if (IsThumb) {
       Builder.defineMacro("__THUMBEL__");
       Builder.defineMacro("__thumb__");
-      if (CPUArch == "6T2" || CPUArchVer == 7)
+      // We check both CPUArchVer and ArchName because when only the triple
+      // is specified, the default CPU is arm1136j-s.
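+      // For example, with a bare "thumbv7" triple the default CPU is still
+      // arm1136j-s (an architecture-v6 core), yet __thumb2__ should be
+      // defined, so the arch name is consulted as well.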
+      StringRef ArchName = getTriple().getArchName();
+      if (CPUArch == "6T2" || CPUArchVer >= 7 || ArchName.endswith("v6t2") ||
+          ArchName.endswith("v7") || ArchName.endswith("v8"))
         Builder.defineMacro("__thumb2__");
     }
     if (((HWDiv & HWDivThumb) && IsThumb) || ((HWDiv & HWDivARM) && !IsThumb))
@@ -4014,17 +4139,28 @@
       if (FPU & VFP4FPU)
         Builder.defineMacro("__ARM_VFPV4__");
     }
-    
+
     // This only gets set when Neon instructions are actually available, unlike
     // the VFP define, hence the soft float and arch check. This is subtly
     // different from gcc, we follow the intent which was that it should be set
     // when Neon instructions are actually available.
-    if ((FPU & NeonFPU) && !SoftFloat && CPUArchVer >= 7)
+    if ((FPU & NeonFPU) && !SoftFloat && CPUArchVer >= 7) {
+      Builder.defineMacro("__ARM_NEON");
       Builder.defineMacro("__ARM_NEON__");
+    }
+
+    Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
+                        Opts.ShortWChar ? "2" : "4");
+
+    Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM",
+                        Opts.ShortEnums ? "1" : "4");
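+    // For instance, -fshort-wchar yields a 2-byte wchar_t ("2" above), and
+    // with -fshort-enums an enum such as `enum { A };` fits in a single
+    // byte, hence "1"; both otherwise default to 4 bytes.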
 
     if (CRC)
       Builder.defineMacro("__ARM_FEATURE_CRC32");
 
+    if (Crypto)
+      Builder.defineMacro("__ARM_FEATURE_CRYPTO");
+
     if (CPUArchVer >= 6 && CPUArch != "6M") {
       Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
       Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -4032,21 +4168,21 @@
       Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
     }
   }
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     Records = BuiltinInfo;
     NumRecords = clang::ARM::LastTSBuiltin-Builtin::FirstTSBuiltin;
   }
-  virtual bool isCLZForZeroUndef() const { return false; }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  bool isCLZForZeroUndef() const override { return false; }
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return IsAAPCS ? AAPCSABIBuiltinVaList : TargetInfo::VoidPtrBuiltinVaList;
   }
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &NumNames) const;
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const;
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &Info) const {
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &NumNames) const override;
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override;
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &Info) const override {
     switch (*Name) {
     default: break;
     case 'l': // r0-r7
@@ -4076,7 +4212,7 @@
     }
     return false;
   }
-  virtual std::string convertConstraint(const char *&Constraint) const {
+  std::string convertConstraint(const char *&Constraint) const override {
     std::string R;
     switch (*Constraint) {
     case 'U':   // Two-character constraint; add "^" hint for later parsing.
@@ -4091,9 +4227,8 @@
     }
     return R;
   }
-  virtual bool validateConstraintModifier(StringRef Constraint,
-                                          const char Modifier,
-                                          unsigned Size) const {
+  bool validateConstraintModifier(StringRef Constraint, const char Modifier,
+                                  unsigned Size) const override {
     bool isOutput = (Constraint[0] == '=');
     bool isInOut = (Constraint[0] == '+');
 
@@ -4118,16 +4253,16 @@
 
     return true;
   }
-  virtual const char *getClobbers() const {
+  const char *getClobbers() const override {
     // FIXME: Is this really right?
     return "";
   }
 
-  virtual CallingConvCheckResult checkCallingConvention(CallingConv CC) const {
+  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
     return (CC == CC_AAPCS || CC == CC_AAPCS_VFP) ? CCCR_OK : CCCR_Warning;
   }
 
-  virtual int getEHDataRegisterNumber(unsigned RegNo) const {
+  int getEHDataRegisterNumber(unsigned RegNo) const override {
     if (RegNo == 0) return 0;
     if (RegNo == 1) return 1;
     return -1;
@@ -4205,25 +4340,53 @@
 #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
 #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
                                               ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsNEON.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+                                              ALL_LANGUAGES },
 #include "clang/Basic/BuiltinsARM.def"
 };
+
+class ARMleTargetInfo : public ARMTargetInfo {
+public:
+  ARMleTargetInfo(const llvm::Triple &Triple)
+    : ARMTargetInfo(Triple, false) { }
+  virtual void getTargetDefines(const LangOptions &Opts,
+                                MacroBuilder &Builder) const {
+    Builder.defineMacro("__ARMEL__");
+    ARMTargetInfo::getTargetDefines(Opts, Builder);
+  }
+};
+
+class ARMbeTargetInfo : public ARMTargetInfo {
+public:
+  ARMbeTargetInfo(const llvm::Triple &Triple)
+    : ARMTargetInfo(Triple, true) { }
+  virtual void getTargetDefines(const LangOptions &Opts,
+                                MacroBuilder &Builder) const {
+    Builder.defineMacro("__ARMEB__");
+    Builder.defineMacro("__ARM_BIG_ENDIAN");
+    ARMTargetInfo::getTargetDefines(Opts, Builder);
+  }
+};
 } // end anonymous namespace.
 
 namespace {
 class DarwinARMTargetInfo :
-  public DarwinTargetInfo<ARMTargetInfo> {
+  public DarwinTargetInfo<ARMleTargetInfo> {
 protected:
-  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
-                            MacroBuilder &Builder) const {
+  void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                    MacroBuilder &Builder) const override {
     getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
   }
 
 public:
   DarwinARMTargetInfo(const llvm::Triple &Triple)
-      : DarwinTargetInfo<ARMTargetInfo>(Triple) {
+      : DarwinTargetInfo<ARMleTargetInfo>(Triple) {
     HasAlignMac68kSupport = true;
     // iOS always has 64-bit atomic instructions.
-    // FIXME: This should be based off of the target features in ARMTargetInfo.
+    // FIXME: This should be based off of the target features in ARMleTargetInfo.
     MaxAtomicInlineWidth = 64;
 
     // Darwin on iOS uses a variant of the ARM C++ ABI.
@@ -4234,6 +4397,294 @@
 
 
 namespace {
+class ARM64TargetInfo : public TargetInfo {
+  static const TargetInfo::GCCRegAlias GCCRegAliases[];
+  static const char *const GCCRegNames[];
+
+  static const Builtin::Info BuiltinInfo[];
+
+  std::string ABI;
+
+public:
+  ARM64TargetInfo(const llvm::Triple &Triple)
+      : TargetInfo(Triple), ABI("aapcs") {
+    BigEndian = false;
+    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+    IntMaxType = SignedLong;
+    UIntMaxType = UnsignedLong;
+    Int64Type = SignedLong;
+    WCharType = UnsignedInt;
+    MaxVectorAlign = 128;
+    RegParmMax = 8;
+    MaxAtomicInlineWidth = 128;
+    MaxAtomicPromoteWidth = 128;
+
+    LongDoubleWidth = LongDoubleAlign = 128;
+    LongDoubleFormat = &llvm::APFloat::IEEEquad;
+
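+    // In the LLVM data layout string, "m:o" selects Mach-O name mangling
+    // while "m:e" selects ELF mangling; the remainder of the layout is the
+    // same for both object formats.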
+    if (Triple.isOSBinFormatMachO())
+      DescriptionString = "e-m:o-i64:64-i128:128-n32:64-S128";
+    else
+      DescriptionString = "e-m:e-i64:64-i128:128-n32:64-S128";
+
+    // {} in inline assembly are neon specifiers, not assembly variant
+    // specifiers.
+    NoAsmVariants = true;
+
+    // ARM64 targets default to using the ARM C++ ABI.
+    TheCXXABI.set(TargetCXXABI::GenericAArch64);
+  }
+
+  virtual const char *getABI() const { return ABI.c_str(); }
+  virtual bool setABI(const std::string &Name) {
+    if (Name != "aapcs" && Name != "darwinpcs")
+      return false;
+
+    ABI = Name;
+    return true;
+  }
+
+  virtual bool setCPU(const std::string &Name) {
+    bool CPUKnown = llvm::StringSwitch<bool>(Name)
+                        .Case("arm64-generic", true)
+                        .Case("cyclone", true)
+                        .Default(false);
+    return CPUKnown;
+  }
+
+  virtual void getTargetDefines(const LangOptions &Opts,
+                                MacroBuilder &Builder) const {
+    // Target identification.
+    Builder.defineMacro("__arm64");
+    Builder.defineMacro("__arm64__");
+    Builder.defineMacro("__aarch64__");
+    Builder.defineMacro("__ARM64_ARCH_8__");
+    Builder.defineMacro("__AARCH64_SIMD__");
+    Builder.defineMacro("__ARM_NEON__");
+
+    // Target properties.
+    Builder.defineMacro("_LP64");
+    Builder.defineMacro("__LP64__");
+    Builder.defineMacro("__LITTLE_ENDIAN__");
+
+    // Subtarget options.
+    Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+    Builder.defineMacro("__aarch64__");
+    Builder.defineMacro("__AARCH64EL__");
+
+    // ACLE predefines. Many can only have one possible value on v8 AArch64.
+    Builder.defineMacro("__ARM_ACLE", "200");
+    Builder.defineMacro("__ARM_ARCH", "8");
+    Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");
+
+    Builder.defineMacro("__ARM_64BIT_STATE");
+    Builder.defineMacro("__ARM_PCS_AAPCS64");
+    Builder.defineMacro("__ARM_ARCH_ISA_A64");
+
+    Builder.defineMacro("__ARM_FEATURE_UNALIGNED");
+    Builder.defineMacro("__ARM_FEATURE_CLZ");
+    Builder.defineMacro("__ARM_FEATURE_FMA");
+    Builder.defineMacro("__ARM_FEATURE_DIV");
+
+    Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
+
+    // 0xe implies support for half, single and double precision operations.
+    Builder.defineMacro("__ARM_FP", "0xe");
+
+    // PCS specifies this for SysV variants, which is all we support. Other ABIs
+    // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
+    Builder.defineMacro("__ARM_FP16_FORMAT_IEEE");
+
+    if (Opts.FastMath || Opts.FiniteMathOnly)
+      Builder.defineMacro("__ARM_FP_FAST");
+
+    if ((Opts.C99 || Opts.C11) && !Opts.Freestanding)
+      Builder.defineMacro("__ARM_FP_FENV_ROUNDING");
+
+    Builder.defineMacro("__ARM_SIZEOF_WCHAR_T", Opts.ShortWChar ? "2" : "4");
+
+    Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM",
+                        Opts.ShortEnums ? "1" : "4");
+
+    if (BigEndian)
+      Builder.defineMacro("__ARM_BIG_ENDIAN");
+
+    // FIXME: the target should support NEON as an optional extension, like
+    // the OSS AArch64.
+    Builder.defineMacro("__ARM_NEON");
+    // 64-bit NEON supports half, single and double precision operations.
+    Builder.defineMacro("__ARM_NEON_FP", "7");
+
+    // FIXME: the target should support crypto as an optional extension, like
+    // the OSS AArch64
+    Builder.defineMacro("__ARM_FEATURE_CRYPTO");
+  }
+
+  virtual void getTargetBuiltins(const Builtin::Info *&Records,
+                                 unsigned &NumRecords) const {
+    Records = BuiltinInfo;
+    NumRecords = clang::ARM64::LastTSBuiltin - Builtin::FirstTSBuiltin;
+  }
+
+  virtual bool hasFeature(StringRef Feature) const {
+    return llvm::StringSwitch<bool>(Feature)
+        .Case("arm64", true)
+        .Case("neon", true)
+        .Default(false);
+  }
+
+  virtual bool isCLZForZeroUndef() const { return false; }
+
+  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+    return TargetInfo::AArch64ABIBuiltinVaList;
+  }
+
+  virtual void getGCCRegNames(const char *const *&Names,
+                              unsigned &NumNames) const;
+  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                                unsigned &NumAliases) const;
+
+  virtual bool validateAsmConstraint(const char *&Name,
+                                     TargetInfo::ConstraintInfo &Info) const {
+    switch (*Name) {
+    default:
+      return false;
+    case 'w': // Floating point and SIMD registers (V0-V31)
+      Info.setAllowsRegister();
+      return true;
+    case 'z': // Zero register, wzr or xzr
+      Info.setAllowsRegister();
+      return true;
+    case 'x': // Floating point and SIMD registers (V0-V15)
+      Info.setAllowsRegister();
+      return true;
+    case 'Q': // A memory address that is a single base register.
+      Info.setAllowsMemory();
+      return true;
+    }
+    return false;
+  }
+
+  virtual bool validateConstraintModifier(StringRef Constraint,
+                                          const char Modifier,
+                                          unsigned Size) const {
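+    // Hypothetical usage: `int i; asm("add %w0, %w0, #1" : "+r"(i));` is
+    // accepted because the 'w' modifier requests the 32-bit register view;
+    // without a modifier an 'r' operand is taken to be a 64-bit 'x'
+    // register, so only Size == 64 validates below.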
+    // Strip off constraint modifiers.
+    while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
+      Constraint = Constraint.substr(1);
+
+    switch (Constraint[0]) {
+    default:
+      return true;
+    case 'z':
+    case 'r': {
+      switch (Modifier) {
+      case 'x':
+      case 'w':
+        // For now assume that the person knows what they're
+        // doing with the modifier.
+        return true;
+      default:
+        // By default an 'r' constraint will be in the 'x'
+        // registers.
+        return (Size == 64);
+      }
+    }
+    }
+  }
+
+  virtual const char *getClobbers() const { return ""; }
+
+  int getEHDataRegisterNumber(unsigned RegNo) const {
+    if (RegNo == 0)
+      return 0;
+    if (RegNo == 1)
+      return 1;
+    return -1;
+  }
+};
+
+const char *const ARM64TargetInfo::GCCRegNames[] = {
+  // 32-bit Integer registers
+  "w0",  "w1",  "w2",  "w3",  "w4",  "w5",  "w6",  "w7",  "w8",  "w9",  "w10",
+  "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21",
+  "w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",
+
+  // 64-bit Integer registers
+  "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",  "x8",  "x9",  "x10",
+  "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21",
+  "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp",  "lr",  "sp",
+
+  // 32-bit floating point registers
+  "s0",  "s1",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",  "s8",  "s9",  "s10",
+  "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21",
+  "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+
+  // 64-bit floating point registers
+  "d0",  "d1",  "d2",  "d3",  "d4",  "d5",  "d6",  "d7",  "d8",  "d9",  "d10",
+  "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
+  "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
+
+  // Vector registers
+  "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",  "v8",  "v9",  "v10",
+  "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
+  "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
+};
+
+void ARM64TargetInfo::getGCCRegNames(const char *const *&Names,
+                                     unsigned &NumNames) const {
+  Names = GCCRegNames;
+  NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias ARM64TargetInfo::GCCRegAliases[] = {
+  { { "w31" }, "wsp" },
+  { { "x29" }, "fp" },
+  { { "x30" }, "lr" },
+  { { "x31" }, "sp" },
+  // The S/D/Q and W/X registers overlap, but aren't really aliases; we
+  // don't want to substitute one of these for a different-sized one.
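+  // For instance, `register void *frame asm("x29");` is expected to resolve
+  // through this table, since "x29" normalizes to the canonical "fp" name.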
+};
+
+void ARM64TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+                                       unsigned &NumAliases) const {
+  Aliases = GCCRegAliases;
+  NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+
+const Builtin::Info ARM64TargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS)                                               \
+  { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsNEON.def"
+
+#define BUILTIN(ID, TYPE, ATTRS)                                               \
+  { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsARM64.def"
+};
+} // end anonymous namespace.
+
+namespace {
+class DarwinARM64TargetInfo : public DarwinTargetInfo<ARM64TargetInfo> {
+public:
+  DarwinARM64TargetInfo(const llvm::Triple &Triple)
+      : DarwinTargetInfo<ARM64TargetInfo>(Triple) {
+    Int64Type = SignedLongLong;
+    WCharType = SignedInt;
+    UseSignedCharForObjCBool = false;
+
+    LongDoubleWidth = LongDoubleAlign = 64;
+    LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+
+    TheCXXABI.set(TargetCXXABI::iOS64);
+  }
+
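+  // Darwin's ARM64 ABI is assumed here to use a plain char* va_list rather
+  // than the AAPCS64 aggregate, hence the CharPtrBuiltinVaList override.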
+  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+    return TargetInfo::CharPtrBuiltinVaList;
+  }
+};
+} // end anonymous namespace
+
+namespace {
 // Hexagon abstract base class
 class HexagonTargetInfo : public TargetInfo {
   static const Builtin::Info BuiltinInfo[];
@@ -4243,41 +4694,39 @@
 public:
   HexagonTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
     BigEndian = false;
-    DescriptionString = ("e-p:32:32:32-"
-                         "i64:64:64-i32:32:32-i16:16:16-i1:32:32-"
-                         "f64:64:64-f32:32:32-a0:0-n32");
+    DescriptionString = "e-m:e-p:32:32-i1:32-i64:64-a:0-n32";
 
     // {} in inline assembly are packet specifiers, not assembly variant
     // specifiers.
     NoAsmVariants = true;
   }
 
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     Records = BuiltinInfo;
     NumRecords = clang::Hexagon::LastTSBuiltin-Builtin::FirstTSBuiltin;
   }
 
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &Info) const {
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &Info) const override {
     return true;
   }
 
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const;
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override;
 
-  virtual bool hasFeature(StringRef Feature) const {
+  bool hasFeature(StringRef Feature) const override {
     return Feature == "hexagon";
   }
-  
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::CharPtrBuiltinVaList;
   }
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &NumNames) const;
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const;
-  virtual const char *getClobbers() const {
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &NumNames) const override;
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override;
+  const char *getClobbers() const override {
     return "";
   }
 
@@ -4288,7 +4737,7 @@
       .Default(0);
   }
 
-  virtual bool setCPU(const std::string &Name) {
+  bool setCPU(const std::string &Name) override {
     if (!getHexagonCPUSuffix(Name))
       return false;
 
@@ -4396,47 +4845,47 @@
 public:
   SparcTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {}
 
-  virtual bool handleTargetFeatures(std::vector<std::string> &Features,
-                                    DiagnosticsEngine &Diags) {
+  bool handleTargetFeatures(std::vector<std::string> &Features,
+                            DiagnosticsEngine &Diags) override {
     SoftFloat = false;
     for (unsigned i = 0, e = Features.size(); i != e; ++i)
       if (Features[i] == "+soft-float")
         SoftFloat = true;
     return true;
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     DefineStd(Builder, "sparc", Opts);
     Builder.defineMacro("__REGISTER_PREFIX__", "");
 
     if (SoftFloat)
       Builder.defineMacro("SOFT_FLOAT", "1");
   }
-  
-  virtual bool hasFeature(StringRef Feature) const {
+
+  bool hasFeature(StringRef Feature) const override {
     return llvm::StringSwitch<bool>(Feature)
              .Case("softfloat", SoftFloat)
              .Case("sparc", true)
              .Default(false);
   }
-  
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     // FIXME: Implement!
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::VoidPtrBuiltinVaList;
   }
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &NumNames) const;
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const;
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &info) const {
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &NumNames) const override;
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override;
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &info) const override {
     // FIXME: Implement!
     return false;
   }
-  virtual const char *getClobbers() const {
+  const char *getClobbers() const override {
     // FIXME: Implement!
     return "";
   }
@@ -4500,13 +4949,11 @@
 class SparcV8TargetInfo : public SparcTargetInfo {
 public:
   SparcV8TargetInfo(const llvm::Triple &Triple) : SparcTargetInfo(Triple) {
-    // FIXME: Support Sparc quad-precision long double?
-    DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64";
+    DescriptionString = "E-m:e-p:32:32-i64:64-f128:64-n32-S64";
   }
 
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     SparcTargetInfo::getTargetDefines(Opts, Builder);
     Builder.defineMacro("__sparcv8");
   }
@@ -4517,8 +4964,7 @@
 public:
   SparcV9TargetInfo(const llvm::Triple &Triple) : SparcTargetInfo(Triple) {
     // FIXME: Support Sparc quad-precision long double?
-    DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32:64-S128";
+    DescriptionString = "E-m:e-i64:64-n32:64-S128";
     // This is an LP64 platform.
     LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
 
@@ -4531,10 +4977,17 @@
       UIntMaxType = UnsignedLong;
     }
     Int64Type = IntMaxType;
+
+    // The SPARCv8 System V ABI has a 128-bit long double that is only
+    // 64-bit aligned. The SPARCv9 SCD 2.4.1 says it is 16-byte aligned.
+    LongDoubleWidth = 128;
+    LongDoubleAlign = 128;
+    LongDoubleFormat = &llvm::APFloat::IEEEquad;
+    MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
   }
 
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     SparcTargetInfo::getTargetDefines(Opts, Builder);
     Builder.defineMacro("__sparcv9");
     Builder.defineMacro("__arch64__");
@@ -4547,6 +5000,22 @@
       Builder.defineMacro("__sparcv9__");
     }
   }
+
+  bool setCPU(const std::string &Name) override {
+    bool CPUKnown = llvm::StringSwitch<bool>(Name)
+      .Case("v9", true)
+      .Case("ultrasparc", true)
+      .Case("ultrasparc3", true)
+      .Case("niagara", true)
+      .Case("niagara2", true)
+      .Case("niagara3", true)
+      .Case("niagara4", true)
+      .Default(false);
+
+    // No need to store the CPU yet.  There aren't any CPU-specific
+    // macros to define.
+    return CPUKnown;
+  }
 };
 
 } // end anonymous namespace.
@@ -4584,42 +5053,41 @@
       LongDoubleAlign = 64;
       LongDoubleFormat = &llvm::APFloat::IEEEquad;
       MinGlobalAlign = 16;
-      DescriptionString = "E-p:64:64:64-i1:8:16-i8:8:16-i16:16-i32:32-i64:64"
-       "-f32:32-f64:64-f128:64-a0:8:16-n32:64";
+      DescriptionString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64";
       MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
     }
-    virtual void getTargetDefines(const LangOptions &Opts,
-                                  MacroBuilder &Builder) const {
+    void getTargetDefines(const LangOptions &Opts,
+                          MacroBuilder &Builder) const override {
       Builder.defineMacro("__s390__");
       Builder.defineMacro("__s390x__");
       Builder.defineMacro("__zarch__");
       Builder.defineMacro("__LONG_DOUBLE_128__");
     }
-    virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                   unsigned &NumRecords) const {
+    void getTargetBuiltins(const Builtin::Info *&Records,
+                           unsigned &NumRecords) const override {
       // FIXME: Implement.
       Records = 0;
       NumRecords = 0;
     }
 
-    virtual void getGCCRegNames(const char *const *&Names,
-                                unsigned &NumNames) const;
-    virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                  unsigned &NumAliases) const {
+    void getGCCRegNames(const char *const *&Names,
+                        unsigned &NumNames) const override;
+    void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                          unsigned &NumAliases) const override {
       // No aliases.
       Aliases = 0;
       NumAliases = 0;
     }
-    virtual bool validateAsmConstraint(const char *&Name,
-                                       TargetInfo::ConstraintInfo &info) const;
-    virtual const char *getClobbers() const {
+    bool validateAsmConstraint(const char *&Name,
+                               TargetInfo::ConstraintInfo &info) const override;
+    const char *getClobbers() const override {
       // FIXME: Is this really right?
       return "";
     }
-    virtual BuiltinVaListKind getBuiltinVaListKind() const {
+    BuiltinVaListKind getBuiltinVaListKind() const override {
       return TargetInfo::SystemZBuiltinVaList;
     }
-    virtual bool setCPU(const std::string &Name) {
+    bool setCPU(const std::string &Name) override {
       bool CPUKnown = llvm::StringSwitch<bool>(Name)
         .Case("z10", true)
         .Case("z196", true)
@@ -4693,41 +5161,41 @@
       IntPtrType = SignedInt;
       PtrDiffType = SignedInt;
       SigAtomicType = SignedLong;
-      DescriptionString = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16";
-   }
-    virtual void getTargetDefines(const LangOptions &Opts,
-                                  MacroBuilder &Builder) const {
+      DescriptionString = "e-m:e-p:16:16-i32:16:32-n8:16";
+    }
+    void getTargetDefines(const LangOptions &Opts,
+                          MacroBuilder &Builder) const override {
       Builder.defineMacro("MSP430");
       Builder.defineMacro("__MSP430__");
       // FIXME: defines for different 'flavours' of MCU
     }
-    virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                   unsigned &NumRecords) const {
-     // FIXME: Implement.
+    void getTargetBuiltins(const Builtin::Info *&Records,
+                           unsigned &NumRecords) const override {
+      // FIXME: Implement.
       Records = 0;
       NumRecords = 0;
     }
-    virtual bool hasFeature(StringRef Feature) const {
+    bool hasFeature(StringRef Feature) const override {
       return Feature == "msp430";
     }
-    virtual void getGCCRegNames(const char * const *&Names,
-                                unsigned &NumNames) const;
-    virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                  unsigned &NumAliases) const {
+    void getGCCRegNames(const char * const *&Names,
+                        unsigned &NumNames) const override;
+    void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                          unsigned &NumAliases) const override {
       // No aliases.
       Aliases = 0;
       NumAliases = 0;
     }
-    virtual bool validateAsmConstraint(const char *&Name,
-                                       TargetInfo::ConstraintInfo &info) const {
+    bool validateAsmConstraint(const char *&Name,
+                               TargetInfo::ConstraintInfo &info) const override {
       // No target constraints for now.
       return false;
     }
-    virtual const char *getClobbers() const {
+    const char *getClobbers() const override {
       // FIXME: Is this really right?
       return "";
     }
-    virtual BuiltinVaListKind getBuiltinVaListKind() const {
+    BuiltinVaListKind getBuiltinVaListKind() const override {
       // FIXME: implement
       return TargetInfo::CharPtrBuiltinVaList;
    }
@@ -4789,40 +5257,38 @@
       FloatFormat = &llvm::APFloat::IEEEsingle;
       DoubleFormat = &llvm::APFloat::IEEEsingle;
       LongDoubleFormat = &llvm::APFloat::IEEEsingle;
-      DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-"
-                          "i16:16:32-i32:32:32-i64:32:32-"
-                          "f32:32:32-f64:32:32-v64:32:32-"
-                          "v128:32:32-a0:0:32-n32";
+      DescriptionString = "E-p:32:32-i8:8:32-i16:16:32-i64:32"
+                          "-f64:32-v64:32-v128:32-a:0:32-n32";
       AddrSpaceMap = &TCEOpenCLAddrSpaceMap;
       UseAddrSpaceMapMangling = true;
     }
 
-    virtual void getTargetDefines(const LangOptions &Opts,
-                                  MacroBuilder &Builder) const {
+    void getTargetDefines(const LangOptions &Opts,
+                          MacroBuilder &Builder) const override {
       DefineStd(Builder, "tce", Opts);
       Builder.defineMacro("__TCE__");
       Builder.defineMacro("__TCE_V1__");
     }
-    virtual bool hasFeature(StringRef Feature) const {
+    bool hasFeature(StringRef Feature) const override {
       return Feature == "tce";
     }
-    
-    virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                   unsigned &NumRecords) const {}
-    virtual const char *getClobbers() const {
+
+    void getTargetBuiltins(const Builtin::Info *&Records,
+                           unsigned &NumRecords) const override {}
+    const char *getClobbers() const override {
       return "";
     }
-    virtual BuiltinVaListKind getBuiltinVaListKind() const {
+    BuiltinVaListKind getBuiltinVaListKind() const override {
       return TargetInfo::VoidPtrBuiltinVaList;
     }
-    virtual void getGCCRegNames(const char * const *&Names,
-                                unsigned &NumNames) const {}
-    virtual bool validateAsmConstraint(const char *&Name,
-                                       TargetInfo::ConstraintInfo &info) const {
+    void getGCCRegNames(const char * const *&Names,
+                        unsigned &NumNames) const override {}
+    bool validateAsmConstraint(const char *&Name,
+                               TargetInfo::ConstraintInfo &info) const override{
       return true;
     }
-    virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                  unsigned &NumAliases) const {}
+    void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                          unsigned &NumAliases) const override {}
   };
 }
 
@@ -4855,21 +5321,31 @@
         IsNan2008(false), IsSingleFloat(false), FloatABI(HardFloat),
         DspRev(NoDSP), HasMSA(false), HasFP64(false), ABI(ABIStr) {}
 
-  virtual const char *getABI() const { return ABI.c_str(); }
-  virtual bool setABI(const std::string &Name) = 0;
-  virtual bool setCPU(const std::string &Name) {
+  const char *getABI() const override { return ABI.c_str(); }
+  bool setABI(const std::string &Name) override = 0;
+  bool setCPU(const std::string &Name) override {
     CPU = Name;
     return true;
   }
-  void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+  void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
+    // The backend enables certain ABIs by default according to the
+    // architecture.
+    // Disable both possible defaults so that we don't end up with multiple
+    // ABIs selected and trigger an assertion.
+    Features["o32"] = false;
+    Features["n64"] = false;
+
     Features[ABI] = true;
     Features[CPU] = true;
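+    // For example, a mips64 target using the "n32" ABI ends up with
+    // { o32: false, n64: false, n32: true, mips64r2: true }, so the backend
+    // sees exactly one ABI feature enabled.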
   }
 
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
-    DefineStd(Builder, "mips", Opts);
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
+    Builder.defineMacro("__mips__");
     Builder.defineMacro("_mips");
+    if (Opts.GNUMode)
+      Builder.defineMacro("mips");
+
     Builder.defineMacro("__REGISTER_PREFIX__", "");
 
     switch (FloatABI) {
@@ -4922,22 +5398,22 @@
     Builder.defineMacro("_MIPS_ARCH_" + StringRef(CPU).upper());
   }
 
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     Records = BuiltinInfo;
     NumRecords = clang::Mips::LastTSBuiltin - Builtin::FirstTSBuiltin;
   }
-  virtual bool hasFeature(StringRef Feature) const {
+  bool hasFeature(StringRef Feature) const override {
     return llvm::StringSwitch<bool>(Feature)
       .Case("mips", true)
       .Case("fp64", HasFP64)
       .Default(false);
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::VoidPtrBuiltinVaList;
   }
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &NumNames) const {
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &NumNames) const override {
     static const char *const GCCRegNames[] = {
       // CPU register names
       // Must match second column of GCCRegAliases
@@ -4965,10 +5441,10 @@
     Names = GCCRegNames;
     NumNames = llvm::array_lengthof(GCCRegNames);
   }
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const = 0;
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &Info) const {
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override = 0;
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &Info) const override {
     switch (*Name) {
     default:
       return false;
@@ -4988,13 +5464,13 @@
     }
   }
 
-  virtual const char *getClobbers() const {
+  const char *getClobbers() const override {
     // FIXME: Implement!
     return "";
   }
 
-  virtual bool handleTargetFeatures(std::vector<std::string> &Features,
-                                    DiagnosticsEngine &Diags) {
+  bool handleTargetFeatures(std::vector<std::string> &Features,
+                            DiagnosticsEngine &Diags) override {
     IsMips16 = false;
     IsMicromips = false;
     IsNan2008 = false;
@@ -5041,7 +5517,7 @@
     return true;
   }
 
-  virtual int getEHDataRegisterNumber(unsigned RegNo) const {
+  int getEHDataRegisterNumber(unsigned RegNo) const override {
     if (RegNo == 0) return 4;
     if (RegNo == 1) return 5;
     return -1;
@@ -5058,12 +5534,12 @@
 class Mips32TargetInfoBase : public MipsTargetInfoBase {
 public:
   Mips32TargetInfoBase(const llvm::Triple &Triple)
-      : MipsTargetInfoBase(Triple, "o32", "mips32") {
+      : MipsTargetInfoBase(Triple, "o32", "mips32r2") {
     SizeType = UnsignedInt;
     PtrDiffType = SignedInt;
     MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
   }
-  virtual bool setABI(const std::string &Name) {
+  bool setABI(const std::string &Name) override {
     if ((Name == "o32") || (Name == "eabi")) {
       ABI = Name;
       return true;
@@ -5073,10 +5549,12 @@
     } else
       return false;
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     MipsTargetInfoBase::getTargetDefines(Opts, Builder);
 
+    Builder.defineMacro("__mips", "32");
+
     if (ABI == "o32") {
       Builder.defineMacro("__mips_o32");
       Builder.defineMacro("_ABIO32", "1");
@@ -5087,8 +5565,8 @@
     else
       llvm_unreachable("Invalid ABI for Mips32.");
   }
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const {
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override {
     static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
       { { "at" },  "$1" },
       { { "v0" },  "$2" },
@@ -5128,17 +5606,16 @@
 };
 
 class Mips32EBTargetInfo : public Mips32TargetInfoBase {
-  virtual void setDescriptionString() {
-    DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64";
+  void setDescriptionString() override {
+    DescriptionString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64";
   }
 
 public:
   Mips32EBTargetInfo(const llvm::Triple &Triple)
       : Mips32TargetInfoBase(Triple) {
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     DefineStd(Builder, "MIPSEB", Opts);
     Builder.defineMacro("_MIPSEB");
     Mips32TargetInfoBase::getTargetDefines(Opts, Builder);
@@ -5146,9 +5623,8 @@
 };
 
 class Mips32ELTargetInfo : public Mips32TargetInfoBase {
-  virtual void setDescriptionString() {
-    DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
-                        "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64";
+  void setDescriptionString() override {
+    DescriptionString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64";
   }
 
 public:
@@ -5156,8 +5632,8 @@
       : Mips32TargetInfoBase(Triple) {
     BigEndian = false;
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     DefineStd(Builder, "MIPSEL", Opts);
     Builder.defineMacro("_MIPSEL");
     Mips32TargetInfoBase::getTargetDefines(Opts, Builder);
@@ -5167,7 +5643,7 @@
 class Mips64TargetInfoBase : public MipsTargetInfoBase {
 public:
   Mips64TargetInfoBase(const llvm::Triple &Triple)
-      : MipsTargetInfoBase(Triple, "n64", "mips64") {
+      : MipsTargetInfoBase(Triple, "n64", "mips64r2") {
     LongWidth = LongAlign = 64;
     PointerWidth = PointerAlign = 64;
     LongDoubleWidth = LongDoubleAlign = 128;
@@ -5179,7 +5655,7 @@
     SuitableAlign = 128;
     MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
   }
-  virtual bool setABI(const std::string &Name) {
+  bool setABI(const std::string &Name) override {
     if (Name == "n32") {
       LongWidth = LongAlign = 32;
       PointerWidth = PointerAlign = 32;
@@ -5194,10 +5670,11 @@
     } else
       return false;
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     MipsTargetInfoBase::getTargetDefines(Opts, Builder);
 
+    Builder.defineMacro("__mips", "64");
     Builder.defineMacro("__mips64");
     Builder.defineMacro("__mips64__");
 
@@ -5214,8 +5691,8 @@
     else
       llvm_unreachable("Invalid ABI for Mips64.");
   }
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const {
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override {
     static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
       { { "at" },  "$1" },
       { { "v0" },  "$2" },
@@ -5255,23 +5732,19 @@
 };
 
 class Mips64EBTargetInfo : public Mips64TargetInfoBase {
-  virtual void setDescriptionString() {
+  void setDescriptionString() override {
     if (ABI == "n32")
-      DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
-                          "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
-                          "v64:64:64-n32:64-S128";
+      DescriptionString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128";
     else
-      DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
-                          "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
-                          "v64:64:64-n32:64-S128";
+      DescriptionString = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128";
 
   }
 
 public:
   Mips64EBTargetInfo(const llvm::Triple &Triple)
       : Mips64TargetInfoBase(Triple) {}
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     DefineStd(Builder, "MIPSEB", Opts);
     Builder.defineMacro("_MIPSEB");
     Mips64TargetInfoBase::getTargetDefines(Opts, Builder);
@@ -5279,15 +5752,11 @@
 };
 
 class Mips64ELTargetInfo : public Mips64TargetInfoBase {
-  virtual void setDescriptionString() {
+  void setDescriptionString() override {
     if (ABI == "n32")
-      DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
-                          "i64:64:64-f32:32:32-f64:64:64-f128:128:128"
-                          "-v64:64:64-n32:64-S128";
+      DescriptionString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128";
     else
-      DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
-                          "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
-                          "v64:64:64-n32:64-S128";
+      DescriptionString = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128";
   }
 public:
   Mips64ELTargetInfo(const llvm::Triple &Triple)
@@ -5295,8 +5764,8 @@
     // Default ABI is n64.
     BigEndian = false;
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     DefineStd(Builder, "MIPSEL", Opts);
     Builder.defineMacro("_MIPSEL");
     Mips64TargetInfoBase::getTargetDefines(Opts, Builder);
@@ -5324,41 +5793,37 @@
     this->PtrDiffType = TargetInfo::SignedInt;
     this->IntPtrType = TargetInfo::SignedInt;
     this->RegParmMax = 0; // Disallow regparm
-    DescriptionString = "e-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
-                        "f32:32:32-f64:64:64-p:32:32:32-v128:32:32";
   }
 
-  void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+  void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
   }
-  virtual void getArchDefines(const LangOptions &Opts,
-                              MacroBuilder &Builder) const {
+  void getArchDefines(const LangOptions &Opts, MacroBuilder &Builder) const {
     Builder.defineMacro("__le32__");
     Builder.defineMacro("__pnacl__");
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
-    Builder.defineMacro("__LITTLE_ENDIAN__");
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     getArchDefines(Opts, Builder);
   }
-  virtual bool hasFeature(StringRef Feature) const {
+  bool hasFeature(StringRef Feature) const override {
     return Feature == "pnacl";
   }
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::PNaClABIBuiltinVaList;
   }
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &NumNames) const;
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const;
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &Info) const {
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &NumNames) const override;
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override;
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &Info) const override {
     return false;
   }
 
-  virtual const char *getClobbers() const {
+  const char *getClobbers() const override {
     return "";
   }
 };
@@ -5401,28 +5866,28 @@
       // These must be defined in sorted order!
       NoAsmVariants = true;
     }
-    virtual void getTargetDefines(const LangOptions &Opts,
-                                  MacroBuilder &Builder) const {
+    void getTargetDefines(const LangOptions &Opts,
+                          MacroBuilder &Builder) const override {
       DefineStd(Builder, "SPIR", Opts);
     }
-    virtual bool hasFeature(StringRef Feature) const {
+    bool hasFeature(StringRef Feature) const override {
       return Feature == "spir";
     }
-    
-    virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                   unsigned &NumRecords) const {}
-    virtual const char *getClobbers() const {
+
+    void getTargetBuiltins(const Builtin::Info *&Records,
+                           unsigned &NumRecords) const override {}
+    const char *getClobbers() const override {
       return "";
     }
-    virtual void getGCCRegNames(const char * const *&Names,
-                                unsigned &NumNames) const {}
-    virtual bool validateAsmConstraint(const char *&Name,
-                                       TargetInfo::ConstraintInfo &info) const {
+    void getGCCRegNames(const char * const *&Names,
+                        unsigned &NumNames) const override {}
+    bool validateAsmConstraint(const char *&Name,
+                               TargetInfo::ConstraintInfo &info) const override {
       return true;
     }
-    virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                  unsigned &NumAliases) const {}
-    virtual BuiltinVaListKind getBuiltinVaListKind() const {
+    void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                          unsigned &NumAliases) const override {}
+    BuiltinVaListKind getBuiltinVaListKind() const override {
       return TargetInfo::VoidPtrBuiltinVaList;
     }
   };
@@ -5435,13 +5900,11 @@
       SizeType     = TargetInfo::UnsignedInt;
       PtrDiffType = IntPtrType = TargetInfo::SignedInt;
       DescriptionString
-        = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
-          "f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-"
-          "v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-"
-          "v512:512:512-v1024:1024:1024";
+        = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
+          "v96:128-v192:256-v256:256-v512:512-v1024:1024";
     }
-    virtual void getTargetDefines(const LangOptions &Opts,
-                                  MacroBuilder &Builder) const {
+    void getTargetDefines(const LangOptions &Opts,
+                          MacroBuilder &Builder) const override {
       DefineStd(Builder, "SPIR32", Opts);
     }
   };
@@ -5452,14 +5915,11 @@
       PointerWidth = PointerAlign = 64;
       SizeType     = TargetInfo::UnsignedLong;
       PtrDiffType = IntPtrType = TargetInfo::SignedLong;
-      DescriptionString
-        = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
-          "f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-"
-          "v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-"
-          "v512:512:512-v1024:1024:1024";
+      DescriptionString = "e-i64:64-v16:16-v24:32-v32:32-v48:64-"
+                          "v96:128-v192:256-v256:256-v512:512-v1024:1024";
     }
-    virtual void getTargetDefines(const LangOptions &Opts,
-                                  MacroBuilder &Builder) const {
+    void getTargetDefines(const LangOptions &Opts,
+                          MacroBuilder &Builder) const override {
       DefineStd(Builder, "SPIR64", Opts);
     }
   };
@@ -5481,27 +5941,26 @@
     WCharType = UnsignedChar;
     WIntType = UnsignedInt;
     UseZeroLengthBitfieldAlignment = true;
-    DescriptionString = "e-p:32:32:32-a0:0:32-n32"
-                        "-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32"
-                        "-f16:16:32-f32:32:32-f64:32:32";
+    DescriptionString = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32"
+                        "-f64:32-a:0:32-n32";
   }
-  virtual void getTargetDefines(const LangOptions &Opts,
-                                MacroBuilder &Builder) const {
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override {
     Builder.defineMacro("__XS1B__");
   }
-  virtual void getTargetBuiltins(const Builtin::Info *&Records,
-                                 unsigned &NumRecords) const {
+  void getTargetBuiltins(const Builtin::Info *&Records,
+                         unsigned &NumRecords) const override {
     Records = BuiltinInfo;
     NumRecords = clang::XCore::LastTSBuiltin-Builtin::FirstTSBuiltin;
   }
-  virtual BuiltinVaListKind getBuiltinVaListKind() const {
+  BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::VoidPtrBuiltinVaList;
   }
-  virtual const char *getClobbers() const {
+  const char *getClobbers() const override {
     return "";
   }
-  virtual void getGCCRegNames(const char * const *&Names,
-                              unsigned &NumNames) const {
+  void getGCCRegNames(const char * const *&Names,
+                      unsigned &NumNames) const override {
     static const char * const GCCRegNames[] = {
       "r0",   "r1",   "r2",   "r3",   "r4",   "r5",   "r6",   "r7",
       "r8",   "r9",   "r10",  "r11",  "cp",   "dp",   "sp",   "lr"
@@ -5509,15 +5968,19 @@
     Names = GCCRegNames;
     NumNames = llvm::array_lengthof(GCCRegNames);
   }
-  virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
-                                unsigned &NumAliases) const {
+  void getGCCRegAliases(const GCCRegAlias *&Aliases,
+                        unsigned &NumAliases) const override {
     Aliases = NULL;
     NumAliases = 0;
   }
-  virtual bool validateAsmConstraint(const char *&Name,
-                                     TargetInfo::ConstraintInfo &Info) const {
+  bool validateAsmConstraint(const char *&Name,
+                             TargetInfo::ConstraintInfo &Info) const override {
     return false;
   }
+  int getEHDataRegisterNumber(unsigned RegNo) const override {
+    // R0 = ExceptionPointerRegister, R1 = ExceptionSelectorRegister
+    return (RegNo < 2) ? RegNo : -1;
+  }
 };
 
 const Builtin::Info XCoreTargetInfo::BuiltinInfo[] = {
@@ -5540,6 +6003,17 @@
   default:
     return NULL;
 
+  case llvm::Triple::arm64:
+    if (Triple.isOSDarwin())
+      return new DarwinARM64TargetInfo(Triple);
+
+    switch (os) {
+    case llvm::Triple::Linux:
+      return new LinuxTargetInfo<ARM64TargetInfo>(Triple);
+    default:
+      return new ARM64TargetInfo(Triple);
+    }
+
   case llvm::Triple::xcore:
     return new XCoreTargetInfo(Triple);
 
@@ -5549,33 +6023,69 @@
   case llvm::Triple::aarch64:
     switch (os) {
     case llvm::Triple::Linux:
-      return new LinuxTargetInfo<AArch64TargetInfo>(Triple);
+      return new LinuxTargetInfo<AArch64leTargetInfo>(Triple);
+    case llvm::Triple::NetBSD:
+      return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple);
     default:
-      return new AArch64TargetInfo(Triple);
+      return new AArch64leTargetInfo(Triple);
+    }
+
+  case llvm::Triple::aarch64_be:
+    switch (os) {
+    case llvm::Triple::Linux:
+      return new LinuxTargetInfo<AArch64beTargetInfo>(Triple);
+    case llvm::Triple::NetBSD:
+      return new NetBSDTargetInfo<AArch64beTargetInfo>(Triple);
+    default:
+      return new AArch64beTargetInfo(Triple);
     }
 
   case llvm::Triple::arm:
   case llvm::Triple::thumb:
+    if (Triple.isOSBinFormatMachO())
+      return new DarwinARMTargetInfo(Triple);
+
+    switch (os) {
+    case llvm::Triple::Linux:
+      return new LinuxTargetInfo<ARMleTargetInfo>(Triple);
+    case llvm::Triple::FreeBSD:
+      return new FreeBSDTargetInfo<ARMleTargetInfo>(Triple);
+    case llvm::Triple::NetBSD:
+      return new NetBSDTargetInfo<ARMleTargetInfo>(Triple);
+    case llvm::Triple::OpenBSD:
+      return new OpenBSDTargetInfo<ARMleTargetInfo>(Triple);
+    case llvm::Triple::Bitrig:
+      return new BitrigTargetInfo<ARMleTargetInfo>(Triple);
+    case llvm::Triple::RTEMS:
+      return new RTEMSTargetInfo<ARMleTargetInfo>(Triple);
+    case llvm::Triple::NaCl:
+      return new NaClTargetInfo<ARMleTargetInfo>(Triple);
+    default:
+      return new ARMleTargetInfo(Triple);
+    }
+
+  case llvm::Triple::armeb:
+  case llvm::Triple::thumbeb:
     if (Triple.isOSDarwin())
       return new DarwinARMTargetInfo(Triple);
 
     switch (os) {
     case llvm::Triple::Linux:
-      return new LinuxTargetInfo<ARMTargetInfo>(Triple);
+      return new LinuxTargetInfo<ARMbeTargetInfo>(Triple);
     case llvm::Triple::FreeBSD:
-      return new FreeBSDTargetInfo<ARMTargetInfo>(Triple);
+      return new FreeBSDTargetInfo<ARMbeTargetInfo>(Triple);
     case llvm::Triple::NetBSD:
-      return new NetBSDTargetInfo<ARMTargetInfo>(Triple);
+      return new NetBSDTargetInfo<ARMbeTargetInfo>(Triple);
     case llvm::Triple::OpenBSD:
-      return new OpenBSDTargetInfo<ARMTargetInfo>(Triple);
+      return new OpenBSDTargetInfo<ARMbeTargetInfo>(Triple);
     case llvm::Triple::Bitrig:
-      return new BitrigTargetInfo<ARMTargetInfo>(Triple);
+      return new BitrigTargetInfo<ARMbeTargetInfo>(Triple);
     case llvm::Triple::RTEMS:
-      return new RTEMSTargetInfo<ARMTargetInfo>(Triple);
+      return new RTEMSTargetInfo<ARMbeTargetInfo>(Triple);
     case llvm::Triple::NaCl:
-      return new NaClTargetInfo<ARMTargetInfo>(Triple);
+      return new NaClTargetInfo<ARMbeTargetInfo>(Triple);
     default:
-      return new ARMTargetInfo(Triple);
+      return new ARMbeTargetInfo(Triple);
     }
 
   case llvm::Triple::msp430:
@@ -5773,12 +6283,18 @@
       return new MinixTargetInfo<X86_32TargetInfo>(Triple);
     case llvm::Triple::Solaris:
       return new SolarisTargetInfo<X86_32TargetInfo>(Triple);
-    case llvm::Triple::Cygwin:
-      return new CygwinX86_32TargetInfo(Triple);
-    case llvm::Triple::MinGW32:
-      return new MinGWX86_32TargetInfo(Triple);
-    case llvm::Triple::Win32:
-      return new VisualStudioWindowsX86_32TargetInfo(Triple);
+    case llvm::Triple::Win32: {
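+      // Windows flavours are now distinguished by the triple's environment
+      // component, e.g. (illustrative triples) i686-pc-windows-msvc,
+      // i686-pc-windows-gnu (MinGW) and i686-pc-windows-cygnus (Cygwin).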
+      switch (Triple.getEnvironment()) {
+      default:
+        return new X86_32TargetInfo(Triple);
+      case llvm::Triple::Cygnus:
+        return new CygwinX86_32TargetInfo(Triple);
+      case llvm::Triple::GNU:
+        return new MinGWX86_32TargetInfo(Triple);
+      case llvm::Triple::MSVC:
+        return new VisualStudioWindowsX86_32TargetInfo(Triple);
+      }
+    }
     case llvm::Triple::Haiku:
       return new HaikuX86_32TargetInfo(Triple);
     case llvm::Triple::RTEMS:
@@ -5790,7 +6306,7 @@
     }
 
   case llvm::Triple::x86_64:
-    if (Triple.isOSDarwin() || Triple.getEnvironment() == llvm::Triple::MachO)
+    if (Triple.isOSDarwin() || Triple.isOSBinFormatMachO())
       return new DarwinX86_64TargetInfo(Triple);
 
     switch (os) {
@@ -5812,10 +6328,16 @@
       return new KFreeBSDTargetInfo<X86_64TargetInfo>(Triple);
     case llvm::Triple::Solaris:
       return new SolarisTargetInfo<X86_64TargetInfo>(Triple);
-    case llvm::Triple::MinGW32:
-      return new MinGWX86_64TargetInfo(Triple);
-    case llvm::Triple::Win32:   // This is what Triple.h supports now.
-      return new VisualStudioWindowsX86_64TargetInfo(Triple);
+    case llvm::Triple::Win32: {
+      switch (Triple.getEnvironment()) {
+      default:
+        return new X86_64TargetInfo(Triple);
+      case llvm::Triple::GNU:
+        return new MinGWX86_64TargetInfo(Triple);
+      case llvm::Triple::MSVC:
+        return new VisualStudioWindowsX86_64TargetInfo(Triple);
+      }
+    }
     case llvm::Triple::NaCl:
       return new NaClTargetInfo<X86_64TargetInfo>(Triple);
     default:
@@ -5844,7 +6366,7 @@
   llvm::Triple Triple(Opts->Triple);
 
   // Construct the target
-  OwningPtr<TargetInfo> Target(AllocateTarget(Triple));
+  std::unique_ptr<TargetInfo> Target(AllocateTarget(Triple));
   if (!Target) {
     Diags.Report(diag::err_target_unknown_triple) << Triple.str();
     return 0;
@@ -5863,12 +6385,6 @@
     return 0;
   }
 
-  // Set the target C++ ABI.
-  if (!Opts->CXXABI.empty() && !Target->setCXXABI(Opts->CXXABI)) {
-    Diags.Report(diag::err_target_unknown_cxxabi) << Opts->CXXABI;
-    return 0;
-  }
-
   // Set the fp math unit.
   if (!Opts->FPMath.empty() && !Target->setFPMath(Opts->FPMath)) {
     Diags.Report(diag::err_target_unknown_fpmath) << Opts->FPMath;
@@ -5900,5 +6416,5 @@
   if (!Target->handleTargetFeatures(Opts->Features, Diags))
     return 0;
 
-  return Target.take();
+  return Target.release();
 }
diff --git a/lib/Basic/TokenKinds.cpp b/lib/Basic/TokenKinds.cpp
index 6ce076e..92fc2a2 100644
--- a/lib/Basic/TokenKinds.cpp
+++ b/lib/Basic/TokenKinds.cpp
@@ -12,7 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Basic/TokenKinds.h"
-#include <cassert>
+#include "llvm/Support/ErrorHandling.h"
 using namespace clang;
 
 static const char * const TokNames[] = {
@@ -22,17 +22,27 @@
   0
 };
 
-const char *tok::getTokenName(enum TokenKind Kind) {
-  assert(Kind < tok::NUM_TOKENS);
-  return TokNames[Kind];
+const char *tok::getTokenName(TokenKind Kind) {
+  if (Kind < tok::NUM_TOKENS)
+    return TokNames[Kind];
+  llvm_unreachable("unknown TokenKind");
+  return 0;
 }
 
-const char *tok::getTokenSimpleSpelling(enum TokenKind Kind) {
+const char *tok::getPunctuatorSpelling(TokenKind Kind) {
   switch (Kind) {
 #define PUNCTUATOR(X,Y) case X: return Y;
 #include "clang/Basic/TokenKinds.def"
   default: break;
   }
+  return 0;
+}
 
+const char *tok::getKeywordSpelling(TokenKind Kind) {
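+  // Each keyword case returns its source spelling, e.g. kw_return -> "return".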
+  switch (Kind) {
+#define KEYWORD(X,Y) case kw_ ## X: return #X;
+#include "clang/Basic/TokenKinds.def"
+  default: break;
+  }
   return 0;
 }
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index 88b827d..ae32c01 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -102,11 +102,11 @@
       OS << Revision;
     }
     OS << ')';
-  }  
+  }
   // Support LLVM in a separate repository.
   std::string LLVMRev = getLLVMRevision();
   if (!LLVMRev.empty() && LLVMRev != Revision) {
-    OS << " (";    
+    OS << " (";
     std::string LLVMRepo = getLLVMRepositoryPath();
     if (!LLVMRepo.empty())
       OS << LLVMRepo << ' ';
@@ -116,12 +116,16 @@
 }
 
 std::string getClangFullVersion() {
+  return getClangToolFullVersion("clang");
+}
+
+std::string getClangToolFullVersion(StringRef ToolName) {
   std::string buf;
   llvm::raw_string_ostream OS(buf);
 #ifdef CLANG_VENDOR
   OS << CLANG_VENDOR;
 #endif
-  OS << "clang version " CLANG_VERSION_STRING " "
+  OS << ToolName << " version " CLANG_VERSION_STRING " "
      << getClangFullRepositoryVersion();
 
   // If vendor supplied, include the base LLVM version as well.
diff --git a/lib/Basic/VirtualFileSystem.cpp b/lib/Basic/VirtualFileSystem.cpp
new file mode 100644
index 0000000..9a88cfd
--- /dev/null
+++ b/lib/Basic/VirtualFileSystem.cpp
@@ -0,0 +1,841 @@
+//===- VirtualFileSystem.cpp - Virtual File System Layer --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file implements the VirtualFileSystem interface.
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/VirtualFileSystem.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/YAMLParser.h"
+#include <atomic>
+#include <limits>
+#include <memory>
+
+using namespace clang;
+using namespace clang::vfs;
+using namespace llvm;
+using llvm::sys::fs::file_status;
+using llvm::sys::fs::file_type;
+using llvm::sys::fs::perms;
+using llvm::sys::fs::UniqueID;
+
+Status::Status(const file_status &Status)
+    : UID(Status.getUniqueID()), MTime(Status.getLastModificationTime()),
+      User(Status.getUser()), Group(Status.getGroup()), Size(Status.getSize()),
+      Type(Status.type()), Perms(Status.permissions()) {}
+
+Status::Status(StringRef Name, StringRef ExternalName, UniqueID UID,
+               sys::TimeValue MTime, uint32_t User, uint32_t Group,
+               uint64_t Size, file_type Type, perms Perms)
+    : Name(Name), UID(UID), MTime(MTime), User(User), Group(Group), Size(Size),
+      Type(Type), Perms(Perms) {}
+
+bool Status::equivalent(const Status &Other) const {
+  return getUniqueID() == Other.getUniqueID();
+}
+bool Status::isDirectory() const {
+  return Type == file_type::directory_file;
+}
+bool Status::isRegularFile() const {
+  return Type == file_type::regular_file;
+}
+bool Status::isOther() const {
+  return exists() && !isRegularFile() && !isDirectory() && !isSymlink();
+}
+bool Status::isSymlink() const {
+  return Type == file_type::symlink_file;
+}
+bool Status::isStatusKnown() const {
+  return Type != file_type::status_error;
+}
+bool Status::exists() const {
+  return isStatusKnown() && Type != file_type::file_not_found;
+}
+
+File::~File() {}
+
+FileSystem::~FileSystem() {}
+
+error_code FileSystem::getBufferForFile(const llvm::Twine &Name,
+                                        std::unique_ptr<MemoryBuffer> &Result,
+                                        int64_t FileSize,
+                                        bool RequiresNullTerminator) {
+  std::unique_ptr<File> F;
+  if (error_code EC = openFileForRead(Name, F))
+    return EC;
+
+  error_code EC = F->getBuffer(Name, Result, FileSize, RequiresNullTerminator);
+  return EC;
+}
+
+//===-----------------------------------------------------------------------===/
+// RealFileSystem implementation
+//===-----------------------------------------------------------------------===/
+
+namespace {
+/// \brief Wrapper around a raw file descriptor.
+class RealFile : public File {
+  int FD;
+  Status S;
+  friend class RealFileSystem;
+  RealFile(int FD) : FD(FD) {
+    assert(FD >= 0 && "Invalid or inactive file descriptor");
+  }
+
+public:
+  ~RealFile();
+  ErrorOr<Status> status() override;
+  error_code getBuffer(const Twine &Name, std::unique_ptr<MemoryBuffer> &Result,
+                       int64_t FileSize = -1,
+                       bool RequiresNullTerminator = true) override;
+  error_code close() override;
+  void setName(StringRef Name) override;
+};
+} // end anonymous namespace
+RealFile::~RealFile() { close(); }
+
+ErrorOr<Status> RealFile::status() {
+  assert(FD != -1 && "cannot stat closed file");
+  if (!S.isStatusKnown()) {
+    file_status RealStatus;
+    if (error_code EC = sys::fs::status(FD, RealStatus))
+      return EC;
+    Status NewS(RealStatus);
+    NewS.setName(S.getName());
+    S = std::move(NewS);
+  }
+  return S;
+}
+
+error_code RealFile::getBuffer(const Twine &Name,
+                               std::unique_ptr<MemoryBuffer> &Result,
+                               int64_t FileSize, bool RequiresNullTerminator) {
+  assert(FD != -1 && "cannot get buffer for closed file");
+  return MemoryBuffer::getOpenFile(FD, Name.str().c_str(), Result, FileSize,
+                                   RequiresNullTerminator);
+}
+
+// FIXME: This is terrible, we need this for ::close.
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#include <sys/uio.h>
+#else
+#include <io.h>
+#ifndef S_ISFIFO
+#define S_ISFIFO(x) (0)
+#endif
+#endif
+error_code RealFile::close() {
+  if (::close(FD))
+    return error_code(errno, system_category());
+  FD = -1;
+  return error_code::success();
+}
+
+void RealFile::setName(StringRef Name) {
+  S.setName(Name);
+}
+
+namespace {
+/// \brief The file system according to your operating system.
+class RealFileSystem : public FileSystem {
+public:
+  ErrorOr<Status> status(const Twine &Path) override;
+  error_code openFileForRead(const Twine &Path,
+                             std::unique_ptr<File> &Result) override;
+};
+} // end anonymous namespace
+
+ErrorOr<Status> RealFileSystem::status(const Twine &Path) {
+  sys::fs::file_status RealStatus;
+  if (error_code EC = sys::fs::status(Path, RealStatus))
+    return EC;
+  Status Result(RealStatus);
+  Result.setName(Path.str());
+  return Result;
+}
+
+error_code RealFileSystem::openFileForRead(const Twine &Name,
+                                           std::unique_ptr<File> &Result) {
+  int FD;
+  if (error_code EC = sys::fs::openFileForRead(Name, FD))
+    return EC;
+  Result.reset(new RealFile(FD));
+  Result->setName(Name.str());
+  return error_code::success();
+}
+
+IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
+  static IntrusiveRefCntPtr<FileSystem> FS = new RealFileSystem();
+  return FS;
+}
+
+//===-----------------------------------------------------------------------===/
+// OverlayFileSystem implementation
+//===-----------------------------------------------------------------------===/
+OverlayFileSystem::OverlayFileSystem(IntrusiveRefCntPtr<FileSystem> BaseFS) {
+  pushOverlay(BaseFS);
+}
+
+void OverlayFileSystem::pushOverlay(IntrusiveRefCntPtr<FileSystem> FS) {
+  FSList.push_back(FS);
+}
+
+ErrorOr<Status> OverlayFileSystem::status(const Twine &Path) {
+  // FIXME: handle symlinks that cross file systems
+  for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
+    ErrorOr<Status> Status = (*I)->status(Path);
+    if (Status || Status.getError() != errc::no_such_file_or_directory)
+      return Status;
+  }
+  return error_code(errc::no_such_file_or_directory, system_category());
+}
+
+error_code OverlayFileSystem::openFileForRead(const llvm::Twine &Path,
+                                              std::unique_ptr<File> &Result) {
+  // FIXME: handle symlinks that cross file systems
+  for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
+    error_code EC = (*I)->openFileForRead(Path, Result);
+    if (!EC || EC != errc::no_such_file_or_directory)
+      return EC;
+  }
+  return error_code(errc::no_such_file_or_directory, system_category());
+}
+
+//===-----------------------------------------------------------------------===/
+// VFSFromYAML implementation
+//===-----------------------------------------------------------------------===/
+
+// Allow DenseMap<StringRef, ...>.  This is useful below because we know all the
+// strings are literals and will outlive the map, and there is no reason to
+// store them.
+namespace llvm {
+  template<>
+  struct DenseMapInfo<StringRef> {
+    // This assumes that "" will never be a valid key.
+    static inline StringRef getEmptyKey() { return StringRef(""); }
+    static inline StringRef getTombstoneKey() { return StringRef(); }
+    static unsigned getHashValue(StringRef Val) { return HashString(Val); }
+    static bool isEqual(StringRef LHS, StringRef RHS) { return LHS == RHS; }
+  };
+}
+
+namespace {
+
+enum EntryKind {
+  EK_Directory,
+  EK_File
+};
+
+/// \brief A single file or directory in the VFS.
+class Entry {
+  EntryKind Kind;
+  std::string Name;
+
+public:
+  virtual ~Entry();
+  Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
+  StringRef getName() const { return Name; }
+  EntryKind getKind() const { return Kind; }
+};
+
+class DirectoryEntry : public Entry {
+  std::vector<Entry *> Contents;
+  Status S;
+
+public:
+  virtual ~DirectoryEntry();
+  DirectoryEntry(StringRef Name, std::vector<Entry *> Contents, Status S)
+      : Entry(EK_Directory, Name), Contents(std::move(Contents)),
+        S(std::move(S)) {}
+  Status getStatus() { return S; }
+  typedef std::vector<Entry *>::iterator iterator;
+  iterator contents_begin() { return Contents.begin(); }
+  iterator contents_end() { return Contents.end(); }
+  static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
+};
+
+class FileEntry : public Entry {
+public:
+  enum NameKind {
+    NK_NotSet,
+    NK_External,
+    NK_Virtual
+  };
+private:
+  std::string ExternalContentsPath;
+  NameKind UseName;
+public:
+  FileEntry(StringRef Name, StringRef ExternalContentsPath, NameKind UseName)
+      : Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
+        UseName(UseName) {}
+  StringRef getExternalContentsPath() const { return ExternalContentsPath; }
+  /// \brief Whether to use the external path as the name for this file.
+  bool useExternalName(bool GlobalUseExternalName) const {
+    return UseName == NK_NotSet ? GlobalUseExternalName
+                                : (UseName == NK_External);
+  }
+  static bool classof(const Entry *E) { return E->getKind() == EK_File; }
+};
+
+/// \brief A virtual file system parsed from a YAML file.
+///
+/// Currently, this class allows creating virtual directories and mapping
+/// virtual file paths to existing external files, available in \c ExternalFS.
+///
+/// The basic structure of the parsed file is:
+/// \verbatim
+/// {
+///   'version': <version number>,
+///   <optional configuration>
+///   'roots': [
+///              <directory entries>
+///            ]
+/// }
+/// \endverbatim
+///
+/// All configuration options are optional.
+///   'case-sensitive': <boolean, default=true>
+///   'use-external-names': <boolean, default=true>
+///
+/// Virtual directories are represented as
+/// \verbatim
+/// {
+///   'type': 'directory',
+///   'name': <string>,
+///   'contents': [ <file or directory entries> ]
+/// }
+/// \endverbatim
+///
+/// The default attributes for virtual directories are:
+/// \verbatim
+/// MTime = now() when created
+/// Perms = 0777
+/// User = Group = 0
+/// Size = 0
+/// UniqueID = unspecified unique value
+/// \endverbatim
+///
+/// Re-mapped files are represented as
+/// \verbatim
+/// {
+///   'type': 'file',
+///   'name': <string>,
+///   'use-external-name': <boolean>, # Optional
+///   'external-contents': <path to external file>
+/// }
+/// \endverbatim
+///
+/// and inherit their attributes from the external contents.
+///
+/// In both cases, the 'name' field may contain multiple path components (e.g.
+/// /path/to/file). However, any directory that contains more than one child
+/// must be uniquely represented by a directory entry.
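+///
+/// As a purely illustrative example (all paths hypothetical), an overlay that
+/// maps /virtual/module.h to an existing /real/module.h could be written as:
+/// \verbatim
+/// {
+///   'version': 0,
+///   'roots': [
+///     { 'type': 'directory', 'name': '/virtual',
+///       'contents': [
+///         { 'type': 'file', 'name': 'module.h',
+///           'external-contents': '/real/module.h' }
+///       ]
+///     }
+///   ]
+/// }
+/// \endverbatim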
+class VFSFromYAML : public vfs::FileSystem {
+  std::vector<Entry *> Roots; ///< The root(s) of the virtual file system.
+  /// \brief The file system to use for external references.
+  IntrusiveRefCntPtr<FileSystem> ExternalFS;
+
+  /// @name Configuration
+  /// @{
+
+  /// \brief Whether to perform case-sensitive comparisons.
+  ///
+  /// Currently, case-insensitive matching only works correctly with ASCII.
+  bool CaseSensitive;
+
+  /// \brief Whether to use the value of 'external-contents' for the
+  /// names of files.  This global value is overridable on a per-file basis.
+  bool UseExternalNames;
+  /// @}
+
+  friend class VFSFromYAMLParser;
+
+private:
+  VFSFromYAML(IntrusiveRefCntPtr<FileSystem> ExternalFS)
+      : ExternalFS(ExternalFS), CaseSensitive(true), UseExternalNames(true) {}
+
+  /// \brief Looks up \p Path in \c Roots.
+  ErrorOr<Entry *> lookupPath(const Twine &Path);
+
+  /// \brief Looks up the path <tt>[Start, End)</tt> in \p From, possibly
+  /// recursing into the contents of \p From if it is a directory.
+  ErrorOr<Entry *> lookupPath(sys::path::const_iterator Start,
+                              sys::path::const_iterator End, Entry *From);
+
+public:
+  ~VFSFromYAML();
+
+  /// \brief Parses \p Buffer, which is expected to be in YAML format and
+  /// returns a virtual file system representing its contents.
+  ///
+  /// Takes ownership of \p Buffer.
+  static VFSFromYAML *create(MemoryBuffer *Buffer,
+                             SourceMgr::DiagHandlerTy DiagHandler,
+                             void *DiagContext,
+                             IntrusiveRefCntPtr<FileSystem> ExternalFS);
+
+  ErrorOr<Status> status(const Twine &Path) override;
+  error_code openFileForRead(const Twine &Path,
+                             std::unique_ptr<File> &Result) override;
+};
+
+/// \brief A helper class to hold the common YAML parsing state.
+class VFSFromYAMLParser {
+  yaml::Stream &Stream;
+
+  void error(yaml::Node *N, const Twine &Msg) {
+    Stream.printError(N, Msg);
+  }
+
+  // false on error
+  bool parseScalarString(yaml::Node *N, StringRef &Result,
+                         SmallVectorImpl<char> &Storage) {
+    yaml::ScalarNode *S = dyn_cast<yaml::ScalarNode>(N);
+    if (!S) {
+      error(N, "expected string");
+      return false;
+    }
+    Result = S->getValue(Storage);
+    return true;
+  }
+
+  // false on error
+  bool parseScalarBool(yaml::Node *N, bool &Result) {
+    SmallString<5> Storage;
+    StringRef Value;
+    if (!parseScalarString(N, Value, Storage))
+      return false;
+
+    if (Value.equals_lower("true") || Value.equals_lower("on") ||
+        Value.equals_lower("yes") || Value == "1") {
+      Result = true;
+      return true;
+    } else if (Value.equals_lower("false") || Value.equals_lower("off") ||
+               Value.equals_lower("no") || Value == "0") {
+      Result = false;
+      return true;
+    }
+
+    error(N, "expected boolean value");
+    return false;
+  }
+
+  struct KeyStatus {
+    KeyStatus(bool Required=false) : Required(Required), Seen(false) {}
+    bool Required;
+    bool Seen;
+  };
+  typedef std::pair<StringRef, KeyStatus> KeyStatusPair;
+
+  // false on error
+  bool checkDuplicateOrUnknownKey(yaml::Node *KeyNode, StringRef Key,
+                                  DenseMap<StringRef, KeyStatus> &Keys) {
+    if (!Keys.count(Key)) {
+      error(KeyNode, "unknown key");
+      return false;
+    }
+    KeyStatus &S = Keys[Key];
+    if (S.Seen) {
+      error(KeyNode, Twine("duplicate key '") + Key + "'");
+      return false;
+    }
+    S.Seen = true;
+    return true;
+  }
+
+  // false on error
+  bool checkMissingKeys(yaml::Node *Obj, DenseMap<StringRef, KeyStatus> &Keys) {
+    for (DenseMap<StringRef, KeyStatus>::iterator I = Keys.begin(),
+         E = Keys.end();
+         I != E; ++I) {
+      if (I->second.Required && !I->second.Seen) {
+        error(Obj, Twine("missing key '") + I->first + "'");
+        return false;
+      }
+    }
+    return true;
+  }
+
+  Entry *parseEntry(yaml::Node *N) {
+    yaml::MappingNode *M = dyn_cast<yaml::MappingNode>(N);
+    if (!M) {
+      error(N, "expected mapping node for file or directory entry");
+      return NULL;
+    }
+
+    KeyStatusPair Fields[] = {
+      KeyStatusPair("name", true),
+      KeyStatusPair("type", true),
+      KeyStatusPair("contents", false),
+      KeyStatusPair("external-contents", false),
+      KeyStatusPair("use-external-name", false),
+    };
+
+    DenseMap<StringRef, KeyStatus> Keys(
+        &Fields[0], Fields + sizeof(Fields)/sizeof(Fields[0]));
+
+    bool HasContents = false; // external or otherwise
+    std::vector<Entry *> EntryArrayContents;
+    std::string ExternalContentsPath;
+    std::string Name;
+    FileEntry::NameKind UseExternalName = FileEntry::NK_NotSet;
+    EntryKind Kind;
+
+    for (yaml::MappingNode::iterator I = M->begin(), E = M->end(); I != E;
+         ++I) {
+      StringRef Key;
+      // Reuse the buffer for key and value, since we don't look at key after
+      // parsing value.
+      SmallString<256> Buffer;
+      if (!parseScalarString(I->getKey(), Key, Buffer))
+        return NULL;
+
+      if (!checkDuplicateOrUnknownKey(I->getKey(), Key, Keys))
+        return NULL;
+
+      StringRef Value;
+      if (Key == "name") {
+        if (!parseScalarString(I->getValue(), Value, Buffer))
+          return NULL;
+        Name = Value;
+      } else if (Key == "type") {
+        if (!parseScalarString(I->getValue(), Value, Buffer))
+          return NULL;
+        if (Value == "file")
+          Kind = EK_File;
+        else if (Value == "directory")
+          Kind = EK_Directory;
+        else {
+          error(I->getValue(), "unknown value for 'type'");
+          return NULL;
+        }
+      } else if (Key == "contents") {
+        if (HasContents) {
+          error(I->getKey(),
+                "entry already has 'contents' or 'external-contents'");
+          return NULL;
+        }
+        HasContents = true;
+        yaml::SequenceNode *Contents =
+            dyn_cast<yaml::SequenceNode>(I->getValue());
+        if (!Contents) {
+          // FIXME: this is only for directories, what about files?
+          error(I->getValue(), "expected array");
+          return NULL;
+        }
+
+        for (yaml::SequenceNode::iterator I = Contents->begin(),
+                                          E = Contents->end();
+             I != E; ++I) {
+          if (Entry *E = parseEntry(&*I))
+            EntryArrayContents.push_back(E);
+          else
+            return NULL;
+        }
+      } else if (Key == "external-contents") {
+        if (HasContents) {
+          error(I->getKey(),
+                "entry already has 'contents' or 'external-contents'");
+          return NULL;
+        }
+        HasContents = true;
+        if (!parseScalarString(I->getValue(), Value, Buffer))
+          return NULL;
+        ExternalContentsPath = Value;
+      } else if (Key == "use-external-name") {
+        bool Val;
+        if (!parseScalarBool(I->getValue(), Val))
+          return NULL;
+        UseExternalName = Val ? FileEntry::NK_External : FileEntry::NK_Virtual;
+      } else {
+        llvm_unreachable("key missing from Keys");
+      }
+    }
+
+    if (Stream.failed())
+      return NULL;
+
+    // check for missing keys
+    if (!HasContents) {
+      error(N, "missing key 'contents' or 'external-contents'");
+      return NULL;
+    }
+    if (!checkMissingKeys(N, Keys))
+      return NULL;
+
+    // check invalid configuration
+    if (Kind == EK_Directory && UseExternalName != FileEntry::NK_NotSet) {
+      error(N, "'use-external-name' is not supported for directories");
+      return NULL;
+    }
+
+    // Remove trailing slash(es), being careful not to remove the root path
+    StringRef Trimmed(Name);
+    size_t RootPathLen = sys::path::root_path(Trimmed).size();
+    while (Trimmed.size() > RootPathLen &&
+           sys::path::is_separator(Trimmed.back()))
+      Trimmed = Trimmed.slice(0, Trimmed.size()-1);
+    // Get the last component
+    StringRef LastComponent = sys::path::filename(Trimmed);
+
+    Entry *Result = 0;
+    switch (Kind) {
+    case EK_File:
+      Result = new FileEntry(LastComponent, std::move(ExternalContentsPath),
+                             UseExternalName);
+      break;
+    case EK_Directory:
+      Result = new DirectoryEntry(LastComponent, std::move(EntryArrayContents),
+          Status("", "", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0,
+                 0, file_type::directory_file, sys::fs::all_all));
+      break;
+    }
+
+    StringRef Parent = sys::path::parent_path(Trimmed);
+    if (Parent.empty())
+      return Result;
+
+    // if 'name' contains multiple components, create implicit directory entries
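+    // (e.g. a hypothetical name "/a/b/f" yields the file entry "f" wrapped in
+    // directory entries "b", then "a", then "/", innermost first)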
+    for (sys::path::reverse_iterator I = sys::path::rbegin(Parent),
+                                     E = sys::path::rend(Parent);
+         I != E; ++I) {
+      Result = new DirectoryEntry(*I, llvm::makeArrayRef(Result),
+          Status("", "", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0,
+                 0, file_type::directory_file, sys::fs::all_all));
+    }
+    return Result;
+  }
+
+public:
+  VFSFromYAMLParser(yaml::Stream &S) : Stream(S) {}
+
+  // false on error
+  bool parse(yaml::Node *Root, VFSFromYAML *FS) {
+    yaml::MappingNode *Top = dyn_cast<yaml::MappingNode>(Root);
+    if (!Top) {
+      error(Root, "expected mapping node");
+      return false;
+    }
+
+    KeyStatusPair Fields[] = {
+      KeyStatusPair("version", true),
+      KeyStatusPair("case-sensitive", false),
+      KeyStatusPair("use-external-names", false),
+      KeyStatusPair("roots", true),
+    };
+
+    DenseMap<StringRef, KeyStatus> Keys(
+        &Fields[0], Fields + sizeof(Fields)/sizeof(Fields[0]));
+
+    // Parse configuration and 'roots'
+    for (yaml::MappingNode::iterator I = Top->begin(), E = Top->end(); I != E;
+         ++I) {
+      SmallString<10> KeyBuffer;
+      StringRef Key;
+      if (!parseScalarString(I->getKey(), Key, KeyBuffer))
+        return false;
+
+      if (!checkDuplicateOrUnknownKey(I->getKey(), Key, Keys))
+        return false;
+
+      if (Key == "roots") {
+        yaml::SequenceNode *Roots = dyn_cast<yaml::SequenceNode>(I->getValue());
+        if (!Roots) {
+          error(I->getValue(), "expected array");
+          return false;
+        }
+
+        for (yaml::SequenceNode::iterator I = Roots->begin(), E = Roots->end();
+             I != E; ++I) {
+          if (Entry *E = parseEntry(&*I))
+            FS->Roots.push_back(E);
+          else
+            return false;
+        }
+      } else if (Key == "version") {
+        StringRef VersionString;
+        SmallString<4> Storage;
+        if (!parseScalarString(I->getValue(), VersionString, Storage))
+          return false;
+        int Version;
+        if (VersionString.getAsInteger<int>(10, Version)) {
+          error(I->getValue(), "expected integer");
+          return false;
+        }
+        if (Version < 0) {
+          error(I->getValue(), "invalid version number");
+          return false;
+        }
+        if (Version != 0) {
+          error(I->getValue(), "version mismatch, expected 0");
+          return false;
+        }
+      } else if (Key == "case-sensitive") {
+        if (!parseScalarBool(I->getValue(), FS->CaseSensitive))
+          return false;
+      } else if (Key == "use-external-names") {
+        if (!parseScalarBool(I->getValue(), FS->UseExternalNames))
+          return false;
+      } else {
+        llvm_unreachable("key missing from Keys");
+      }
+    }
+
+    if (Stream.failed())
+      return false;
+
+    if (!checkMissingKeys(Top, Keys))
+      return false;
+    return true;
+  }
+};
+} // end of anonymous namespace
+
+Entry::~Entry() {}
+DirectoryEntry::~DirectoryEntry() { llvm::DeleteContainerPointers(Contents); }
+
+VFSFromYAML::~VFSFromYAML() { llvm::DeleteContainerPointers(Roots); }
+
+VFSFromYAML *VFSFromYAML::create(MemoryBuffer *Buffer,
+                                 SourceMgr::DiagHandlerTy DiagHandler,
+                                 void *DiagContext,
+                                 IntrusiveRefCntPtr<FileSystem> ExternalFS) {
+
+  SourceMgr SM;
+  yaml::Stream Stream(Buffer, SM);
+
+  SM.setDiagHandler(DiagHandler, DiagContext);
+  yaml::document_iterator DI = Stream.begin();
+  yaml::Node *Root = (DI == Stream.end()) ? 0 : DI->getRoot();
+  if (!Root) {
+    SM.PrintMessage(SMLoc(), SourceMgr::DK_Error, "expected root node");
+    return NULL;
+  }
+
+  VFSFromYAMLParser P(Stream);
+
+  std::unique_ptr<VFSFromYAML> FS(new VFSFromYAML(ExternalFS));
+  if (!P.parse(Root, FS.get()))
+    return NULL;
+
+  return FS.release();
+}
+
+ErrorOr<Entry *> VFSFromYAML::lookupPath(const Twine &Path_) {
+  SmallString<256> Path;
+  Path_.toVector(Path);
+
+  // Handle relative paths
+  if (error_code EC = sys::fs::make_absolute(Path))
+    return EC;
+
+  if (Path.empty())
+    return error_code(errc::invalid_argument, system_category());
+
+  sys::path::const_iterator Start = sys::path::begin(Path);
+  sys::path::const_iterator End = sys::path::end(Path);
+  for (std::vector<Entry *>::iterator I = Roots.begin(), E = Roots.end();
+       I != E; ++I) {
+    ErrorOr<Entry *> Result = lookupPath(Start, End, *I);
+    if (Result || Result.getError() != errc::no_such_file_or_directory)
+      return Result;
+  }
+  return error_code(errc::no_such_file_or_directory, system_category());
+}
+
+ErrorOr<Entry *> VFSFromYAML::lookupPath(sys::path::const_iterator Start,
+                                         sys::path::const_iterator End,
+                                         Entry *From) {
+  if (Start->equals("."))
+    ++Start;
+
+  // FIXME: handle ..
+  if (CaseSensitive ? !Start->equals(From->getName())
+                    : !Start->equals_lower(From->getName()))
+    // failure to match
+    return error_code(errc::no_such_file_or_directory, system_category());
+
+  ++Start;
+
+  if (Start == End) {
+    // Match!
+    return From;
+  }
+
+  DirectoryEntry *DE = dyn_cast<DirectoryEntry>(From);
+  if (!DE)
+    return error_code(errc::not_a_directory, system_category());
+
+  for (DirectoryEntry::iterator I = DE->contents_begin(),
+                                E = DE->contents_end();
+       I != E; ++I) {
+    ErrorOr<Entry *> Result = lookupPath(Start, End, *I);
+    if (Result || Result.getError() != errc::no_such_file_or_directory)
+      return Result;
+  }
+  return error_code(errc::no_such_file_or_directory, system_category());
+}
+
+ErrorOr<Status> VFSFromYAML::status(const Twine &Path) {
+  ErrorOr<Entry *> Result = lookupPath(Path);
+  if (!Result)
+    return Result.getError();
+
+  std::string PathStr(Path.str());
+  if (FileEntry *F = dyn_cast<FileEntry>(*Result)) {
+    ErrorOr<Status> S = ExternalFS->status(F->getExternalContentsPath());
+    assert(!S || S->getName() == F->getExternalContentsPath());
+    if (S && !F->useExternalName(UseExternalNames))
+      S->setName(PathStr);
+    return S;
+  } else { // directory
+    DirectoryEntry *DE = cast<DirectoryEntry>(*Result);
+    Status S = DE->getStatus();
+    S.setName(PathStr);
+    return S;
+  }
+}
+
+error_code VFSFromYAML::openFileForRead(const Twine &Path,
+                                        std::unique_ptr<vfs::File> &Result) {
+  ErrorOr<Entry *> E = lookupPath(Path);
+  if (!E)
+    return E.getError();
+
+  FileEntry *F = dyn_cast<FileEntry>(*E);
+  if (!F) // FIXME: errc::not_a_file?
+    return error_code(errc::invalid_argument, system_category());
+
+  if (error_code EC = ExternalFS->openFileForRead(F->getExternalContentsPath(),
+                                                  Result))
+    return EC;
+
+  if (!F->useExternalName(UseExternalNames))
+    Result->setName(Path.str());
+
+  return error_code::success();
+}
+
+IntrusiveRefCntPtr<FileSystem>
+vfs::getVFSFromYAML(MemoryBuffer *Buffer, SourceMgr::DiagHandlerTy DiagHandler,
+                    void *DiagContext,
+                    IntrusiveRefCntPtr<FileSystem> ExternalFS) {
+  return VFSFromYAML::create(Buffer, DiagHandler, DiagContext, ExternalFS);
+}
+
+UniqueID vfs::getNextVirtualUniqueID() {
+  static std::atomic<unsigned> UID;
+  unsigned ID = ++UID;
+  // The following assumes that uint64_t max will never collide with a real
+  // dev_t value from the OS.
+  return UniqueID(std::numeric_limits<uint64_t>::max(), ID);
+}
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 468fe04..d3ec46c 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -11,8 +11,8 @@
 #define CLANG_CODEGEN_ABIINFO_H
 
 #include "clang/AST/Type.h"
-#include "llvm/IR/Type.h"
 #include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Type.h"
 
 namespace llvm {
   class Value;
diff --git a/lib/CodeGen/Android.mk b/lib/CodeGen/Android.mk
index 9108e24..1c4a0b7 100644
--- a/lib/CodeGen/Android.mk
+++ b/lib/CodeGen/Android.mk
@@ -3,6 +3,7 @@
 clang_codegen_TBLGEN_TABLES := \
   AttrList.inc \
   Attrs.inc \
+  AttrVisitor.inc \
   CommentCommandList.inc \
   CommentNodes.inc \
   DeclNodes.inc \
@@ -48,11 +49,11 @@
   CodeGenAction.cpp \
   CodeGenFunction.cpp \
   CodeGenModule.cpp \
+  CodeGenPGO.cpp \
   CodeGenTBAA.cpp \
   CodeGenTypes.cpp \
   ItaniumCXXABI.cpp \
   MicrosoftCXXABI.cpp \
-  MicrosoftVBTables.cpp \
   ModuleBuilder.cpp \
   TargetInfo.cpp
 
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 90b0f68..17f5f4d 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -13,13 +13,14 @@
 #include "clang/Basic/TargetOptions.h"
 #include "clang/Frontend/CodeGenOptions.h"
 #include "clang/Frontend/FrontendDiagnostic.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/Bitcode/ReaderWriter.h"
+#include "clang/Frontend/Utils.h"
+#include "llvm/Bitcode/BitcodeWriterPass.h"
 #include "llvm/CodeGen/RegAllocRegistry.h"
 #include "llvm/CodeGen/SchedulerRegistry.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IRPrintingPasses.h"
 #include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
 #include "llvm/MC/SubtargetFeature.h"
 #include "llvm/PassManager.h"
 #include "llvm/Support/CommandLine.h"
@@ -36,6 +37,7 @@
 #include "llvm/Transforms/Instrumentation.h"
 #include "llvm/Transforms/ObjCARC.h"
 #include "llvm/Transforms/Scalar.h"
+#include <memory>
 using namespace clang;
 using namespace llvm;
 
@@ -55,38 +57,37 @@
   mutable FunctionPassManager *PerFunctionPasses;
 
 private:
-  PassManager *getCodeGenPasses(TargetMachine *TM) const {
+  PassManager *getCodeGenPasses() const {
     if (!CodeGenPasses) {
       CodeGenPasses = new PassManager();
-      CodeGenPasses->add(new DataLayout(TheModule));
+      CodeGenPasses->add(new DataLayoutPass(TheModule));
       if (TM)
         TM->addAnalysisPasses(*CodeGenPasses);
     }
     return CodeGenPasses;
   }
 
-  PassManager *getPerModulePasses(TargetMachine *TM) const {
+  PassManager *getPerModulePasses() const {
     if (!PerModulePasses) {
       PerModulePasses = new PassManager();
-      PerModulePasses->add(new DataLayout(TheModule));
+      PerModulePasses->add(new DataLayoutPass(TheModule));
       if (TM)
         TM->addAnalysisPasses(*PerModulePasses);
     }
     return PerModulePasses;
   }
 
-  FunctionPassManager *getPerFunctionPasses(TargetMachine *TM) const {
+  FunctionPassManager *getPerFunctionPasses() const {
     if (!PerFunctionPasses) {
       PerFunctionPasses = new FunctionPassManager(TheModule);
-      PerFunctionPasses->add(new DataLayout(TheModule));
+      PerFunctionPasses->add(new DataLayoutPass(TheModule));
       if (TM)
         TM->addAnalysisPasses(*PerFunctionPasses);
     }
     return PerFunctionPasses;
   }
 
-
-  void CreatePasses(TargetMachine *TM);
+  void CreatePasses();
 
   /// CreateTargetMachine - Generates the TargetMachine.
   /// Returns Null if it is unable to create the target machine.
@@ -101,8 +102,7 @@
   /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
   ///
   /// \return True on success.
-  bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS,
-                     TargetMachine *TM);
+  bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
 
 public:
   EmitAssemblyHelper(DiagnosticsEngine &_Diags,
@@ -118,8 +118,12 @@
     delete CodeGenPasses;
     delete PerModulePasses;
     delete PerFunctionPasses;
+    if (CodeGenOpts.DisableFree)
+      BuryPointer(TM.release());
   }
 
+  std::unique_ptr<TargetMachine> TM;
+
   void EmitAssembly(BackendAction Action, raw_ostream *OS);
 };
 
@@ -162,6 +166,11 @@
   PM.add(createSampleProfileLoaderPass(CGOpts.SampleProfileFile));
 }
 
+static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder,
+                                     PassManagerBase &PM) {
+  PM.add(createAddDiscriminatorsPass());
+}
+
 static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
                                     PassManagerBase &PM) {
   PM.add(createBoundsCheckingPass());
@@ -177,12 +186,10 @@
       LangOpts.Sanitize.InitOrder,
       LangOpts.Sanitize.UseAfterReturn,
       LangOpts.Sanitize.UseAfterScope,
-      CGOpts.SanitizerBlacklistFile,
-      CGOpts.SanitizeAddressZeroBaseShadow));
+      CGOpts.SanitizerBlacklistFile));
   PM.add(createAddressSanitizerModulePass(
       LangOpts.Sanitize.InitOrder,
-      CGOpts.SanitizerBlacklistFile,
-      CGOpts.SanitizeAddressZeroBaseShadow));
+      CGOpts.SanitizerBlacklistFile));
 }
 
 static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
@@ -222,7 +229,7 @@
   PM.add(createDataFlowSanitizerPass(CGOpts.SanitizerBlacklistFile));
 }
 
-void EmitAssemblyHelper::CreatePasses(TargetMachine *TM) {
+void EmitAssemblyHelper::CreatePasses() {
   unsigned OptLevel = CodeGenOpts.OptimizationLevel;
   CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining();
 
@@ -244,6 +251,9 @@
   PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
   PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
 
+  PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+                         addAddDiscriminatorsPass);
+
   if (!CodeGenOpts.SampleProfileFile.empty())
     PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
                            addSampleProfileLoaderPass);
@@ -298,19 +308,12 @@
   PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
   if (!CodeGenOpts.SimplifyLibCalls)
     PMBuilder.LibraryInfo->disableAllFunctions();
-  
+
   switch (Inlining) {
   case CodeGenOptions::NoInlining: break;
   case CodeGenOptions::NormalInlining: {
-    // FIXME: Derive these constants in a principled fashion.
-    unsigned Threshold = 225;
-    if (CodeGenOpts.OptimizeSize == 1)      // -Os
-      Threshold = 75;
-    else if (CodeGenOpts.OptimizeSize == 2) // -Oz
-      Threshold = 25;
-    else if (OptLevel > 2)
-      Threshold = 275;
-    PMBuilder.Inliner = createFunctionInliningPass(Threshold);
+    PMBuilder.Inliner =
+        createFunctionInliningPass(OptLevel, CodeGenOpts.OptimizeSize);
     break;
   }
   case CodeGenOptions::OnlyAlwaysInlining:
@@ -324,13 +327,13 @@
   }
 
   // Set up the per-function pass manager.
-  FunctionPassManager *FPM = getPerFunctionPasses(TM);
+  FunctionPassManager *FPM = getPerFunctionPasses();
   if (CodeGenOpts.VerifyModule)
     FPM->add(createVerifierPass());
   PMBuilder.populateFunctionPassManager(*FPM);
 
   // Set up the per-module pass manager.
-  PassManager *MPM = getPerModulePasses(TM);
+  PassManager *MPM = getPerModulePasses();
 
   if (!CodeGenOpts.DisableGCov &&
       (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)) {
@@ -437,6 +440,12 @@
 
   llvm::TargetOptions Options;
 
+  if (CodeGenOpts.DisableIntegratedAS)
+    Options.DisableIntegratedAS = true;
+
+  if (CodeGenOpts.CompressDebugSections)
+    Options.CompressDebugSections = true;
+
   // Set frame pointer elimination mode.
   if (!CodeGenOpts.DisableFPElim) {
     Options.NoFramePointerElim = false;
@@ -503,11 +512,10 @@
 }
 
 bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
-                                       formatted_raw_ostream &OS,
-                                       TargetMachine *TM) {
+                                       formatted_raw_ostream &OS) {
 
   // Create the code generator passes.
-  PassManager *PM = getCodeGenPasses(TM);
+  PassManager *PM = getCodeGenPasses();
 
   // Add LibraryInfo.
   llvm::Triple TargetTriple(TheModule->getTargetTriple());
@@ -552,27 +560,28 @@
   bool UsesCodeGen = (Action != Backend_EmitNothing &&
                       Action != Backend_EmitBC &&
                       Action != Backend_EmitLL);
-  TargetMachine *TM = CreateTargetMachine(UsesCodeGen);
+  if (!TM)
+    TM.reset(CreateTargetMachine(UsesCodeGen));
+
   if (UsesCodeGen && !TM) return;
-  llvm::OwningPtr<TargetMachine> TMOwner(CodeGenOpts.DisableFree ? 0 : TM);
-  CreatePasses(TM);
+  CreatePasses();
 
   switch (Action) {
   case Backend_EmitNothing:
     break;
 
   case Backend_EmitBC:
-    getPerModulePasses(TM)->add(createBitcodeWriterPass(*OS));
+    getPerModulePasses()->add(createBitcodeWriterPass(*OS));
     break;
 
   case Backend_EmitLL:
     FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
-    getPerModulePasses(TM)->add(createPrintModulePass(&FormattedOS));
+    getPerModulePasses()->add(createPrintModulePass(FormattedOS));
     break;
 
   default:
     FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
-    if (!AddEmitPasses(Action, FormattedOS, TM))
+    if (!AddEmitPasses(Action, FormattedOS))
       return;
   }
 
@@ -607,10 +616,23 @@
 void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
                               const CodeGenOptions &CGOpts,
                               const clang::TargetOptions &TOpts,
-                              const LangOptions &LOpts,
-                              Module *M,
-                              BackendAction Action, raw_ostream *OS) {
+                              const LangOptions &LOpts, StringRef TDesc,
+                              Module *M, BackendAction Action,
+                              raw_ostream *OS) {
   EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M);
 
   AsmHelper.EmitAssembly(Action, OS);
+
+  // If an optional clang TargetInfo description string was passed in, use it to
+  // verify the LLVM TargetMachine's DataLayout.
+  if (AsmHelper.TM && !TDesc.empty()) {
+    std::string DLDesc =
+        AsmHelper.TM->getDataLayout()->getStringRepresentation();
+    if (DLDesc != TDesc) {
+      unsigned DiagID = Diags.getCustomDiagID(
+          DiagnosticsEngine::Error, "backend data layout '%0' does not match "
+                                    "expected target description '%1'");
+      Diags.Report(DiagID) << DLDesc << TDesc;
+    }
+  }
 }
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 0df2a40..7c7c80c 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -24,16 +24,6 @@
 using namespace clang;
 using namespace CodeGen;
 
-// The ABI values for various atomic memory orderings.
-enum AtomicOrderingKind {
-  AO_ABI_memory_order_relaxed = 0,
-  AO_ABI_memory_order_consume = 1,
-  AO_ABI_memory_order_acquire = 2,
-  AO_ABI_memory_order_release = 3,
-  AO_ABI_memory_order_acq_rel = 4,
-  AO_ABI_memory_order_seq_cst = 5
-};
-
 namespace {
   class AtomicInfo {
     CodeGenFunction &CGF;
@@ -57,10 +47,10 @@
       ASTContext &C = CGF.getContext();
 
       uint64_t valueAlignInBits;
-      llvm::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);
+      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);
 
       uint64_t atomicAlignInBits;
-      llvm::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);
+      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);
 
       assert(ValueSizeInBits <= AtomicSizeInBits);
       assert(valueAlignInBits <= atomicAlignInBits);
@@ -184,10 +174,134 @@
   return true;
 }
 
-static void
-EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
-             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
-             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
+static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E,
+                              llvm::Value *Dest, llvm::Value *Ptr,
+                              llvm::Value *Val1, llvm::Value *Val2,
+                              uint64_t Size, unsigned Align,
+                              llvm::AtomicOrdering SuccessOrder,
+                              llvm::AtomicOrdering FailureOrder) {
+  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
+  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
+  Expected->setAlignment(Align);
+  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
+  Desired->setAlignment(Align);
+
+  llvm::AtomicCmpXchgInst *Old = CGF.Builder.CreateAtomicCmpXchg(
+      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
+  Old->setVolatile(E->isVolatile());
+
+  // Cmp holds the result of the compare-exchange operation: true on success,
+  // false on failure.
+  llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);
+
+  // This basic block is used to hold the store instruction if the operation
+  // failed.
+  llvm::BasicBlock *StoreExpectedBB =
+      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
+
+  // This basic block is the exit point of the operation, we should end up
+  // here regardless of whether or not the operation succeeded.
+  llvm::BasicBlock *ContinueBB =
+      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
+
+  // Update Expected if Expected isn't equal to Old, otherwise branch to the
+  // exit point.
+  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
+
+  CGF.Builder.SetInsertPoint(StoreExpectedBB);
+  // Update the memory at Expected with Old's value.
+  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
+  StoreExpected->setAlignment(Align);
+  // Finally, branch to the exit point.
+  CGF.Builder.CreateBr(ContinueBB);
+
+  CGF.Builder.SetInsertPoint(ContinueBB);
+  // Update the memory at Dest with Cmp's value.
+  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+  return;
+}
+
+/// Given an ordering required on success, emit all possible cmpxchg
+/// instructions to cope with the provided (but possibly only dynamically known)
+/// FailureOrder.
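+///
+/// Sketch of the intent (identifiers are illustrative): for a call such as
+///   __c11_atomic_compare_exchange_strong(&obj, &expected, desired,
+///                                        memory_order_seq_cst, fail_order);
+/// where 'fail_order' is only known at run time, this emits a switch on the
+/// runtime value that dispatches to a cmpxchg block per failure ordering.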
+static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
+                                        llvm::Value *Dest, llvm::Value *Ptr,
+                                        llvm::Value *Val1, llvm::Value *Val2,
+                                        llvm::Value *FailureOrderVal,
+                                        uint64_t Size, unsigned Align,
+                                        llvm::AtomicOrdering SuccessOrder) {
+  llvm::AtomicOrdering FailureOrder;
+  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
+    switch (FO->getSExtValue()) {
+    default:
+      FailureOrder = llvm::Monotonic;
+      break;
+    case AtomicExpr::AO_ABI_memory_order_consume:
+    case AtomicExpr::AO_ABI_memory_order_acquire:
+      FailureOrder = llvm::Acquire;
+      break;
+    case AtomicExpr::AO_ABI_memory_order_seq_cst:
+      FailureOrder = llvm::SequentiallyConsistent;
+      break;
+    }
+    if (FailureOrder >= SuccessOrder) {
+      // Don't assert on undefined behaviour.
+      FailureOrder =
+        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
+    }
+    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder,
+                      FailureOrder);
+    return;
+  }
+
+  // Create all the relevant BB's
+  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *SeqCstBB = 0;
+  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
+  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
+    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
+  if (SuccessOrder == llvm::SequentiallyConsistent)
+    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
+
+  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
+
+  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
+
+  // Emit all the different atomics
+
+  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+  // doesn't matter unless someone is crazy enough to use something that
+  // doesn't fold to a constant for the ordering.
+  CGF.Builder.SetInsertPoint(MonotonicBB);
+  emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
+                    Size, Align, SuccessOrder, llvm::Monotonic);
+  CGF.Builder.CreateBr(ContBB);
+
+  if (AcquireBB) {
+    CGF.Builder.SetInsertPoint(AcquireBB);
+    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
+                      Size, Align, SuccessOrder, llvm::Acquire);
+    CGF.Builder.CreateBr(ContBB);
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
+                AcquireBB);
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
+                AcquireBB);
+  }
+  if (SeqCstBB) {
+    CGF.Builder.SetInsertPoint(SeqCstBB);
+    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
+                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
+    CGF.Builder.CreateBr(ContBB);
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
+                SeqCstBB);
+  }
+
+  CGF.Builder.SetInsertPoint(ContBB);
+}
+
+static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
+                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+                         llvm::Value *FailureOrder, uint64_t Size,
+                         unsigned Align, llvm::AtomicOrdering Order) {
   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
 
@@ -198,23 +312,10 @@
   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
   case AtomicExpr::AO__atomic_compare_exchange:
-  case AtomicExpr::AO__atomic_compare_exchange_n: {
-    // Note that cmpxchg only supports specifying one ordering and
-    // doesn't support weak cmpxchg, at least at the moment.
-    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
-    LoadVal1->setAlignment(Align);
-    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
-    LoadVal2->setAlignment(Align);
-    llvm::AtomicCmpXchgInst *CXI =
-        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
-    CXI->setVolatile(E->isVolatile());
-    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
-    StoreVal1->setAlignment(Align);
-    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
-    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+  case AtomicExpr::AO__atomic_compare_exchange_n:
+    emitAtomicCmpXchgFailureSet(CGF, E, Dest, Ptr, Val1, Val2, FailureOrder,
+                                Size, Align, Order);
     return;
-  }
-
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
   case AtomicExpr::AO__atomic_load: {
@@ -476,6 +577,8 @@
     Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
 
     std::string LibCallName;
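+    // For atomic fetch_add/fetch_sub on pointers, the value argument of the
+    // libcall is lowered to an integer of pointer width instead of the
+    // pointer type itself.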
+    QualType LoweredMemTy =
+      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
     QualType RetTy;
     bool HaveRetTy = false;
     switch (E->getOp()) {
@@ -531,7 +634,7 @@
     case AtomicExpr::AO__c11_atomic_fetch_add:
     case AtomicExpr::AO__atomic_fetch_add:
       LibCallName = "__atomic_fetch_add";
-      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                         E->getExprLoc());
       break;
     // T __atomic_fetch_and_N(T *mem, T val, int order)
@@ -552,7 +655,7 @@
     case AtomicExpr::AO__c11_atomic_fetch_sub:
     case AtomicExpr::AO__atomic_fetch_sub:
       LibCallName = "__atomic_fetch_sub";
-      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                         E->getExprLoc());
       break;
     // T __atomic_fetch_xor_N(T *mem, T val, int order)
@@ -615,32 +718,32 @@
   if (isa<llvm::ConstantInt>(Order)) {
     int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
     switch (ord) {
-    case AO_ABI_memory_order_relaxed:
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::Monotonic);
+    case AtomicExpr::AO_ABI_memory_order_relaxed:
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+                   Size, Align, llvm::Monotonic);
       break;
-    case AO_ABI_memory_order_consume:
-    case AO_ABI_memory_order_acquire:
+    case AtomicExpr::AO_ABI_memory_order_consume:
+    case AtomicExpr::AO_ABI_memory_order_acquire:
       if (IsStore)
         break; // Avoid crashing on code with undefined behavior
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::Acquire);
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+                   Size, Align, llvm::Acquire);
       break;
-    case AO_ABI_memory_order_release:
+    case AtomicExpr::AO_ABI_memory_order_release:
       if (IsLoad)
         break; // Avoid crashing on code with undefined behavior
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::Release);
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+                   Size, Align, llvm::Release);
       break;
-    case AO_ABI_memory_order_acq_rel:
+    case AtomicExpr::AO_ABI_memory_order_acq_rel:
       if (IsLoad || IsStore)
         break; // Avoid crashing on code with undefined behavior
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::AcquireRelease);
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+                   Size, Align, llvm::AcquireRelease);
       break;
-    case AO_ABI_memory_order_seq_cst:
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::SequentiallyConsistent);
+    case AtomicExpr::AO_ABI_memory_order_seq_cst:
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+                   Size, Align, llvm::SequentiallyConsistent);
       break;
     default: // invalid order
       // We should not ever get here normally, but it's hard to
@@ -676,36 +779,41 @@
 
   // Emit all the different atomics
   Builder.SetInsertPoint(MonotonicBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-               llvm::Monotonic);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+               Size, Align, llvm::Monotonic);
   Builder.CreateBr(ContBB);
   if (!IsStore) {
     Builder.SetInsertPoint(AcquireBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                 llvm::Acquire);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+                 Size, Align, llvm::Acquire);
     Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32(1), AcquireBB);
-    SI->addCase(Builder.getInt32(2), AcquireBB);
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
+                AcquireBB);
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
+                AcquireBB);
   }
   if (!IsLoad) {
     Builder.SetInsertPoint(ReleaseBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                 llvm::Release);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+                 Size, Align, llvm::Release);
     Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32(3), ReleaseBB);
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
+                ReleaseBB);
   }
   if (!IsLoad && !IsStore) {
     Builder.SetInsertPoint(AcqRelBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                 llvm::AcquireRelease);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+                 Size, Align, llvm::AcquireRelease);
     Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32(4), AcqRelBB);
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
+                AcqRelBB);
   }
   Builder.SetInsertPoint(SeqCstBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-               llvm::SequentiallyConsistent);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+               Size, Align, llvm::SequentiallyConsistent);
   Builder.CreateBr(ContBB);
-  SI->addCase(Builder.getInt32(5), SeqCstBB);
+  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
+              SeqCstBB);
 
   // Cleanup and return
   Builder.SetInsertPoint(ContBB);
@@ -761,8 +869,8 @@
              getContext().VoidPtrTy);
     args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
              getContext().VoidPtrTy);
-    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
-                                                AO_ABI_memory_order_seq_cst)),
+    args.add(RValue::get(llvm::ConstantInt::get(
+                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
              getContext().IntTy);
     emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);
 
@@ -911,8 +1019,8 @@
              getContext().VoidPtrTy);
     args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
              getContext().VoidPtrTy);
-    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
-                                                AO_ABI_memory_order_seq_cst)),
+    args.add(RValue::get(llvm::ConstantInt::get(
+                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
              getContext().IntTy);
     emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
     return;
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 692f9a0..15b08d4 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -18,9 +18,9 @@
 #include "CodeGenModule.h"
 #include "clang/AST/DeclObjC.h"
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
 #include <algorithm>
 #include <cstdio>
 
@@ -53,7 +53,7 @@
   return CodeGenFunction(CGM).GenerateCopyHelperFunction(blockInfo);
 }
 
-/// Build the helper function to dipose of a block.
+/// Build the helper function to dispose of a block.
 static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
                                           const CGBlockInfo &blockInfo) {
   return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
@@ -300,8 +300,8 @@
 
   // The header is basically a 'struct { void *; int; int; void *; void *; }'.
   CharUnits ptrSize, ptrAlign, intSize, intAlign;
-  llvm::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
-  llvm::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);
+  std::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
+  std::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);
 
   // Are there crazy embedded platforms where this isn't true?
   assert(intSize <= ptrSize && "layout assumptions horribly violated");
@@ -370,11 +370,10 @@
   }
 
   // Next, all the block captures.
-  for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
-         ce = block->capture_end(); ci != ce; ++ci) {
-    const VarDecl *variable = ci->getVariable();
+  for (const auto &CI : block->captures()) {
+    const VarDecl *variable = CI.getVariable();
 
-    if (ci->isByRef()) {
+    if (CI.isByRef()) {
       // We have to copy/dispose of the __block reference.
       info.NeedsCopyDispose = true;
 
@@ -387,8 +386,7 @@
       maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
 
       layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
-                                        Qualifiers::OCL_None,
-                                        &*ci, llvmType));
+                                        Qualifiers::OCL_None, &CI, llvmType));
       continue;
     }
 
@@ -422,7 +420,7 @@
       lifetime = Qualifiers::OCL_Strong;
 
     // So do types that require non-trivial copy construction.
-    } else if (ci->hasCopyExpr()) {
+    } else if (CI.hasCopyExpr()) {
       info.NeedsCopyDispose = true;
       info.HasCXXObject = true;
 
@@ -446,7 +444,7 @@
     llvm::Type *llvmType =
       CGM.getTypes().ConvertTypeForMem(VT);
     
-    layout.push_back(BlockLayoutChunk(align, size, lifetime, &*ci, llvmType));
+    layout.push_back(BlockLayoutChunk(align, size, lifetime, &CI, llvmType));
   }
 
   // If that was everything, we're done here.
@@ -581,14 +579,13 @@
 
   // Walk through the captures (in order) and find the ones not
   // captured by constant.
-  for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
-         ce = block->capture_end(); ci != ce; ++ci) {
+  for (const auto &CI : block->captures()) {
     // Ignore __block captures; there's nothing special in the
     // on-stack block that we need to do for them.
-    if (ci->isByRef()) continue;
+    if (CI.isByRef()) continue;
 
     // Ignore variables that are constant-captured.
-    const VarDecl *variable = ci->getVariable();
+    const VarDecl *variable = CI.getVariable();
     CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
     if (capture.isConstant()) continue;
 
@@ -679,7 +676,7 @@
   }
 
   // Find the block info for this block and take ownership of it.
-  OwningPtr<CGBlockInfo> blockInfo;
+  std::unique_ptr<CGBlockInfo> blockInfo;
   blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
                                          blockExpr->getBlockDecl()));
 
@@ -741,9 +738,8 @@
   }
 
   // Next, captured variables.
-  for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
-         ce = blockDecl->capture_end(); ci != ce; ++ci) {
-    const VarDecl *variable = ci->getVariable();
+  for (const auto &CI : blockDecl->captures()) {
+    const VarDecl *variable = CI.getVariable();
     const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
 
     // Ignore constant captures.
@@ -761,7 +757,7 @@
     // Compute the address of the thing we're going to move into the
     // block literal.
     llvm::Value *src;
-    if (BlockInfo && ci->isNested()) {
+    if (BlockInfo && CI.isNested()) {
       // We need to use the capture from the enclosing block.
       const CGBlockInfo::Capture &enclosingCapture =
         BlockInfo->getCapture(variable);
@@ -780,8 +776,8 @@
       // emission.
       src = LocalDeclMap.lookup(variable);
       if (!src) {
-        DeclRefExpr declRef(const_cast<VarDecl*>(variable),
-                            /*refersToEnclosing*/ ci->isNested(), type,
+        DeclRefExpr declRef(const_cast<VarDecl *>(variable),
+                            /*refersToEnclosing*/ CI.isNested(), type,
                             VK_LValue, SourceLocation());
         src = EmitDeclRefLValue(&declRef).getAddress();
       }
@@ -791,9 +787,9 @@
     // the block field.  There's no need to chase the forwarding
     // pointer at this point, since we're building something that will
     // live a shorter life than the stack byref anyway.
-    if (ci->isByRef()) {
+    if (CI.isByRef()) {
       // Get a void* that points to the byref struct.
-      if (ci->isNested())
+      if (CI.isNested())
         src = Builder.CreateAlignedLoad(src, align.getQuantity(),
                                         "byref.capture");
       else
@@ -803,7 +799,7 @@
       Builder.CreateAlignedStore(src, blockField, align.getQuantity());
 
     // If we have a copy constructor, evaluate that into the block field.
-    } else if (const Expr *copyExpr = ci->getCopyExpr()) {
+    } else if (const Expr *copyExpr = CI.getCopyExpr()) {
       if (blockDecl->isConversionFromLambda()) {
         // If we have a lambda conversion, emit the expression
         // directly into the block instead.
@@ -851,7 +847,7 @@
       // We use one of these or the other depending on whether the
       // reference is nested.
       DeclRefExpr declRef(const_cast<VarDecl*>(variable),
-                          /*refersToEnclosing*/ ci->isNested(), type,
+                          /*refersToEnclosing*/ CI.isNested(), type,
                           VK_LValue, SourceLocation());
 
       ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
@@ -862,7 +858,7 @@
     }
 
     // Activate the cleanup if layout pushed one.
-    if (!ci->isByRef()) {
+    if (!CI.isByRef()) {
       EHScopeStack::stable_iterator cleanup = capture.getCleanup();
       if (cleanup.isValid())
         ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
@@ -1117,17 +1113,15 @@
   args.push_back(&selfDecl);
 
   // Now add the rest of the parameters.
-  for (BlockDecl::param_const_iterator i = blockDecl->param_begin(),
-       e = blockDecl->param_end(); i != e; ++i)
-    args.push_back(*i);
+  for (auto i : blockDecl->params())
+    args.push_back(i);
 
   // Create the function declaration.
   const FunctionProtoType *fnType = blockInfo.getBlockExpr()->getFunctionType();
-  const CGFunctionInfo &fnInfo =
-    CGM.getTypes().arrangeFunctionDeclaration(fnType->getResultType(), args,
-                                              fnType->getExtInfo(),
-                                              fnType->isVariadic());
-  if (CGM.ReturnTypeUsesSRet(fnInfo))
+  const CGFunctionInfo &fnInfo = CGM.getTypes().arrangeFreeFunctionDeclaration(
+      fnType->getReturnType(), args, fnType->getExtInfo(),
+      fnType->isVariadic());
+  if (CGM.ReturnSlotInterferesWithArgs(fnInfo))
     blockInfo.UsesStret = true;
 
   llvm::FunctionType *fnLLVMType = CGM.getTypes().GetFunctionType(fnInfo);
@@ -1140,7 +1134,7 @@
   CGM.SetInternalFunctionAttributes(blockDecl, fn, fnInfo);
 
   // Begin generating the function.
-  StartFunction(blockDecl, fnType->getResultType(), fn, fnInfo, args,
+  StartFunction(blockDecl, fnType->getReturnType(), fn, fnInfo, args,
                 blockInfo.getBlockExpr()->getBody()->getLocStart());
 
   // Okay.  Undo some of what StartFunction did.
@@ -1177,9 +1171,8 @@
   }
 
   // Also force all the constant captures.
-  for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
-         ce = blockDecl->capture_end(); ci != ce; ++ci) {
-    const VarDecl *variable = ci->getVariable();
+  for (const auto &CI : blockDecl->captures()) {
+    const VarDecl *variable = CI.getVariable();
     const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
     if (!capture.isConstant()) continue;
 
@@ -1201,8 +1194,14 @@
 
   if (IsLambdaConversionToBlock)
     EmitLambdaBlockInvokeBody();
-  else
+  else {
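+    // Instrument the block body for PGO: set up region counters for this
+    // block, count entry into the body, then emit the counter data and tear
+    // the counters back down.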
+    PGO.assignRegionCounters(blockDecl, fn);
+    RegionCounter Cnt = getPGORegionCounter(blockDecl->getBody());
+    Cnt.beginRegion(Builder);
     EmitStmt(blockDecl->getBody());
+    PGO.emitInstrumentationData();
+    PGO.destroyRegionCounters();
+  }
 
   // Remember where we were...
   llvm::BasicBlock *resume = Builder.GetInsertBlock();
@@ -1214,9 +1213,8 @@
   // Emit debug information for all the DeclRefExprs.
   // FIXME: also for 'this'
   if (CGDebugInfo *DI = getDebugInfo()) {
-    for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
-           ce = blockDecl->capture_end(); ci != ce; ++ci) {
-      const VarDecl *variable = ci->getVariable();
+    for (const auto &CI : blockDecl->captures()) {
+      const VarDecl *variable = CI.getVariable();
       DI->EmitLocation(Builder, variable->getLocation());
 
       if (CGM.getCodeGenOpts().getDebugInfo()
@@ -1285,10 +1283,8 @@
   ImplicitParamDecl srcDecl(0, SourceLocation(), 0, C.VoidPtrTy);
   args.push_back(&srcDecl);
 
-  const CGFunctionInfo &FI =
-    CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
-                                              FunctionType::ExtInfo(),
-                                              /*variadic*/ false);
+  const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+      C.VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
 
   // FIXME: it would be nice if these were mergeable with things with
   // identical semantics.
@@ -1325,25 +1321,24 @@
 
   const BlockDecl *blockDecl = blockInfo.getBlockDecl();
 
-  for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
-         ce = blockDecl->capture_end(); ci != ce; ++ci) {
-    const VarDecl *variable = ci->getVariable();
+  for (const auto &CI : blockDecl->captures()) {
+    const VarDecl *variable = CI.getVariable();
     QualType type = variable->getType();
 
     const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
     if (capture.isConstant()) continue;
 
-    const Expr *copyExpr = ci->getCopyExpr();
+    const Expr *copyExpr = CI.getCopyExpr();
     BlockFieldFlags flags;
 
     bool useARCWeakCopy = false;
     bool useARCStrongCopy = false;
 
     if (copyExpr) {
-      assert(!ci->isByRef());
+      assert(!CI.isByRef());
       // don't bother computing flags
 
-    } else if (ci->isByRef()) {
+    } else if (CI.isByRef()) {
       flags = BLOCK_FIELD_IS_BYREF;
       if (type.isObjCGCWeak())
         flags |= BLOCK_FIELD_IS_WEAK;
@@ -1423,7 +1418,7 @@
         };
 
         bool copyCanThrow = false;
-        if (ci->isByRef() && variable->getType()->getAsCXXRecordDecl()) {
+        if (CI.isByRef() && variable->getType()->getAsCXXRecordDecl()) {
           const Expr *copyExpr =
             CGM.getContext().getBlockVarCopyInits(variable);
           if (copyExpr) {
@@ -1460,10 +1455,8 @@
   ImplicitParamDecl srcDecl(0, SourceLocation(), 0, C.VoidPtrTy);
   args.push_back(&srcDecl);
 
-  const CGFunctionInfo &FI =
-    CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
-                                              FunctionType::ExtInfo(),
-                                              /*variadic*/ false);
+  const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+      C.VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
 
   // FIXME: We'd like to put these into a mergable by content, with
   // internal linkage.
@@ -1496,9 +1489,8 @@
 
   CodeGenFunction::RunCleanupsScope cleanups(*this);
 
-  for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
-         ce = blockDecl->capture_end(); ci != ce; ++ci) {
-    const VarDecl *variable = ci->getVariable();
+  for (const auto &CI : blockDecl->captures()) {
+    const VarDecl *variable = CI.getVariable();
     QualType type = variable->getType();
 
     const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
@@ -1510,7 +1502,7 @@
     bool useARCWeakDestroy = false;
     bool useARCStrongDestroy = false;
 
-    if (ci->isByRef()) {
+    if (CI.isByRef()) {
       flags = BLOCK_FIELD_IS_BYREF;
       if (type.isObjCGCWeak())
         flags |= BLOCK_FIELD_IS_WEAK;
@@ -1587,7 +1579,7 @@
     : ByrefHelpers(alignment), Flags(flags) {}
 
   void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
-                llvm::Value *srcField) {
+                llvm::Value *srcField) override {
     destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
 
     srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
@@ -1602,14 +1594,14 @@
     CGF.EmitNounwindRuntimeCall(fn, args);
   }
 
-  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
     field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
     llvm::Value *value = CGF.Builder.CreateLoad(field);
 
     CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER);
   }
 
-  void profileImpl(llvm::FoldingSetNodeID &id) const {
+  void profileImpl(llvm::FoldingSetNodeID &id) const override {
     id.AddInteger(Flags.getBitMask());
   }
 };
@@ -1620,15 +1612,15 @@
   ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
 
   void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
-                llvm::Value *srcField) {
+                llvm::Value *srcField) override {
     CGF.EmitARCMoveWeak(destField, srcField);
   }
 
-  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
     CGF.EmitARCDestroyWeak(field);
   }
 
-  void profileImpl(llvm::FoldingSetNodeID &id) const {
+  void profileImpl(llvm::FoldingSetNodeID &id) const override {
     // 0 is distinguishable from all pointers and byref flags
     id.AddInteger(0);
   }
@@ -1641,7 +1633,7 @@
   ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
 
   void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
-                llvm::Value *srcField) {
+                llvm::Value *srcField) override {
     // Do a "move" by copying the value and then zeroing out the old
     // variable.
 
@@ -1665,11 +1657,11 @@
     store->setAlignment(Alignment.getQuantity());
   }
 
-  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
     CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
   }
 
-  void profileImpl(llvm::FoldingSetNodeID &id) const {
+  void profileImpl(llvm::FoldingSetNodeID &id) const override {
     // 1 is distinguishable from all pointers and byref flags
     id.AddInteger(1);
   }
@@ -1682,7 +1674,7 @@
   ARCStrongBlockByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
 
   void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
-                llvm::Value *srcField) {
+                llvm::Value *srcField) override {
     // Do the copy with objc_retainBlock; that's all that
     // _Block_object_assign would do anyway, and we'd have to pass the
     // right arguments to make sure it doesn't get no-op'ed.
@@ -1695,11 +1687,11 @@
     store->setAlignment(Alignment.getQuantity());
   }
 
-  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
     CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
   }
 
-  void profileImpl(llvm::FoldingSetNodeID &id) const {
+  void profileImpl(llvm::FoldingSetNodeID &id) const override {
     // 2 is distinguishable from all pointers and byref flags
     id.AddInteger(2);
   }
@@ -1716,20 +1708,20 @@
                   const Expr *copyExpr)
     : ByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
 
-  bool needsCopy() const { return CopyExpr != 0; }
+  bool needsCopy() const override { return CopyExpr != 0; }
   void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
-                llvm::Value *srcField) {
+                llvm::Value *srcField) override {
     if (!CopyExpr) return;
     CGF.EmitSynthesizedCXXCopyCtor(destField, srcField, CopyExpr);
   }
 
-  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+  void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
     EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin();
     CGF.PushDestructorCleanup(VarType, field);
     CGF.PopCleanupBlocks(cleanupDepth);
   }
 
-  void profileImpl(llvm::FoldingSetNodeID &id) const {
+  void profileImpl(llvm::FoldingSetNodeID &id) const override {
     id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr());
   }
 };
@@ -1751,10 +1743,8 @@
   ImplicitParamDecl src(0, SourceLocation(), 0, Context.VoidPtrTy);
   args.push_back(&src);
 
-  const CGFunctionInfo &FI =
-    CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
-                                                  FunctionType::ExtInfo(),
-                                                  /*variadic*/ false);
+  const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration(
+      R, args, FunctionType::ExtInfo(), /*variadic=*/false);
 
   CodeGenTypes &Types = CGF.CGM.getTypes();
   llvm::FunctionType *LTy = Types.GetFunctionType(FI);
@@ -1822,10 +1812,8 @@
   ImplicitParamDecl src(0, SourceLocation(), 0, Context.VoidPtrTy);
   args.push_back(&src);
 
-  const CGFunctionInfo &FI =
-    CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
-                                                  FunctionType::ExtInfo(),
-                                                  /*variadic*/ false);
+  const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration(
+      R, args, FunctionType::ExtInfo(), /*variadic=*/false);
 
   CodeGenTypes &Types = CGF.CGM.getTypes();
   llvm::FunctionType *LTy = Types.GetFunctionType(FI);
@@ -2218,7 +2206,7 @@
     llvm::Value *Addr;
     CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       // Should we be passing FIELD_IS_WEAK here?
       CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
     }
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 7726ad3..5a86bdd 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -215,8 +215,11 @@
     return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
   case Builtin::BI__builtin_stdarg_start:
   case Builtin::BI__builtin_va_start:
+  case Builtin::BI__va_start:
   case Builtin::BI__builtin_va_end: {
-    Value *ArgValue = EmitVAListRef(E->getArg(0));
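+    // __va_start takes a pointer to the va_list as its first argument, so
+    // emit it as a scalar rather than through EmitVAListRef.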
+    Value *ArgValue = (BuiltinID == Builtin::BI__va_start)
+                          ? EmitScalarExpr(E->getArg(0))
+                          : EmitVAListRef(E->getArg(0));
     llvm::Type *DestType = Int8PtrTy;
     if (ArgValue->getType() != DestType)
       ArgValue = Builder.CreateBitCast(ArgValue, DestType,
@@ -429,6 +432,12 @@
     Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
     return RValue::get(Builder.CreateCall(F));
   }
+  case Builtin::BI__builtin___clear_cache: {
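+    // Lower __builtin___clear_cache to the llvm.clear_cache intrinsic over
+    // the [begin, end) range.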
+    Value *Begin = EmitScalarExpr(E->getArg(0));
+    Value *End = EmitScalarExpr(E->getArg(1));
+    Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
+    return RValue::get(Builder.CreateCall2(F, Begin, End));
+  }
   case Builtin::BI__builtin_trap: {
     Value *F = CGM.getIntrinsic(Intrinsic::trap);
     return RValue::get(Builder.CreateCall(F));
@@ -964,6 +973,7 @@
     Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
 
     Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+                                                llvm::SequentiallyConsistent,
                                                 llvm::SequentiallyConsistent);
     Result = EmitFromInt(*this, Result, T, ValueType);
     return RValue::get(Result);
@@ -990,6 +1000,7 @@
 
     Value *OldVal = Args[1];
     Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+                                                 llvm::SequentiallyConsistent,
                                                  llvm::SequentiallyConsistent);
     Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
     // zext bool to int.
@@ -1311,7 +1322,6 @@
     llvm::Type *ArgType = Base->getType();
     Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
     return RValue::get(Builder.CreateCall2(F, Base, Exponent));
-    break;
   }
 
   case Builtin::BIfma:
@@ -1500,6 +1510,43 @@
     return RValue::get(EmitLValue(E->getArg(0)).getAddress());
   case Builtin::BI__noop:
     return RValue::get(0);
+  case Builtin::BI_InterlockedCompareExchange: {
+    AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
+        EmitScalarExpr(E->getArg(0)),
+        EmitScalarExpr(E->getArg(2)),
+        EmitScalarExpr(E->getArg(1)),
+        SequentiallyConsistent,
+        SequentiallyConsistent);
+    CXI->setVolatile(true);
+    return RValue::get(CXI);
+  }
+  case Builtin::BI_InterlockedIncrement: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Add,
+      EmitScalarExpr(E->getArg(0)),
+      ConstantInt::get(Int32Ty, 1),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
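+    // atomicrmw yields the value the memory held before the add, while
+    // _InterlockedIncrement must return the incremented value, hence the
+    // extra add (and the matching sub for _InterlockedDecrement below).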
+    return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
+  }
+  case Builtin::BI_InterlockedDecrement: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Sub,
+      EmitScalarExpr(E->getArg(0)),
+      ConstantInt::get(Int32Ty, 1),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
+  }
+  case Builtin::BI_InterlockedExchangeAdd: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Add,
+      EmitScalarExpr(E->getArg(0)),
+      EmitScalarExpr(E->getArg(1)),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RValue::get(RMWI);
+  }
   }
 
   // If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -1591,10 +1638,15 @@
                                               const CallExpr *E) {
   switch (getTarget().getTriple().getArch()) {
   case llvm::Triple::aarch64:
+  case llvm::Triple::aarch64_be:
     return EmitAArch64BuiltinExpr(BuiltinID, E);
   case llvm::Triple::arm:
+  case llvm::Triple::armeb:
   case llvm::Triple::thumb:
+  case llvm::Triple::thumbeb:
     return EmitARMBuiltinExpr(BuiltinID, E);
+  case llvm::Triple::arm64:
+    return EmitARM64BuiltinExpr(BuiltinID, E);
   case llvm::Triple::x86:
   case llvm::Triple::x86_64:
     return EmitX86BuiltinExpr(BuiltinID, E);
@@ -1624,6 +1676,11 @@
   case NeonTypeFlags::Int64:
   case NeonTypeFlags::Poly64:
     return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
+  case NeonTypeFlags::Poly128:
+    // FIXME: i128 and f128 aren't fully supported in Clang and LLVM yet, and
+    // much of the i128/f128 API is missing, so we use v16i8 to represent
+    // poly128 and rely on pattern matching.
+    return llvm::VectorType::get(CGF->Int8Ty, 16);
   case NeonTypeFlags::Float32:
     return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
   case NeonTypeFlags::Float64:
@@ -1694,6 +1751,36 @@
     return Builder.CreateAShr(Vec, Shift, name);
 }
 
+Value *CodeGenFunction::EmitConcatVectors(Value *Lo, Value *Hi,
+                                          llvm::Type *ArgTy) {
+  unsigned NumElts = ArgTy->getVectorNumElements();
+  SmallVector<Constant *, 16> Indices;
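+  // Build a mask that selects every lane of Lo followed by every lane of Hi
+  // (indices 0..2*NumElts-1), forming the concatenated vector.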
+  for (unsigned i = 0; i < 2 * NumElts; ++i)
+    Indices.push_back(ConstantInt::get(Int32Ty, i));
+
+  Constant *Mask = ConstantVector::get(Indices);
+  Value *LoCast = Builder.CreateBitCast(Lo, ArgTy);
+  Value *HiCast = Builder.CreateBitCast(Hi, ArgTy);
+  return Builder.CreateShuffleVector(LoCast, HiCast, Mask, "concat");
+}
+
+Value *CodeGenFunction::EmitExtractHigh(Value *Vec, llvm::Type *ResTy) {
+  unsigned NumElts = ResTy->getVectorNumElements();
+  SmallVector<Constant *, 8> Indices;
+
+  llvm::Type *InTy = llvm::VectorType::get(ResTy->getVectorElementType(),
+                                           NumElts * 2);
+  Value *VecCast = Builder.CreateBitCast(Vec, InTy);
+
+  // extract_high is a shuffle on the second half of the input indices: E.g. 4,
+  // 5, 6, 7 if we're extracting <4 x i16> from <8 x i16>.
+  for (unsigned i = 0; i < NumElts; ++i)
+    Indices.push_back(ConstantInt::get(Int32Ty, NumElts + i));
+
+  Constant *Mask = ConstantVector::get(Indices);
+  return Builder.CreateShuffleVector(VecCast, VecCast, Mask, "concat");
+}
+
 /// GetPointeeAlignment - Given an expression with a pointer type, find the
 /// alignment of the type referenced by the pointer.  Skip over implicit
 /// casts.
@@ -1749,21 +1836,1044 @@
   return std::make_pair(EmitScalarExpr(Addr), Align);
 }
 
+enum {
+  AddRetType = (1 << 0),
+  Add1ArgType = (1 << 1),
+  Add2ArgTypes = (1 << 2),
+
+  VectorizeRetType = (1 << 3),
+  VectorizeArgTypes = (1 << 4),
+
+  InventFloatType = (1 << 5),
+  UnsignedAlts = (1 << 6),
+
+  Use64BitVectors = (1 << 7),
+  Use128BitVectors = (1 << 8),
+
+  Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
+  VectorRet = AddRetType | VectorizeRetType,
+  VectorRetGetArgs01 =
+      AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
+  FpCmpzModifiers =
+      AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
+};
+
+struct NeonIntrinsicInfo {
+  unsigned BuiltinID;
+  unsigned LLVMIntrinsic;
+  unsigned AltLLVMIntrinsic;
+  const char *NameHint;
+  unsigned TypeModifier;
+
+  bool operator<(unsigned RHSBuiltinID) const {
+    return BuiltinID < RHSBuiltinID;
+  }
+};
+
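+// NEONMAP0/1/2 describe how a NEON builtin maps onto LLVM intrinsics:
+// NEONMAP0 marks a builtin with no direct intrinsic (handled with custom
+// code), NEONMAP1 names a single intrinsic, and NEONMAP2 additionally names
+// an alternate intrinsic (e.g. for the signed/unsigned split); the last
+// field carries the type-modifier flags defined above.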
+#define NEONMAP0(NameBase) \
+  { NEON::BI__builtin_neon_ ## NameBase, 0, 0, #NameBase, 0 }
+
+#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
+  { NEON::BI__builtin_neon_ ## NameBase, \
+      Intrinsic::LLVMIntrinsic, 0, #NameBase, TypeModifier }
+
+#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
+  { NEON::BI__builtin_neon_ ## NameBase, \
+      Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
+      #NameBase, TypeModifier }
+
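+// The intrinsic tables below are looked up by BuiltinID (see operator< in
+// NeonIntrinsicInfo), which suggests they are meant to stay sorted by
+// builtin ID.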
+static const NeonIntrinsicInfo AArch64SISDIntrinsicInfo[] = {
+  NEONMAP1(vabdd_f64, aarch64_neon_vabd, AddRetType),
+  NEONMAP1(vabds_f32, aarch64_neon_vabd, AddRetType),
+  NEONMAP1(vabsd_s64, aarch64_neon_vabs, 0),
+  NEONMAP1(vaddd_s64, aarch64_neon_vaddds, 0),
+  NEONMAP1(vaddd_u64, aarch64_neon_vadddu, 0),
+  NEONMAP1(vaddlv_s16, aarch64_neon_saddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlv_s8, aarch64_neon_saddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlv_u16, aarch64_neon_uaddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlv_u8, aarch64_neon_uaddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlvq_s16, aarch64_neon_saddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlvq_s8, aarch64_neon_saddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlvq_u16, aarch64_neon_uaddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddlvq_u8, aarch64_neon_uaddlv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddv_f32, aarch64_neon_vpfadd, AddRetType | Add1ArgType),
+  NEONMAP1(vaddv_s16, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddv_s32, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddv_s8, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddv_u16, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddv_u32, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddv_u8, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddvq_f32, aarch64_neon_vpfadd, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_f64, aarch64_neon_vpfadd, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_s16, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddvq_s32, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddvq_s64, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddvq_s8, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddvq_u16, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddvq_u32, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddvq_u64, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vaddvq_u8, aarch64_neon_vaddv, VectorRet | Add1ArgType),
+  NEONMAP1(vcaged_f64, aarch64_neon_fcage, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcages_f32, aarch64_neon_fcage, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcagtd_f64, aarch64_neon_fcagt, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcagts_f32, aarch64_neon_fcagt, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcaled_f64, aarch64_neon_fcage, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcales_f32, aarch64_neon_fcage, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcaltd_f64, aarch64_neon_fcagt, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcalts_f32, aarch64_neon_fcagt, VectorRet | Add2ArgTypes),
+  NEONMAP1(vceqd_f64, aarch64_neon_fceq, VectorRet | Add2ArgTypes),
+  NEONMAP1(vceqd_s64, aarch64_neon_vceq, VectorRetGetArgs01),
+  NEONMAP1(vceqd_u64, aarch64_neon_vceq, VectorRetGetArgs01),
+  NEONMAP1(vceqs_f32, aarch64_neon_fceq, VectorRet | Add2ArgTypes),
+  NEONMAP1(vceqzd_f64, aarch64_neon_fceq, FpCmpzModifiers),
+  NEONMAP1(vceqzd_s64, aarch64_neon_vceq, VectorRetGetArgs01),
+  NEONMAP1(vceqzd_u64, aarch64_neon_vceq, VectorRetGetArgs01),
+  NEONMAP1(vceqzs_f32, aarch64_neon_fceq, FpCmpzModifiers),
+  NEONMAP1(vcged_f64, aarch64_neon_fcge, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcged_s64, aarch64_neon_vcge, VectorRetGetArgs01),
+  NEONMAP1(vcged_u64, aarch64_neon_vchs, VectorRetGetArgs01),
+  NEONMAP1(vcges_f32, aarch64_neon_fcge, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcgezd_f64, aarch64_neon_fcge, FpCmpzModifiers),
+  NEONMAP1(vcgezd_s64, aarch64_neon_vcge, VectorRetGetArgs01),
+  NEONMAP1(vcgezs_f32, aarch64_neon_fcge, FpCmpzModifiers),
+  NEONMAP1(vcgtd_f64, aarch64_neon_fcgt, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcgtd_s64, aarch64_neon_vcgt, VectorRetGetArgs01),
+  NEONMAP1(vcgtd_u64, aarch64_neon_vchi, VectorRetGetArgs01),
+  NEONMAP1(vcgts_f32, aarch64_neon_fcgt, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcgtzd_f64, aarch64_neon_fcgt, FpCmpzModifiers),
+  NEONMAP1(vcgtzd_s64, aarch64_neon_vcgt, VectorRetGetArgs01),
+  NEONMAP1(vcgtzs_f32, aarch64_neon_fcgt, FpCmpzModifiers),
+  NEONMAP1(vcled_f64, aarch64_neon_fcge, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcled_s64, aarch64_neon_vcge, VectorRetGetArgs01),
+  NEONMAP1(vcled_u64, aarch64_neon_vchs, VectorRetGetArgs01),
+  NEONMAP1(vcles_f32, aarch64_neon_fcge, VectorRet | Add2ArgTypes),
+  NEONMAP1(vclezd_f64, aarch64_neon_fclez, FpCmpzModifiers),
+  NEONMAP1(vclezd_s64, aarch64_neon_vclez, VectorRetGetArgs01),
+  NEONMAP1(vclezs_f32, aarch64_neon_fclez, FpCmpzModifiers),
+  NEONMAP1(vcltd_f64, aarch64_neon_fcgt, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcltd_s64, aarch64_neon_vcgt, VectorRetGetArgs01),
+  NEONMAP1(vcltd_u64, aarch64_neon_vchi, VectorRetGetArgs01),
+  NEONMAP1(vclts_f32, aarch64_neon_fcgt, VectorRet | Add2ArgTypes),
+  NEONMAP1(vcltzd_f64, aarch64_neon_fcltz, FpCmpzModifiers),
+  NEONMAP1(vcltzd_s64, aarch64_neon_vcltz, VectorRetGetArgs01),
+  NEONMAP1(vcltzs_f32, aarch64_neon_fcltz, FpCmpzModifiers),
+  NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtd_f64_s64, aarch64_neon_vcvtint2fps, AddRetType | Vectorize1ArgType),
+  NEONMAP1(vcvtd_f64_u64, aarch64_neon_vcvtint2fpu, AddRetType | Vectorize1ArgType),
+  NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp_n, AddRetType | Vectorize1ArgType),
+  NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp_n, AddRetType | Vectorize1ArgType),
+  NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs_n, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu_n, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, VectorRet | Add1ArgType),
+  NEONMAP1(vcvts_f32_s32, aarch64_neon_vcvtint2fps, AddRetType | Vectorize1ArgType),
+  NEONMAP1(vcvts_f32_u32, aarch64_neon_vcvtint2fpu, AddRetType | Vectorize1ArgType),
+  NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp_n, AddRetType | Vectorize1ArgType),
+  NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp_n, AddRetType | Vectorize1ArgType),
+  NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs_n, VectorRet | Add1ArgType),
+  NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu_n, VectorRet | Add1ArgType),
+  NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, VectorRet | Add1ArgType),
+  NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, VectorRet | Add1ArgType),
+  NEONMAP1(vcvtxd_f32_f64, aarch64_neon_fcvtxn, 0),
+  NEONMAP0(vdupb_lane_i8),
+  NEONMAP0(vdupb_laneq_i8),
+  NEONMAP0(vdupd_lane_f64),
+  NEONMAP0(vdupd_lane_i64),
+  NEONMAP0(vdupd_laneq_f64),
+  NEONMAP0(vdupd_laneq_i64),
+  NEONMAP0(vduph_lane_i16),
+  NEONMAP0(vduph_laneq_i16),
+  NEONMAP0(vdups_lane_f32),
+  NEONMAP0(vdups_lane_i32),
+  NEONMAP0(vdups_laneq_f32),
+  NEONMAP0(vdups_laneq_i32),
+  NEONMAP0(vfmad_lane_f64),
+  NEONMAP0(vfmad_laneq_f64),
+  NEONMAP0(vfmas_lane_f32),
+  NEONMAP0(vfmas_laneq_f32),
+  NEONMAP0(vget_lane_f32),
+  NEONMAP0(vget_lane_f64),
+  NEONMAP0(vget_lane_i16),
+  NEONMAP0(vget_lane_i32),
+  NEONMAP0(vget_lane_i64),
+  NEONMAP0(vget_lane_i8),
+  NEONMAP0(vgetq_lane_f32),
+  NEONMAP0(vgetq_lane_f64),
+  NEONMAP0(vgetq_lane_i16),
+  NEONMAP0(vgetq_lane_i32),
+  NEONMAP0(vgetq_lane_i64),
+  NEONMAP0(vgetq_lane_i8),
+  NEONMAP1(vmaxnmv_f32, aarch64_neon_vpfmaxnm, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxnmvq_f32, aarch64_neon_vmaxnmv, 0),
+  NEONMAP1(vmaxnmvq_f64, aarch64_neon_vpfmaxnm, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxv_f32, aarch64_neon_vpmax, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxv_s16, aarch64_neon_smaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxv_s8, aarch64_neon_smaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxv_u16, aarch64_neon_umaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxv_u8, aarch64_neon_umaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxvq_f32, aarch64_neon_vmaxv, 0),
+  NEONMAP1(vmaxvq_f64, aarch64_neon_vpmax, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_s16, aarch64_neon_smaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxvq_s8, aarch64_neon_smaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxvq_u16, aarch64_neon_umaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vmaxvq_u8, aarch64_neon_umaxv, VectorRet | Add1ArgType),
+  NEONMAP1(vminnmv_f32, aarch64_neon_vpfminnm, AddRetType | Add1ArgType),
+  NEONMAP1(vminnmvq_f32, aarch64_neon_vminnmv, 0),
+  NEONMAP1(vminnmvq_f64, aarch64_neon_vpfminnm, AddRetType | Add1ArgType),
+  NEONMAP1(vminv_f32, aarch64_neon_vpmin, AddRetType | Add1ArgType),
+  NEONMAP1(vminv_s16, aarch64_neon_sminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminv_s32, aarch64_neon_sminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminv_s8, aarch64_neon_sminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminv_u16, aarch64_neon_uminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminv_u32, aarch64_neon_uminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminv_u8, aarch64_neon_uminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminvq_f32, aarch64_neon_vminv, 0),
+  NEONMAP1(vminvq_f64, aarch64_neon_vpmin, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_s16, aarch64_neon_sminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminvq_s32, aarch64_neon_sminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminvq_s8, aarch64_neon_sminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminvq_u16, aarch64_neon_uminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminvq_u32, aarch64_neon_uminv, VectorRet | Add1ArgType),
+  NEONMAP1(vminvq_u8, aarch64_neon_uminv, VectorRet | Add1ArgType),
+  NEONMAP0(vmul_n_f64),
+  NEONMAP1(vmull_p64, aarch64_neon_vmull_p64, 0),
+  NEONMAP0(vmulxd_f64),
+  NEONMAP0(vmulxs_f32),
+  NEONMAP1(vnegd_s64, aarch64_neon_vneg, 0),
+  NEONMAP1(vpaddd_f64, aarch64_neon_vpfadd, AddRetType | Add1ArgType),
+  NEONMAP1(vpaddd_s64, aarch64_neon_vpadd, 0),
+  NEONMAP1(vpaddd_u64, aarch64_neon_vpadd, 0),
+  NEONMAP1(vpadds_f32, aarch64_neon_vpfadd, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxnmqd_f64, aarch64_neon_vpfmaxnm, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxnms_f32, aarch64_neon_vpfmaxnm, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxqd_f64, aarch64_neon_vpmax, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxs_f32, aarch64_neon_vpmax, AddRetType | Add1ArgType),
+  NEONMAP1(vpminnmqd_f64, aarch64_neon_vpfminnm, AddRetType | Add1ArgType),
+  NEONMAP1(vpminnms_f32, aarch64_neon_vpfminnm, AddRetType | Add1ArgType),
+  NEONMAP1(vpminqd_f64, aarch64_neon_vpmin, AddRetType | Add1ArgType),
+  NEONMAP1(vpmins_f32, aarch64_neon_vpmin, AddRetType | Add1ArgType),
+  NEONMAP1(vqabsb_s8, arm_neon_vqabs, VectorRet),
+  NEONMAP1(vqabsd_s64, arm_neon_vqabs, VectorRet),
+  NEONMAP1(vqabsh_s16, arm_neon_vqabs, VectorRet),
+  NEONMAP1(vqabss_s32, arm_neon_vqabs, VectorRet),
+  NEONMAP1(vqaddb_s8, arm_neon_vqadds, VectorRet),
+  NEONMAP1(vqaddb_u8, arm_neon_vqaddu, VectorRet),
+  NEONMAP1(vqaddd_s64, arm_neon_vqadds, VectorRet),
+  NEONMAP1(vqaddd_u64, arm_neon_vqaddu, VectorRet),
+  NEONMAP1(vqaddh_s16, arm_neon_vqadds, VectorRet),
+  NEONMAP1(vqaddh_u16, arm_neon_vqaddu, VectorRet),
+  NEONMAP1(vqadds_s32, arm_neon_vqadds, VectorRet),
+  NEONMAP1(vqadds_u32, arm_neon_vqaddu, VectorRet),
+  NEONMAP0(vqdmlalh_lane_s16),
+  NEONMAP0(vqdmlalh_laneq_s16),
+  NEONMAP1(vqdmlalh_s16, aarch64_neon_vqdmlal, VectorRet),
+  NEONMAP0(vqdmlals_lane_s32),
+  NEONMAP0(vqdmlals_laneq_s32),
+  NEONMAP1(vqdmlals_s32, aarch64_neon_vqdmlal, VectorRet),
+  NEONMAP0(vqdmlslh_lane_s16),
+  NEONMAP0(vqdmlslh_laneq_s16),
+  NEONMAP1(vqdmlslh_s16, aarch64_neon_vqdmlsl, VectorRet),
+  NEONMAP0(vqdmlsls_lane_s32),
+  NEONMAP0(vqdmlsls_laneq_s32),
+  NEONMAP1(vqdmlsls_s32, aarch64_neon_vqdmlsl, VectorRet),
+  NEONMAP1(vqdmulhh_s16, arm_neon_vqdmulh, VectorRet),
+  NEONMAP1(vqdmulhs_s32, arm_neon_vqdmulh, VectorRet),
+  NEONMAP1(vqdmullh_s16, arm_neon_vqdmull, VectorRet),
+  NEONMAP1(vqdmulls_s32, arm_neon_vqdmull, VectorRet),
+  NEONMAP1(vqmovnd_s64, arm_neon_vqmovns, VectorRet),
+  NEONMAP1(vqmovnd_u64, arm_neon_vqmovnu, VectorRet),
+  NEONMAP1(vqmovnh_s16, arm_neon_vqmovns, VectorRet),
+  NEONMAP1(vqmovnh_u16, arm_neon_vqmovnu, VectorRet),
+  NEONMAP1(vqmovns_s32, arm_neon_vqmovns, VectorRet),
+  NEONMAP1(vqmovns_u32, arm_neon_vqmovnu, VectorRet),
+  NEONMAP1(vqmovund_s64, arm_neon_vqmovnsu, VectorRet),
+  NEONMAP1(vqmovunh_s16, arm_neon_vqmovnsu, VectorRet),
+  NEONMAP1(vqmovuns_s32, arm_neon_vqmovnsu, VectorRet),
+  NEONMAP1(vqnegb_s8, arm_neon_vqneg, VectorRet),
+  NEONMAP1(vqnegd_s64, arm_neon_vqneg, VectorRet),
+  NEONMAP1(vqnegh_s16, arm_neon_vqneg, VectorRet),
+  NEONMAP1(vqnegs_s32, arm_neon_vqneg, VectorRet),
+  NEONMAP1(vqrdmulhh_s16, arm_neon_vqrdmulh, VectorRet),
+  NEONMAP1(vqrdmulhs_s32, arm_neon_vqrdmulh, VectorRet),
+  NEONMAP1(vqrshlb_s8, aarch64_neon_vqrshls, VectorRet),
+  NEONMAP1(vqrshlb_u8, aarch64_neon_vqrshlu, VectorRet),
+  NEONMAP1(vqrshld_s64, aarch64_neon_vqrshls, VectorRet),
+  NEONMAP1(vqrshld_u64, aarch64_neon_vqrshlu, VectorRet),
+  NEONMAP1(vqrshlh_s16, aarch64_neon_vqrshls, VectorRet),
+  NEONMAP1(vqrshlh_u16, aarch64_neon_vqrshlu, VectorRet),
+  NEONMAP1(vqrshls_s32, aarch64_neon_vqrshls, VectorRet),
+  NEONMAP1(vqrshls_u32, aarch64_neon_vqrshlu, VectorRet),
+  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_vsqrshrn, VectorRet),
+  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_vuqrshrn, VectorRet),
+  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_vsqrshrn, VectorRet),
+  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_vuqrshrn, VectorRet),
+  NEONMAP1(vqrshrns_n_s32, aarch64_neon_vsqrshrn, VectorRet),
+  NEONMAP1(vqrshrns_n_u32, aarch64_neon_vuqrshrn, VectorRet),
+  NEONMAP1(vqrshrund_n_s64, aarch64_neon_vsqrshrun, VectorRet),
+  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_vsqrshrun, VectorRet),
+  NEONMAP1(vqrshruns_n_s32, aarch64_neon_vsqrshrun, VectorRet),
+  NEONMAP1(vqshlb_n_s8, aarch64_neon_vqshls_n, VectorRet),
+  NEONMAP1(vqshlb_n_u8, aarch64_neon_vqshlu_n, VectorRet),
+  NEONMAP1(vqshlb_s8, aarch64_neon_vqshls, VectorRet),
+  NEONMAP1(vqshlb_u8, aarch64_neon_vqshlu, VectorRet),
+  NEONMAP1(vqshld_n_s64, aarch64_neon_vqshls_n, VectorRet),
+  NEONMAP1(vqshld_n_u64, aarch64_neon_vqshlu_n, VectorRet),
+  NEONMAP1(vqshld_s64, aarch64_neon_vqshls, VectorRet),
+  NEONMAP1(vqshld_u64, aarch64_neon_vqshlu, VectorRet),
+  NEONMAP1(vqshlh_n_s16, aarch64_neon_vqshls_n, VectorRet),
+  NEONMAP1(vqshlh_n_u16, aarch64_neon_vqshlu_n, VectorRet),
+  NEONMAP1(vqshlh_s16, aarch64_neon_vqshls, VectorRet),
+  NEONMAP1(vqshlh_u16, aarch64_neon_vqshlu, VectorRet),
+  NEONMAP1(vqshls_n_s32, aarch64_neon_vqshls_n, VectorRet),
+  NEONMAP1(vqshls_n_u32, aarch64_neon_vqshlu_n, VectorRet),
+  NEONMAP1(vqshls_s32, aarch64_neon_vqshls, VectorRet),
+  NEONMAP1(vqshls_u32, aarch64_neon_vqshlu, VectorRet),
+  NEONMAP1(vqshlub_n_s8, aarch64_neon_vsqshlu, VectorRet),
+  NEONMAP1(vqshlud_n_s64, aarch64_neon_vsqshlu, VectorRet),
+  NEONMAP1(vqshluh_n_s16, aarch64_neon_vsqshlu, VectorRet),
+  NEONMAP1(vqshlus_n_s32, aarch64_neon_vsqshlu, VectorRet),
+  NEONMAP1(vqshrnd_n_s64, aarch64_neon_vsqshrn, VectorRet),
+  NEONMAP1(vqshrnd_n_u64, aarch64_neon_vuqshrn, VectorRet),
+  NEONMAP1(vqshrnh_n_s16, aarch64_neon_vsqshrn, VectorRet),
+  NEONMAP1(vqshrnh_n_u16, aarch64_neon_vuqshrn, VectorRet),
+  NEONMAP1(vqshrns_n_s32, aarch64_neon_vsqshrn, VectorRet),
+  NEONMAP1(vqshrns_n_u32, aarch64_neon_vuqshrn, VectorRet),
+  NEONMAP1(vqshrund_n_s64, aarch64_neon_vsqshrun, VectorRet),
+  NEONMAP1(vqshrunh_n_s16, aarch64_neon_vsqshrun, VectorRet),
+  NEONMAP1(vqshruns_n_s32, aarch64_neon_vsqshrun, VectorRet),
+  NEONMAP1(vqsubb_s8, arm_neon_vqsubs, VectorRet),
+  NEONMAP1(vqsubb_u8, arm_neon_vqsubu, VectorRet),
+  NEONMAP1(vqsubd_s64, arm_neon_vqsubs, VectorRet),
+  NEONMAP1(vqsubd_u64, arm_neon_vqsubu, VectorRet),
+  NEONMAP1(vqsubh_s16, arm_neon_vqsubs, VectorRet),
+  NEONMAP1(vqsubh_u16, arm_neon_vqsubu, VectorRet),
+  NEONMAP1(vqsubs_s32, arm_neon_vqsubs, VectorRet),
+  NEONMAP1(vqsubs_u32, arm_neon_vqsubu, VectorRet),
+  NEONMAP1(vrecped_f64, aarch64_neon_vrecpe, AddRetType),
+  NEONMAP1(vrecpes_f32, aarch64_neon_vrecpe, AddRetType),
+  NEONMAP1(vrecpsd_f64, aarch64_neon_vrecps, AddRetType),
+  NEONMAP1(vrecpss_f32, aarch64_neon_vrecps, AddRetType),
+  NEONMAP1(vrecpxd_f64, aarch64_neon_vrecpx, AddRetType),
+  NEONMAP1(vrecpxs_f32, aarch64_neon_vrecpx, AddRetType),
+  NEONMAP1(vrshld_s64, aarch64_neon_vrshlds, 0),
+  NEONMAP1(vrshld_u64, aarch64_neon_vrshldu, 0),
+  NEONMAP1(vrshrd_n_s64, aarch64_neon_vsrshr, VectorRet),
+  NEONMAP1(vrshrd_n_u64, aarch64_neon_vurshr, VectorRet),
+  NEONMAP1(vrsqrted_f64, aarch64_neon_vrsqrte, AddRetType),
+  NEONMAP1(vrsqrtes_f32, aarch64_neon_vrsqrte, AddRetType),
+  NEONMAP1(vrsqrtsd_f64, aarch64_neon_vrsqrts, AddRetType),
+  NEONMAP1(vrsqrtss_f32, aarch64_neon_vrsqrts, AddRetType),
+  NEONMAP1(vrsrad_n_s64, aarch64_neon_vrsrads_n, 0),
+  NEONMAP1(vrsrad_n_u64, aarch64_neon_vrsradu_n, 0),
+  NEONMAP0(vset_lane_f32),
+  NEONMAP0(vset_lane_f64),
+  NEONMAP0(vset_lane_i16),
+  NEONMAP0(vset_lane_i32),
+  NEONMAP0(vset_lane_i64),
+  NEONMAP0(vset_lane_i8),
+  NEONMAP0(vsetq_lane_f32),
+  NEONMAP0(vsetq_lane_f64),
+  NEONMAP0(vsetq_lane_i16),
+  NEONMAP0(vsetq_lane_i32),
+  NEONMAP0(vsetq_lane_i64),
+  NEONMAP0(vsetq_lane_i8),
+  NEONMAP1(vsha1cq_u32, arm_neon_sha1c, 0),
+  NEONMAP1(vsha1h_u32, arm_neon_sha1h, 0),
+  NEONMAP1(vsha1mq_u32, arm_neon_sha1m, 0),
+  NEONMAP1(vsha1pq_u32, arm_neon_sha1p, 0),
+  NEONMAP1(vshld_n_s64, aarch64_neon_vshld_n, 0),
+  NEONMAP1(vshld_n_u64, aarch64_neon_vshld_n, 0),
+  NEONMAP1(vshld_s64, aarch64_neon_vshlds, 0),
+  NEONMAP1(vshld_u64, aarch64_neon_vshldu, 0),
+  NEONMAP1(vshrd_n_s64, aarch64_neon_vshrds_n, 0),
+  NEONMAP1(vshrd_n_u64, aarch64_neon_vshrdu_n, 0),
+  NEONMAP1(vslid_n_s64, aarch64_neon_vsli, VectorRet),
+  NEONMAP1(vslid_n_u64, aarch64_neon_vsli, VectorRet),
+  NEONMAP1(vsqaddb_u8, aarch64_neon_vsqadd, VectorRet),
+  NEONMAP1(vsqaddd_u64, aarch64_neon_vsqadd, VectorRet),
+  NEONMAP1(vsqaddh_u16, aarch64_neon_vsqadd, VectorRet),
+  NEONMAP1(vsqadds_u32, aarch64_neon_vsqadd, VectorRet),
+  NEONMAP1(vsrad_n_s64, aarch64_neon_vsrads_n, 0),
+  NEONMAP1(vsrad_n_u64, aarch64_neon_vsradu_n, 0),
+  NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, VectorRet),
+  NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, VectorRet),
+  NEONMAP1(vsubd_s64, aarch64_neon_vsubds, 0),
+  NEONMAP1(vsubd_u64, aarch64_neon_vsubdu, 0),
+  NEONMAP1(vtstd_s64, aarch64_neon_vtstd, VectorRetGetArgs01),
+  NEONMAP1(vtstd_u64, aarch64_neon_vtstd, VectorRetGetArgs01),
+  NEONMAP1(vuqaddb_s8, aarch64_neon_vuqadd, VectorRet),
+  NEONMAP1(vuqaddd_s64, aarch64_neon_vuqadd, VectorRet),
+  NEONMAP1(vuqaddh_s16, aarch64_neon_vuqadd, VectorRet),
+  NEONMAP1(vuqadds_s32, aarch64_neon_vuqadd, VectorRet)
+};
+
+static NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
+  NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
+  NEONMAP1(vabs_v, arm_neon_vabs, 0),
+  NEONMAP1(vabsq_v, arm_neon_vabs, 0),
+  NEONMAP0(vaddhn_v),
+  NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
+  NEONMAP1(vaeseq_v, arm_neon_aese, 0),
+  NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
+  NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
+  NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
+  NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
+  NEONMAP1(vcage_v, arm_neon_vacge, 0),
+  NEONMAP1(vcageq_v, arm_neon_vacge, 0),
+  NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
+  NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
+  NEONMAP1(vcale_v, arm_neon_vacge, 0),
+  NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
+  NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
+  NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
+  NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
+  NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
+  NEONMAP1(vclz_v, ctlz, Add1ArgType),
+  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
+  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
+  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
+  NEONMAP1(vcvt_f16_v, arm_neon_vcvtfp2hf, 0),
+  NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
+  NEONMAP0(vcvt_f32_v),
+  NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+  NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
+  NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
+  NEONMAP0(vcvt_s32_v),
+  NEONMAP0(vcvt_s64_v),
+  NEONMAP0(vcvt_u32_v),
+  NEONMAP0(vcvt_u64_v),
+  NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
+  NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
+  NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
+  NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
+  NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
+  NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
+  NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
+  NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
+  NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
+  NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
+  NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
+  NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
+  NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
+  NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
+  NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
+  NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
+  NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
+  NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
+  NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
+  NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
+  NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
+  NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
+  NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
+  NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
+  NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
+  NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
+  NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
+  NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
+  NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
+  NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
+  NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
+  NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
+  NEONMAP0(vcvtq_f32_v),
+  NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+  NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
+  NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
+  NEONMAP0(vcvtq_s32_v),
+  NEONMAP0(vcvtq_s64_v),
+  NEONMAP0(vcvtq_u32_v),
+  NEONMAP0(vcvtq_u64_v),
+  NEONMAP0(vext_v),
+  NEONMAP0(vextq_v),
+  NEONMAP0(vfma_v),
+  NEONMAP0(vfmaq_v),
+  NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
+  NEONMAP0(vld1_dup_v),
+  NEONMAP1(vld1_v, arm_neon_vld1, 0),
+  NEONMAP0(vld1q_dup_v),
+  NEONMAP1(vld1q_v, arm_neon_vld1, 0),
+  NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
+  NEONMAP1(vld2_v, arm_neon_vld2, 0),
+  NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
+  NEONMAP1(vld2q_v, arm_neon_vld2, 0),
+  NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
+  NEONMAP1(vld3_v, arm_neon_vld3, 0),
+  NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
+  NEONMAP1(vld3q_v, arm_neon_vld3, 0),
+  NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
+  NEONMAP1(vld4_v, arm_neon_vld4, 0),
+  NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
+  NEONMAP1(vld4q_v, arm_neon_vld4, 0),
+  NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
+  NEONMAP0(vmovl_v),
+  NEONMAP0(vmovn_v),
+  NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
+  NEONMAP0(vmull_v),
+  NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
+  NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
+  NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
+  NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
+  NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
+  NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
+  NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
+  NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
+  NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
+  NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
+  NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
+  NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
+  NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
+  NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
+  NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
+  NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
+  NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
+  NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
+  NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
+  NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
+  NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
+  NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
+  NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
+  NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
+  NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
+  NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
+  NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
+  NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
+  NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
+  NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
+  NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
+  NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
+  NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
+  NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
+  NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
+  NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
+  NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
+  NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
+  NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
+  NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
+  NEONMAP0(vshl_n_v),
+  NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
+  NEONMAP0(vshll_n_v),
+  NEONMAP0(vshlq_n_v),
+  NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
+  NEONMAP0(vshr_n_v),
+  NEONMAP0(vshrn_n_v),
+  NEONMAP0(vshrq_n_v),
+  NEONMAP1(vst1_v, arm_neon_vst1, 0),
+  NEONMAP1(vst1q_v, arm_neon_vst1, 0),
+  NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
+  NEONMAP1(vst2_v, arm_neon_vst2, 0),
+  NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
+  NEONMAP1(vst2q_v, arm_neon_vst2, 0),
+  NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
+  NEONMAP1(vst3_v, arm_neon_vst3, 0),
+  NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
+  NEONMAP1(vst3q_v, arm_neon_vst3, 0),
+  NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
+  NEONMAP1(vst4_v, arm_neon_vst4, 0),
+  NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
+  NEONMAP1(vst4q_v, arm_neon_vst4, 0),
+  NEONMAP0(vsubhn_v),
+  NEONMAP0(vtrn_v),
+  NEONMAP0(vtrnq_v),
+  NEONMAP0(vtst_v),
+  NEONMAP0(vtstq_v),
+  NEONMAP0(vuzp_v),
+  NEONMAP0(vuzpq_v),
+  NEONMAP0(vzip_v),
+  NEONMAP0(vzipq_v)
+};
+
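+// NEON vector builtins for ARM64, lowered to arm64_* or generic LLVM
+// intrinsics where a direct mapping exists.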
+static NeonIntrinsicInfo ARM64SIMDIntrinsicMap[] = {
+  NEONMAP1(vabs_v, arm64_neon_abs, 0),
+  NEONMAP1(vabsq_v, arm64_neon_abs, 0),
+  NEONMAP0(vaddhn_v),
+  NEONMAP1(vaesdq_v, arm64_crypto_aesd, 0),
+  NEONMAP1(vaeseq_v, arm64_crypto_aese, 0),
+  NEONMAP1(vaesimcq_v, arm64_crypto_aesimc, 0),
+  NEONMAP1(vaesmcq_v, arm64_crypto_aesmc, 0),
+  NEONMAP1(vcage_v, arm64_neon_facge, 0),
+  NEONMAP1(vcageq_v, arm64_neon_facge, 0),
+  NEONMAP1(vcagt_v, arm64_neon_facgt, 0),
+  NEONMAP1(vcagtq_v, arm64_neon_facgt, 0),
+  NEONMAP1(vcale_v, arm64_neon_facge, 0),
+  NEONMAP1(vcaleq_v, arm64_neon_facge, 0),
+  NEONMAP1(vcalt_v, arm64_neon_facgt, 0),
+  NEONMAP1(vcaltq_v, arm64_neon_facgt, 0),
+  NEONMAP1(vcls_v, arm64_neon_cls, Add1ArgType),
+  NEONMAP1(vclsq_v, arm64_neon_cls, Add1ArgType),
+  NEONMAP1(vclz_v, ctlz, Add1ArgType),
+  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
+  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
+  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
+  NEONMAP1(vcvt_f16_v, arm64_neon_vcvtfp2hf, 0),
+  NEONMAP1(vcvt_f32_f16, arm64_neon_vcvthf2fp, 0),
+  NEONMAP0(vcvt_f32_v),
+  NEONMAP2(vcvt_n_f32_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0),
+  NEONMAP2(vcvt_n_f64_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0),
+  NEONMAP1(vcvt_n_s32_v, arm64_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvt_n_s64_v, arm64_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvt_n_u32_v, arm64_neon_vcvtfp2fxu, 0),
+  NEONMAP1(vcvt_n_u64_v, arm64_neon_vcvtfp2fxu, 0),
+  NEONMAP0(vcvtq_f32_v),
+  NEONMAP2(vcvtq_n_f32_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0),
+  NEONMAP2(vcvtq_n_f64_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0),
+  NEONMAP1(vcvtq_n_s32_v, arm64_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvtq_n_s64_v, arm64_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvtq_n_u32_v, arm64_neon_vcvtfp2fxu, 0),
+  NEONMAP1(vcvtq_n_u64_v, arm64_neon_vcvtfp2fxu, 0),
+  NEONMAP1(vcvtx_f32_v, arm64_neon_fcvtxn, AddRetType | Add1ArgType),
+  NEONMAP0(vext_v),
+  NEONMAP0(vextq_v),
+  NEONMAP0(vfma_v),
+  NEONMAP0(vfmaq_v),
+  NEONMAP2(vhadd_v, arm64_neon_uhadd, arm64_neon_shadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhaddq_v, arm64_neon_uhadd, arm64_neon_shadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhsub_v, arm64_neon_uhsub, arm64_neon_shsub, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhsubq_v, arm64_neon_uhsub, arm64_neon_shsub, Add1ArgType | UnsignedAlts),
+  NEONMAP0(vmovl_v),
+  NEONMAP0(vmovn_v),
+  NEONMAP1(vmul_v, arm64_neon_pmul, Add1ArgType),
+  NEONMAP1(vmulq_v, arm64_neon_pmul, Add1ArgType),
+  NEONMAP1(vpadd_v, arm64_neon_addp, Add1ArgType),
+  NEONMAP2(vpaddl_v, arm64_neon_uaddlp, arm64_neon_saddlp, UnsignedAlts),
+  NEONMAP2(vpaddlq_v, arm64_neon_uaddlp, arm64_neon_saddlp, UnsignedAlts),
+  NEONMAP1(vpaddq_v, arm64_neon_addp, Add1ArgType),
+  NEONMAP1(vqabs_v, arm64_neon_sqabs, Add1ArgType),
+  NEONMAP1(vqabsq_v, arm64_neon_sqabs, Add1ArgType),
+  NEONMAP2(vqadd_v, arm64_neon_uqadd, arm64_neon_sqadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqaddq_v, arm64_neon_uqadd, arm64_neon_sqadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqdmlal_v, arm64_neon_sqdmull, arm64_neon_sqadd, 0),
+  NEONMAP2(vqdmlsl_v, arm64_neon_sqdmull, arm64_neon_sqsub, 0),
+  NEONMAP1(vqdmulh_v, arm64_neon_sqdmulh, Add1ArgType),
+  NEONMAP1(vqdmulhq_v, arm64_neon_sqdmulh, Add1ArgType),
+  NEONMAP1(vqdmull_v, arm64_neon_sqdmull, Add1ArgType),
+  NEONMAP2(vqmovn_v, arm64_neon_uqxtn, arm64_neon_sqxtn, Add1ArgType | UnsignedAlts),
+  NEONMAP1(vqmovun_v, arm64_neon_sqxtun, Add1ArgType),
+  NEONMAP1(vqneg_v, arm64_neon_sqneg, Add1ArgType),
+  NEONMAP1(vqnegq_v, arm64_neon_sqneg, Add1ArgType),
+  NEONMAP1(vqrdmulh_v, arm64_neon_sqrdmulh, Add1ArgType),
+  NEONMAP1(vqrdmulhq_v, arm64_neon_sqrdmulh, Add1ArgType),
+  NEONMAP2(vqrshl_v, arm64_neon_uqrshl, arm64_neon_sqrshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqrshlq_v, arm64_neon_uqrshl, arm64_neon_sqrshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqshl_n_v, arm64_neon_uqshl, arm64_neon_sqshl, UnsignedAlts),
+  NEONMAP2(vqshl_v, arm64_neon_uqshl, arm64_neon_sqshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqshlq_n_v, arm64_neon_uqshl, arm64_neon_sqshl, UnsignedAlts),
+  NEONMAP2(vqshlq_v, arm64_neon_uqshl, arm64_neon_sqshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqsub_v, arm64_neon_uqsub, arm64_neon_sqsub, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqsubq_v, arm64_neon_uqsub, arm64_neon_sqsub, Add1ArgType | UnsignedAlts),
+  NEONMAP1(vraddhn_v, arm64_neon_raddhn, Add1ArgType),
+  NEONMAP2(vrecpe_v, arm64_neon_frecpe, arm64_neon_urecpe, 0),
+  NEONMAP2(vrecpeq_v, arm64_neon_frecpe, arm64_neon_urecpe, 0),
+  NEONMAP1(vrecps_v, arm64_neon_frecps, Add1ArgType),
+  NEONMAP1(vrecpsq_v, arm64_neon_frecps, Add1ArgType),
+  NEONMAP2(vrhadd_v, arm64_neon_urhadd, arm64_neon_srhadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrhaddq_v, arm64_neon_urhadd, arm64_neon_srhadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrshl_v, arm64_neon_urshl, arm64_neon_srshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrshlq_v, arm64_neon_urshl, arm64_neon_srshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrsqrte_v, arm64_neon_frsqrte, arm64_neon_ursqrte, 0),
+  NEONMAP2(vrsqrteq_v, arm64_neon_frsqrte, arm64_neon_ursqrte, 0),
+  NEONMAP1(vrsqrts_v, arm64_neon_frsqrts, Add1ArgType),
+  NEONMAP1(vrsqrtsq_v, arm64_neon_frsqrts, Add1ArgType),
+  NEONMAP1(vrsubhn_v, arm64_neon_rsubhn, Add1ArgType),
+  NEONMAP1(vsha1su0q_v, arm64_crypto_sha1su0, 0),
+  NEONMAP1(vsha1su1q_v, arm64_crypto_sha1su1, 0),
+  NEONMAP1(vsha256h2q_v, arm64_crypto_sha256h2, 0),
+  NEONMAP1(vsha256hq_v, arm64_crypto_sha256h, 0),
+  NEONMAP1(vsha256su0q_v, arm64_crypto_sha256su0, 0),
+  NEONMAP1(vsha256su1q_v, arm64_crypto_sha256su1, 0),
+  NEONMAP0(vshl_n_v),
+  NEONMAP2(vshl_v, arm64_neon_ushl, arm64_neon_sshl, Add1ArgType | UnsignedAlts),
+  NEONMAP0(vshll_n_v),
+  NEONMAP0(vshlq_n_v),
+  NEONMAP2(vshlq_v, arm64_neon_ushl, arm64_neon_sshl, Add1ArgType | UnsignedAlts),
+  NEONMAP0(vshr_n_v),
+  NEONMAP0(vshrn_n_v),
+  NEONMAP0(vshrq_n_v),
+  NEONMAP0(vsubhn_v),
+  NEONMAP0(vtst_v),
+  NEONMAP0(vtstq_v),
+};
+
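+// Scalar (SISD) builtins for ARM64. The Vectorize*/Use*BitVectors modifiers
+// describe how scalar operands and results are packed into vectors around
+// the underlying intrinsic call.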
+static NeonIntrinsicInfo ARM64SISDIntrinsicMap[] = {
+  NEONMAP1(vabdd_f64, arm64_sisd_fabd, Add1ArgType),
+  NEONMAP1(vabds_f32, arm64_sisd_fabd, Add1ArgType),
+  NEONMAP1(vabsd_s64, arm64_neon_abs, Add1ArgType),
+  NEONMAP1(vaddlv_s32, arm64_neon_saddlv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddlv_u32, arm64_neon_uaddlv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddlvq_s32, arm64_neon_saddlv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddlvq_u32, arm64_neon_uaddlv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddv_f32, arm64_neon_faddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddv_s32, arm64_neon_saddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddv_u32, arm64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_f32, arm64_neon_faddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_f64, arm64_neon_faddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_s32, arm64_neon_saddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_s64, arm64_neon_saddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_u32, arm64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_u64, arm64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vcaged_f64, arm64_neon_facge, AddRetType | Add1ArgType),
+  NEONMAP1(vcages_f32, arm64_neon_facge, AddRetType | Add1ArgType),
+  NEONMAP1(vcagtd_f64, arm64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcagts_f32, arm64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcaled_f64, arm64_neon_facge, AddRetType | Add1ArgType),
+  NEONMAP1(vcales_f32, arm64_neon_facge, AddRetType | Add1ArgType),
+  NEONMAP1(vcaltd_f64, arm64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcalts_f32, arm64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtad_s64_f64, arm64_neon_fcvtas, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtad_u64_f64, arm64_neon_fcvtau, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtas_s32_f32, arm64_neon_fcvtas, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtas_u32_f32, arm64_neon_fcvtau, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_n_f64_s64, arm64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_n_f64_u64, arm64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_n_s64_f64, arm64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_n_u64_f64, arm64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtmd_s64_f64, arm64_neon_fcvtms, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtmd_u64_f64, arm64_neon_fcvtmu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtms_s32_f32, arm64_neon_fcvtms, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtms_u32_f32, arm64_neon_fcvtmu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtnd_s64_f64, arm64_neon_fcvtns, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtnd_u64_f64, arm64_neon_fcvtnu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtns_s32_f32, arm64_neon_fcvtns, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtns_u32_f32, arm64_neon_fcvtnu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtpd_s64_f64, arm64_neon_fcvtps, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtpd_u64_f64, arm64_neon_fcvtpu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtps_s32_f32, arm64_neon_fcvtps, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtps_u32_f32, arm64_neon_fcvtpu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_n_f32_s32, arm64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_n_f32_u32, arm64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_n_s32_f32, arm64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_n_u32_f32, arm64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtxd_f32_f64, arm64_sisd_fcvtxn, 0),
+  NEONMAP1(vmaxnmv_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxnmvq_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxnmvq_f64, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxv_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxv_s32, arm64_neon_smaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxv_u32, arm64_neon_umaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_f64, arm64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_s32, arm64_neon_smaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_u32, arm64_neon_umaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vminnmv_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vminnmvq_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vminnmvq_f64, arm64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vminv_f32, arm64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminv_s32, arm64_neon_sminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminv_u32, arm64_neon_uminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_f32, arm64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_f64, arm64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_s32, arm64_neon_sminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_u32, arm64_neon_uminv, AddRetType | Add1ArgType),
+  NEONMAP1(vmull_p64, arm64_neon_pmull64, 0),
+  NEONMAP1(vmulxd_f64, arm64_neon_fmulx, Add1ArgType),
+  NEONMAP1(vmulxs_f32, arm64_neon_fmulx, Add1ArgType),
+  NEONMAP1(vpaddd_s64, arm64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vpaddd_u64, arm64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxnmqd_f64, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxnms_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxqd_f64, arm64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxs_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vpminnmqd_f64, arm64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vpminnms_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vpminqd_f64, arm64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmins_f32, arm64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vqabsb_s8, arm64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqabsd_s64, arm64_neon_sqabs, Add1ArgType),
+  NEONMAP1(vqabsh_s16, arm64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqabss_s32, arm64_neon_sqabs, Add1ArgType),
+  NEONMAP1(vqaddb_s8, arm64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqaddb_u8, arm64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqaddd_s64, arm64_neon_sqadd, Add1ArgType),
+  NEONMAP1(vqaddd_u64, arm64_neon_uqadd, Add1ArgType),
+  NEONMAP1(vqaddh_s16, arm64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqaddh_u16, arm64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqadds_s32, arm64_neon_sqadd, Add1ArgType),
+  NEONMAP1(vqadds_u32, arm64_neon_uqadd, Add1ArgType),
+  NEONMAP1(vqdmulhh_s16, arm64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqdmulhs_s32, arm64_neon_sqdmulh, Add1ArgType),
+  NEONMAP1(vqdmullh_s16, arm64_neon_sqdmull, VectorRet | Use128BitVectors),
+  NEONMAP1(vqdmulls_s32, arm64_neon_sqdmulls_scalar, 0),
+  NEONMAP1(vqmovnd_s64, arm64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
+  NEONMAP1(vqmovnd_u64, arm64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
+  NEONMAP1(vqmovnh_s16, arm64_neon_sqxtn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovnh_u16, arm64_neon_uqxtn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovns_s32, arm64_neon_sqxtn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovns_u32, arm64_neon_uqxtn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovund_s64, arm64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
+  NEONMAP1(vqmovunh_s16, arm64_neon_sqxtun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovuns_s32, arm64_neon_sqxtun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqnegb_s8, arm64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqnegd_s64, arm64_neon_sqneg, Add1ArgType),
+  NEONMAP1(vqnegh_s16, arm64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqnegs_s32, arm64_neon_sqneg, Add1ArgType),
+  NEONMAP1(vqrdmulhh_s16, arm64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrdmulhs_s32, arm64_neon_sqrdmulh, Add1ArgType),
+  NEONMAP1(vqrshlb_s8, arm64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrshlb_u8, arm64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrshld_s64, arm64_neon_sqrshl, Add1ArgType),
+  NEONMAP1(vqrshld_u64, arm64_neon_uqrshl, Add1ArgType),
+  NEONMAP1(vqrshlh_s16, arm64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrshlh_u16, arm64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrshls_s32, arm64_neon_sqrshl, Add1ArgType),
+  NEONMAP1(vqrshls_u32, arm64_neon_uqrshl, Add1ArgType),
+  NEONMAP1(vqrshrnd_n_s64, arm64_neon_sqrshrn, AddRetType),
+  NEONMAP1(vqrshrnd_n_u64, arm64_neon_uqrshrn, AddRetType),
+  NEONMAP1(vqrshrnh_n_s16, arm64_neon_sqrshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshrnh_n_u16, arm64_neon_uqrshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshrns_n_s32, arm64_neon_sqrshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshrns_n_u32, arm64_neon_uqrshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshrund_n_s64, arm64_neon_sqrshrun, AddRetType),
+  NEONMAP1(vqrshrunh_n_s16, arm64_neon_sqrshrun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshruns_n_s32, arm64_neon_sqrshrun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshlb_n_s8, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlb_n_u8, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlb_s8, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlb_u8, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshld_s64, arm64_neon_sqshl, Add1ArgType),
+  NEONMAP1(vqshld_u64, arm64_neon_uqshl, Add1ArgType),
+  NEONMAP1(vqshlh_n_s16, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlh_n_u16, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlh_s16, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlh_u16, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshls_n_s32, arm64_neon_sqshl, Add1ArgType),
+  NEONMAP1(vqshls_n_u32, arm64_neon_uqshl, Add1ArgType),
+  NEONMAP1(vqshls_s32, arm64_neon_sqshl, Add1ArgType),
+  NEONMAP1(vqshls_u32, arm64_neon_uqshl, Add1ArgType),
+  NEONMAP1(vqshlub_n_s8, arm64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshluh_n_s16, arm64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlus_n_s32, arm64_neon_sqshlu, Add1ArgType),
+  NEONMAP1(vqshrnd_n_s64, arm64_neon_sqshrn, AddRetType),
+  NEONMAP1(vqshrnd_n_u64, arm64_neon_uqshrn, AddRetType),
+  NEONMAP1(vqshrnh_n_s16, arm64_neon_sqshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshrnh_n_u16, arm64_neon_uqshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshrns_n_s32, arm64_neon_sqshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshrns_n_u32, arm64_neon_uqshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshrund_n_s64, arm64_neon_sqshrun, AddRetType),
+  NEONMAP1(vqshrunh_n_s16, arm64_neon_sqshrun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshruns_n_s32, arm64_neon_sqshrun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqsubb_s8, arm64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqsubb_u8, arm64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqsubd_s64, arm64_neon_sqsub, Add1ArgType),
+  NEONMAP1(vqsubd_u64, arm64_neon_uqsub, Add1ArgType),
+  NEONMAP1(vqsubh_s16, arm64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqsubh_u16, arm64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqsubs_s32, arm64_neon_sqsub, Add1ArgType),
+  NEONMAP1(vqsubs_u32, arm64_neon_uqsub, Add1ArgType),
+  NEONMAP1(vrecped_f64, arm64_neon_frecpe, Add1ArgType),
+  NEONMAP1(vrecpes_f32, arm64_neon_frecpe, Add1ArgType),
+  NEONMAP1(vrecpxd_f64, arm64_neon_frecpx, Add1ArgType),
+  NEONMAP1(vrecpxs_f32, arm64_neon_frecpx, Add1ArgType),
+  NEONMAP1(vrshld_s64, arm64_neon_srshl, Add1ArgType),
+  NEONMAP1(vrshld_u64, arm64_neon_urshl, Add1ArgType),
+  NEONMAP1(vrsqrted_f64, arm64_neon_frsqrte, Add1ArgType),
+  NEONMAP1(vrsqrtes_f32, arm64_neon_frsqrte, Add1ArgType),
+  NEONMAP1(vrsqrtsd_f64, arm64_neon_frsqrts, Add1ArgType),
+  NEONMAP1(vrsqrtss_f32, arm64_neon_frsqrts, Add1ArgType),
+  NEONMAP1(vsha1cq_u32, arm64_crypto_sha1c, 0),
+  NEONMAP1(vsha1h_u32, arm64_crypto_sha1h, 0),
+  NEONMAP1(vsha1mq_u32, arm64_crypto_sha1m, 0),
+  NEONMAP1(vsha1pq_u32, arm64_crypto_sha1p, 0),
+  NEONMAP1(vshld_s64, arm64_neon_sshl, Add1ArgType),
+  NEONMAP1(vshld_u64, arm64_neon_ushl, Add1ArgType),
+  NEONMAP1(vslid_n_s64, arm64_neon_vsli, Vectorize1ArgType),
+  NEONMAP1(vslid_n_u64, arm64_neon_vsli, Vectorize1ArgType),
+  NEONMAP1(vsqaddb_u8, arm64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vsqaddd_u64, arm64_neon_usqadd, Add1ArgType),
+  NEONMAP1(vsqaddh_u16, arm64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vsqadds_u32, arm64_neon_usqadd, Add1ArgType),
+  NEONMAP1(vsrid_n_s64, arm64_neon_vsri, Vectorize1ArgType),
+  NEONMAP1(vsrid_n_u64, arm64_neon_vsri, Vectorize1ArgType),
+  NEONMAP1(vuqaddb_s8, arm64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vuqaddd_s64, arm64_neon_suqadd, Add1ArgType),
+  NEONMAP1(vuqaddh_s16, arm64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vuqadds_s32, arm64_neon_suqadd, Add1ArgType),
+};
+
+#undef NEONMAP0
+#undef NEONMAP1
+#undef NEONMAP2
+
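+// Lazily-checked "proven sorted" flags, one per map, so the sortedness
+// assertion in findNeonIntrinsicInMap() runs at most once per table.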
+static bool NEONSIMDIntrinsicsProvenSorted = false;
+static bool AArch64SISDIntrinsicInfoProvenSorted = false;
+
+static bool ARM64SIMDIntrinsicsProvenSorted = false;
+static bool ARM64SISDIntrinsicsProvenSorted = false;
+
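+// Look up BuiltinID in a map sorted by BuiltinID, returning null if the
+// builtin has no entry. Typical call-site use, with Ops assembled by the
+// caller (sketch):
+//   if (const NeonIntrinsicInfo *Info =
+//           findNeonIntrinsicInMap(ARM64SISDIntrinsicMap, BuiltinID,
+//                                  ARM64SISDIntrinsicsProvenSorted))
+//     return EmitCommonNeonSISDBuiltinExpr(*this, *Info, Ops, E);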
+static const NeonIntrinsicInfo *
+findNeonIntrinsicInMap(llvm::ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
+                       unsigned BuiltinID, bool &MapProvenSorted) {
+
+#ifndef NDEBUG
+  if (!MapProvenSorted) {
+    // FIXME: use std::is_sorted once C++11 is allowed
+    for (unsigned i = 0; i < IntrinsicMap.size() - 1; ++i)
+      assert(IntrinsicMap[i].BuiltinID <= IntrinsicMap[i + 1].BuiltinID);
+    MapProvenSorted = true;
+  }
+#endif
+
+  const NeonIntrinsicInfo *Builtin =
+      std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);
+
+  if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
+    return Builtin;
+
+  return 0;
+}
+
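+// Declare the LLVM intrinsic for a NEON builtin: the overload types (return
+// and/or argument) are derived from the TypeModifier flags of its map entry,
+// vectorizing scalar types to 64- or 128-bit vectors when requested.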
+Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
+                                                   unsigned Modifier,
+                                                   llvm::Type *ArgType,
+                                                   const CallExpr *E) {
+  int VectorSize = 0;
+  if (Modifier & Use64BitVectors)
+    VectorSize = 64;
+  else if (Modifier & Use128BitVectors)
+    VectorSize = 128;
+
+  // Return type.
+  SmallVector<llvm::Type *, 3> Tys;
+  if (Modifier & AddRetType) {
+    llvm::Type *Ty = ConvertType(E->getCallReturnType());
+    if (Modifier & VectorizeRetType)
+      Ty = llvm::VectorType::get(
+          Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
+
+    Tys.push_back(Ty);
+  }
+
+  // Arguments.
+  if (Modifier & VectorizeArgTypes) {
+    int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
+    ArgType = llvm::VectorType::get(ArgType, Elts);
+  }
+
+  if (Modifier & (Add1ArgType | Add2ArgTypes))
+    Tys.push_back(ArgType);
+
+  if (Modifier & Add2ArgTypes)
+    Tys.push_back(ArgType);
+
+  if (Modifier & InventFloatType)
+    Tys.push_back(FloatTy);
+
+  return CGM.getIntrinsic(IntrinsicID, Tys);
+}
+
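+// Shared codegen for the table-driven scalar (SISD) builtins: swap operands
+// for the compare builtins that are emitted via their ge/gt counterparts,
+// promote any scalar operands to the vector types the intrinsic expects,
+// emit the call, and narrow the result back to the builtin's scalar type.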
+static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
+                                            const NeonIntrinsicInfo &SISDInfo,
+                                            SmallVectorImpl<Value *> &Ops,
+                                            const CallExpr *E) {
+  unsigned BuiltinID = SISDInfo.BuiltinID;
+  unsigned int Int = SISDInfo.LLVMIntrinsic;
+  unsigned Modifier = SISDInfo.TypeModifier;
+  const char *s = SISDInfo.NameHint;
+
+  switch (BuiltinID) {
+  case NEON::BI__builtin_neon_vcled_s64:
+  case NEON::BI__builtin_neon_vcled_u64:
+  case NEON::BI__builtin_neon_vcles_f32:
+  case NEON::BI__builtin_neon_vcled_f64:
+  case NEON::BI__builtin_neon_vcltd_s64:
+  case NEON::BI__builtin_neon_vcltd_u64:
+  case NEON::BI__builtin_neon_vclts_f32:
+  case NEON::BI__builtin_neon_vcltd_f64:
+  case NEON::BI__builtin_neon_vcales_f32:
+  case NEON::BI__builtin_neon_vcaled_f64:
+  case NEON::BI__builtin_neon_vcalts_f32:
+  case NEON::BI__builtin_neon_vcaltd_f64:
+    // Only one direction of comparisons actually exists; cmle is really a
+    // cmge with swapped operands. The table gives us the right intrinsic,
+    // but we still need to do the swap here.
+    std::swap(Ops[0], Ops[1]);
+    break;
+  }
+
+  assert(Int && "Generic code assumes a valid intrinsic");
+
+  // Determine the type(s) of this overloaded AArch64 intrinsic.
+  const Expr *Arg = E->getArg(0);
+  llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
+  Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
+
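+  // The intrinsic may expect vector operands where the builtin takes scalars;
+  // widen any mismatched scalar operand by truncating or bitcasting it to the
+  // element type and inserting it into lane 0 of an undef vector.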
+  int j = 0;
+  ConstantInt *C0 = ConstantInt::get(CGF.Int32Ty, 0);
+  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
+       ai != ae; ++ai, ++j) {
+    llvm::Type *ArgTy = ai->getType();
+    if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
+             ArgTy->getPrimitiveSizeInBits())
+      continue;
+
+    assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
+    // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
+    // it before inserting.
+    Ops[j] =
+        CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
+    Ops[j] =
+        CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
+  }
+
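+  // Emit the call. If the intrinsic returned a wider vector than the
+  // builtin's scalar result type, extract lane 0; otherwise just bitcast.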
+  Value *Result = CGF.EmitNeonCall(F, Ops, s);
+  llvm::Type *ResultType = CGF.ConvertType(E->getType());
+  if (ResultType->getPrimitiveSizeInBits() <
+      Result->getType()->getPrimitiveSizeInBits())
+    return CGF.Builder.CreateExtractElement(Result, C0);
+
+  return CGF.Builder.CreateBitCast(Result, ResultType, s);
+}
+
 static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
-                                           unsigned BuiltinID,
+                                           const NeonIntrinsicInfo &SISDInfo,
                                            const CallExpr *E) {
-  unsigned int Int = 0;
-  // Scalar result generated across vectors
-  bool AcrossVec = false;
-  // Extend element of one-element vector
-  bool ExtendEle = false;
-  bool OverloadInt = false;
-  bool OverloadCmpInt = false;
-  bool IsFpCmpZInt = false;
-  bool OverloadCvtInt = false;
-  bool OverloadWideInt = false;
-  bool OverloadNarrowInt = false;
-  const char *s = NULL;
+  unsigned BuiltinID = SISDInfo.BuiltinID;
+  unsigned int Int = SISDInfo.LLVMIntrinsic;
+  const char *s = SISDInfo.NameHint;
 
   SmallVector<Value *, 4> Ops;
   for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
@@ -1774,20 +2884,20 @@
   // argument that specifies the vector type, need to handle each case.
   switch (BuiltinID) {
   default: break;
-  case AArch64::BI__builtin_neon_vdups_lane_f32:
-  case AArch64::BI__builtin_neon_vdupd_lane_f64:
-  case AArch64::BI__builtin_neon_vdups_laneq_f32:
-  case AArch64::BI__builtin_neon_vdupd_laneq_f64: {
+  case NEON::BI__builtin_neon_vdups_lane_f32:
+  case NEON::BI__builtin_neon_vdupd_lane_f64:
+  case NEON::BI__builtin_neon_vdups_laneq_f32:
+  case NEON::BI__builtin_neon_vdupd_laneq_f64: {
     return CGF.Builder.CreateExtractElement(Ops[0], Ops[1], "vdup_lane");
   }
-  case AArch64::BI__builtin_neon_vdupb_lane_i8:
-  case AArch64::BI__builtin_neon_vduph_lane_i16:
-  case AArch64::BI__builtin_neon_vdups_lane_i32:
-  case AArch64::BI__builtin_neon_vdupd_lane_i64:
-  case AArch64::BI__builtin_neon_vdupb_laneq_i8:
-  case AArch64::BI__builtin_neon_vduph_laneq_i16:
-  case AArch64::BI__builtin_neon_vdups_laneq_i32:
-  case AArch64::BI__builtin_neon_vdupd_laneq_i64: {
+  case NEON::BI__builtin_neon_vdupb_lane_i8:
+  case NEON::BI__builtin_neon_vduph_lane_i16:
+  case NEON::BI__builtin_neon_vdups_lane_i32:
+  case NEON::BI__builtin_neon_vdupd_lane_i64:
+  case NEON::BI__builtin_neon_vdupb_laneq_i8:
+  case NEON::BI__builtin_neon_vduph_laneq_i16:
+  case NEON::BI__builtin_neon_vdups_laneq_i32:
+  case NEON::BI__builtin_neon_vdupd_laneq_i64: {
     // The backend treats Neon scalar types as v1ix types
     // So we want to dup lane from any vector to v1ix vector
     // with shufflevector
@@ -1799,19 +2909,19 @@
     // scalar type expected by the builtin
     return CGF.Builder.CreateBitCast(Result, Ty, s);
   }
-  case AArch64::BI__builtin_neon_vqdmlalh_lane_s16 :
-  case AArch64::BI__builtin_neon_vqdmlalh_laneq_s16 :
-  case AArch64::BI__builtin_neon_vqdmlals_lane_s32 :
-  case AArch64::BI__builtin_neon_vqdmlals_laneq_s32 :
-  case AArch64::BI__builtin_neon_vqdmlslh_lane_s16 :
-  case AArch64::BI__builtin_neon_vqdmlslh_laneq_s16 :
-  case AArch64::BI__builtin_neon_vqdmlsls_lane_s32 :
-  case AArch64::BI__builtin_neon_vqdmlsls_laneq_s32 : {
+  case NEON::BI__builtin_neon_vqdmlalh_lane_s16 :
+  case NEON::BI__builtin_neon_vqdmlalh_laneq_s16 :
+  case NEON::BI__builtin_neon_vqdmlals_lane_s32 :
+  case NEON::BI__builtin_neon_vqdmlals_laneq_s32 :
+  case NEON::BI__builtin_neon_vqdmlslh_lane_s16 :
+  case NEON::BI__builtin_neon_vqdmlslh_laneq_s16 :
+  case NEON::BI__builtin_neon_vqdmlsls_lane_s32 :
+  case NEON::BI__builtin_neon_vqdmlsls_laneq_s32 : {
     Int = Intrinsic::arm_neon_vqadds;
-    if (BuiltinID == AArch64::BI__builtin_neon_vqdmlslh_lane_s16 ||
-        BuiltinID == AArch64::BI__builtin_neon_vqdmlslh_laneq_s16 ||
-        BuiltinID == AArch64::BI__builtin_neon_vqdmlsls_lane_s32 ||
-        BuiltinID == AArch64::BI__builtin_neon_vqdmlsls_laneq_s32) {
+    if (BuiltinID == NEON::BI__builtin_neon_vqdmlslh_lane_s16 ||
+        BuiltinID == NEON::BI__builtin_neon_vqdmlslh_laneq_s16 ||
+        BuiltinID == NEON::BI__builtin_neon_vqdmlsls_lane_s32 ||
+        BuiltinID == NEON::BI__builtin_neon_vqdmlsls_laneq_s32) {
       Int = Intrinsic::arm_neon_vqsubs;
     }
     // create vqdmull call with b * c[i]
@@ -1839,23 +2949,23 @@
     Value *AddRes = CGF.Builder.CreateCall2(F, AddOps[0], AddOps[1]);
     return CGF.Builder.CreateBitCast(AddRes, Ty);
   }
-  case AArch64::BI__builtin_neon_vfmas_lane_f32:
-  case AArch64::BI__builtin_neon_vfmas_laneq_f32:
-  case AArch64::BI__builtin_neon_vfmad_lane_f64:
-  case AArch64::BI__builtin_neon_vfmad_laneq_f64: {
+  case NEON::BI__builtin_neon_vfmas_lane_f32:
+  case NEON::BI__builtin_neon_vfmas_laneq_f32:
+  case NEON::BI__builtin_neon_vfmad_lane_f64:
+  case NEON::BI__builtin_neon_vfmad_laneq_f64: {
     llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
     Value *F = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
     return CGF.Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
   }
   // Scalar Floating-point Multiply Extended
-  case AArch64::BI__builtin_neon_vmulxs_f32:
-  case AArch64::BI__builtin_neon_vmulxd_f64: {
+  case NEON::BI__builtin_neon_vmulxs_f32:
+  case NEON::BI__builtin_neon_vmulxd_f64: {
     Int = Intrinsic::aarch64_neon_vmulx;
     llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
     return CGF.EmitNeonCall(CGF.CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
   }
-  case AArch64::BI__builtin_neon_vmul_n_f64: {
+  case NEON::BI__builtin_neon_vmul_n_f64: {
     // v1f64 vmul_n_f64  should be mapped to Neon scalar mul lane
     llvm::Type *VTy = GetNeonType(&CGF,
       NeonTypeFlags(NeonTypeFlags::Float64, false, false));
@@ -1865,788 +2975,523 @@
     Value *Result = CGF.Builder.CreateFMul(Ops[0], Ops[1]);
     return CGF.Builder.CreateBitCast(Result, VTy);
   }
-  case AArch64::BI__builtin_neon_vget_lane_i8:
-  case AArch64::BI__builtin_neon_vget_lane_i16:
-  case AArch64::BI__builtin_neon_vget_lane_i32:
-  case AArch64::BI__builtin_neon_vget_lane_i64:
-  case AArch64::BI__builtin_neon_vget_lane_f32:
-  case AArch64::BI__builtin_neon_vget_lane_f64:
-  case AArch64::BI__builtin_neon_vgetq_lane_i8:
-  case AArch64::BI__builtin_neon_vgetq_lane_i16:
-  case AArch64::BI__builtin_neon_vgetq_lane_i32:
-  case AArch64::BI__builtin_neon_vgetq_lane_i64:
-  case AArch64::BI__builtin_neon_vgetq_lane_f32:
-  case AArch64::BI__builtin_neon_vgetq_lane_f64:
-    return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vget_lane_i8, E);
-  case AArch64::BI__builtin_neon_vset_lane_i8:
-  case AArch64::BI__builtin_neon_vset_lane_i16:
-  case AArch64::BI__builtin_neon_vset_lane_i32:
-  case AArch64::BI__builtin_neon_vset_lane_i64:
-  case AArch64::BI__builtin_neon_vset_lane_f32:
-  case AArch64::BI__builtin_neon_vset_lane_f64:
-  case AArch64::BI__builtin_neon_vsetq_lane_i8:
-  case AArch64::BI__builtin_neon_vsetq_lane_i16:
-  case AArch64::BI__builtin_neon_vsetq_lane_i32:
-  case AArch64::BI__builtin_neon_vsetq_lane_i64:
-  case AArch64::BI__builtin_neon_vsetq_lane_f32:
-  case AArch64::BI__builtin_neon_vsetq_lane_f64:
-    return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vset_lane_i8, E);
-  // Crypto
-  case AArch64::BI__builtin_neon_vsha1h_u32:
-    Int = Intrinsic::arm_neon_sha1h;
-    s = "sha1h"; OverloadInt = true; break;
-  case AArch64::BI__builtin_neon_vsha1cq_u32:
-    Int = Intrinsic::aarch64_neon_sha1c;
-    s = "sha1c"; break;
-  case AArch64::BI__builtin_neon_vsha1pq_u32:
-    Int = Intrinsic::aarch64_neon_sha1p;
-    s = "sha1p"; break;
-  case AArch64::BI__builtin_neon_vsha1mq_u32:
-    Int = Intrinsic::aarch64_neon_sha1m;
-    s = "sha1m"; break;
-  // Scalar Add
-  case AArch64::BI__builtin_neon_vaddd_s64:
-    Int = Intrinsic::aarch64_neon_vaddds;
-    s = "vaddds"; break;
-  case AArch64::BI__builtin_neon_vaddd_u64:
-    Int = Intrinsic::aarch64_neon_vadddu;
-    s = "vadddu"; break;
-  // Scalar Sub
-  case AArch64::BI__builtin_neon_vsubd_s64:
-    Int = Intrinsic::aarch64_neon_vsubds;
-    s = "vsubds"; break;
-  case AArch64::BI__builtin_neon_vsubd_u64:
-    Int = Intrinsic::aarch64_neon_vsubdu;
-    s = "vsubdu"; break;
-  // Scalar Saturating Add
-  case AArch64::BI__builtin_neon_vqaddb_s8:
-  case AArch64::BI__builtin_neon_vqaddh_s16:
-  case AArch64::BI__builtin_neon_vqadds_s32:
-  case AArch64::BI__builtin_neon_vqaddd_s64:
-    Int = Intrinsic::arm_neon_vqadds;
-    s = "vqadds"; OverloadInt = true; break;
-  case AArch64::BI__builtin_neon_vqaddb_u8:
-  case AArch64::BI__builtin_neon_vqaddh_u16:
-  case AArch64::BI__builtin_neon_vqadds_u32:
-  case AArch64::BI__builtin_neon_vqaddd_u64:
-    Int = Intrinsic::arm_neon_vqaddu;
-    s = "vqaddu"; OverloadInt = true; break;
-  // Scalar Saturating Sub
-  case AArch64::BI__builtin_neon_vqsubb_s8:
-  case AArch64::BI__builtin_neon_vqsubh_s16:
-  case AArch64::BI__builtin_neon_vqsubs_s32:
-  case AArch64::BI__builtin_neon_vqsubd_s64:
-    Int = Intrinsic::arm_neon_vqsubs;
-    s = "vqsubs"; OverloadInt = true; break;
-  case AArch64::BI__builtin_neon_vqsubb_u8:
-  case AArch64::BI__builtin_neon_vqsubh_u16:
-  case AArch64::BI__builtin_neon_vqsubs_u32:
-  case AArch64::BI__builtin_neon_vqsubd_u64:
-    Int = Intrinsic::arm_neon_vqsubu;
-    s = "vqsubu"; OverloadInt = true; break;
-  // Scalar Shift Left
-  case AArch64::BI__builtin_neon_vshld_s64:
-    Int = Intrinsic::aarch64_neon_vshlds;
-    s = "vshlds"; break;
-  case AArch64::BI__builtin_neon_vshld_u64:
-    Int = Intrinsic::aarch64_neon_vshldu;
-    s = "vshldu"; break;
-  // Scalar Saturating Shift Left
-  case AArch64::BI__builtin_neon_vqshlb_s8:
-  case AArch64::BI__builtin_neon_vqshlh_s16:
-  case AArch64::BI__builtin_neon_vqshls_s32:
-  case AArch64::BI__builtin_neon_vqshld_s64:
-    Int = Intrinsic::aarch64_neon_vqshls;
-    s = "vqshls"; OverloadInt = true; break;
-  case AArch64::BI__builtin_neon_vqshlb_u8:
-  case AArch64::BI__builtin_neon_vqshlh_u16:
-  case AArch64::BI__builtin_neon_vqshls_u32:
-  case AArch64::BI__builtin_neon_vqshld_u64:
-    Int = Intrinsic::aarch64_neon_vqshlu;
-    s = "vqshlu"; OverloadInt = true; break;
-  // Scalar Rouding Shift Left
-  case AArch64::BI__builtin_neon_vrshld_s64:
-    Int = Intrinsic::aarch64_neon_vrshlds;
-    s = "vrshlds"; break;
-  case AArch64::BI__builtin_neon_vrshld_u64:
-    Int = Intrinsic::aarch64_neon_vrshldu;
-    s = "vrshldu"; break;
-  // Scalar Saturating Rouding Shift Left
-  case AArch64::BI__builtin_neon_vqrshlb_s8:
-  case AArch64::BI__builtin_neon_vqrshlh_s16:
-  case AArch64::BI__builtin_neon_vqrshls_s32:
-  case AArch64::BI__builtin_neon_vqrshld_s64:
-    Int = Intrinsic::aarch64_neon_vqrshls;
-    s = "vqrshls"; OverloadInt = true; break;
-  case AArch64::BI__builtin_neon_vqrshlb_u8:
-  case AArch64::BI__builtin_neon_vqrshlh_u16:
-  case AArch64::BI__builtin_neon_vqrshls_u32:
-  case AArch64::BI__builtin_neon_vqrshld_u64:
-    Int = Intrinsic::aarch64_neon_vqrshlu;
-    s = "vqrshlu"; OverloadInt = true; break;
-  // Scalar Reduce Pairwise Add
-  case AArch64::BI__builtin_neon_vpaddd_s64:
-  case AArch64::BI__builtin_neon_vpaddd_u64:
-    Int = Intrinsic::aarch64_neon_vpadd; s = "vpadd";
-    break;
-  case AArch64::BI__builtin_neon_vpadds_f32:
-    Int = Intrinsic::aarch64_neon_vpfadd; s = "vpfadd";
-    break;
-  case AArch64::BI__builtin_neon_vpaddd_f64:
-    Int = Intrinsic::aarch64_neon_vpfaddq; s = "vpfaddq";
-    break;
-  // Scalar Reduce Pairwise Floating Point Max
-  case AArch64::BI__builtin_neon_vpmaxs_f32:
-    Int = Intrinsic::aarch64_neon_vpmax; s = "vpmax";
-    break;
-  case AArch64::BI__builtin_neon_vpmaxqd_f64:
-    Int = Intrinsic::aarch64_neon_vpmaxq; s = "vpmaxq";
-    break;
-  // Scalar Reduce Pairwise Floating Point Min
-  case AArch64::BI__builtin_neon_vpmins_f32:
-    Int = Intrinsic::aarch64_neon_vpmin; s = "vpmin";
-    break;
-  case AArch64::BI__builtin_neon_vpminqd_f64:
-    Int = Intrinsic::aarch64_neon_vpminq; s = "vpminq";
-    break;
-  // Scalar Reduce Pairwise Floating Point Maxnm
-  case AArch64::BI__builtin_neon_vpmaxnms_f32:
-    Int = Intrinsic::aarch64_neon_vpfmaxnm; s = "vpfmaxnm";
-    break;
-  case AArch64::BI__builtin_neon_vpmaxnmqd_f64:
-    Int = Intrinsic::aarch64_neon_vpfmaxnmq; s = "vpfmaxnmq";
-    break;
-  // Scalar Reduce Pairwise Floating Point Minnm
-  case AArch64::BI__builtin_neon_vpminnms_f32:
-    Int = Intrinsic::aarch64_neon_vpfminnm; s = "vpfminnm";
-    break;
-  case AArch64::BI__builtin_neon_vpminnmqd_f64:
-    Int = Intrinsic::aarch64_neon_vpfminnmq; s = "vpfminnmq";
-    break;
-  // The followings are intrinsics with scalar results generated AcrossVec vectors
-  case AArch64::BI__builtin_neon_vaddlv_s8:
-  case AArch64::BI__builtin_neon_vaddlv_s16:
-  case AArch64::BI__builtin_neon_vaddlvq_s8:
-  case AArch64::BI__builtin_neon_vaddlvq_s16:
-  case AArch64::BI__builtin_neon_vaddlvq_s32:
-    Int = Intrinsic::aarch64_neon_saddlv;
-    AcrossVec = true; ExtendEle = true; s = "saddlv"; break;
-  case AArch64::BI__builtin_neon_vaddlv_u8:
-  case AArch64::BI__builtin_neon_vaddlv_u16:
-  case AArch64::BI__builtin_neon_vaddlvq_u8:
-  case AArch64::BI__builtin_neon_vaddlvq_u16:
-  case AArch64::BI__builtin_neon_vaddlvq_u32:
-    Int = Intrinsic::aarch64_neon_uaddlv;
-    AcrossVec = true; ExtendEle = true; s = "uaddlv"; break;
-  case AArch64::BI__builtin_neon_vmaxv_s8:
-  case AArch64::BI__builtin_neon_vmaxv_s16:
-  case AArch64::BI__builtin_neon_vmaxvq_s8:
-  case AArch64::BI__builtin_neon_vmaxvq_s16:
-  case AArch64::BI__builtin_neon_vmaxvq_s32:
-    Int = Intrinsic::aarch64_neon_smaxv;
-    AcrossVec = true; ExtendEle = false; s = "smaxv"; break;
-  case AArch64::BI__builtin_neon_vmaxv_u8:
-  case AArch64::BI__builtin_neon_vmaxv_u16:
-  case AArch64::BI__builtin_neon_vmaxvq_u8:
-  case AArch64::BI__builtin_neon_vmaxvq_u16:
-  case AArch64::BI__builtin_neon_vmaxvq_u32:
-    Int = Intrinsic::aarch64_neon_umaxv;
-    AcrossVec = true; ExtendEle = false; s = "umaxv"; break;
-  case AArch64::BI__builtin_neon_vminv_s8:
-  case AArch64::BI__builtin_neon_vminv_s16:
-  case AArch64::BI__builtin_neon_vminvq_s8:
-  case AArch64::BI__builtin_neon_vminvq_s16:
-  case AArch64::BI__builtin_neon_vminvq_s32:
-    Int = Intrinsic::aarch64_neon_sminv;
-    AcrossVec = true; ExtendEle = false; s = "sminv"; break;
-  case AArch64::BI__builtin_neon_vminv_u8:
-  case AArch64::BI__builtin_neon_vminv_u16:
-  case AArch64::BI__builtin_neon_vminvq_u8:
-  case AArch64::BI__builtin_neon_vminvq_u16:
-  case AArch64::BI__builtin_neon_vminvq_u32:
-    Int = Intrinsic::aarch64_neon_uminv;
-    AcrossVec = true; ExtendEle = false; s = "uminv"; break;
-  case AArch64::BI__builtin_neon_vaddv_s8:
-  case AArch64::BI__builtin_neon_vaddv_s16:
-  case AArch64::BI__builtin_neon_vaddvq_s8:
-  case AArch64::BI__builtin_neon_vaddvq_s16:
-  case AArch64::BI__builtin_neon_vaddvq_s32:
-  case AArch64::BI__builtin_neon_vaddvq_s64:
-  case AArch64::BI__builtin_neon_vaddv_u8:
-  case AArch64::BI__builtin_neon_vaddv_u16:
-  case AArch64::BI__builtin_neon_vaddvq_u8:
-  case AArch64::BI__builtin_neon_vaddvq_u16:
-  case AArch64::BI__builtin_neon_vaddvq_u32:
-  case AArch64::BI__builtin_neon_vaddvq_u64:
-  case AArch64::BI__builtin_neon_vaddv_f32:
-  case AArch64::BI__builtin_neon_vaddvq_f32:
-  case AArch64::BI__builtin_neon_vaddvq_f64:
-    Int = Intrinsic::aarch64_neon_vaddv;
-    AcrossVec = true; ExtendEle = false; s = "vaddv"; break;      
-  case AArch64::BI__builtin_neon_vmaxv_f32:
-  case AArch64::BI__builtin_neon_vmaxvq_f32:
-  case AArch64::BI__builtin_neon_vmaxvq_f64:
-    Int = Intrinsic::aarch64_neon_vmaxv;
-    AcrossVec = true; ExtendEle = false; s = "vmaxv"; break;
-  case AArch64::BI__builtin_neon_vminv_f32:
-  case AArch64::BI__builtin_neon_vminvq_f32:
-  case AArch64::BI__builtin_neon_vminvq_f64:
-    Int = Intrinsic::aarch64_neon_vminv;
-    AcrossVec = true; ExtendEle = false; s = "vminv"; break;
-  case AArch64::BI__builtin_neon_vmaxnmv_f32:
-  case AArch64::BI__builtin_neon_vmaxnmvq_f32:
-  case AArch64::BI__builtin_neon_vmaxnmvq_f64:
-    Int = Intrinsic::aarch64_neon_vmaxnmv;
-    AcrossVec = true; ExtendEle = false; s = "vmaxnmv"; break;
-  case AArch64::BI__builtin_neon_vminnmv_f32:
-  case AArch64::BI__builtin_neon_vminnmvq_f32:
-  case AArch64::BI__builtin_neon_vminnmvq_f64:
-    Int = Intrinsic::aarch64_neon_vminnmv;
-    AcrossVec = true; ExtendEle = false; s = "vminnmv"; break;
-  // Scalar Integer Saturating Doubling Multiply Half High
-  case AArch64::BI__builtin_neon_vqdmulhh_s16:
-  case AArch64::BI__builtin_neon_vqdmulhs_s32:
-    Int = Intrinsic::arm_neon_vqdmulh;
-    s = "vqdmulh"; OverloadInt = true; break;
-  // Scalar Integer Saturating Rounding Doubling Multiply Half High
-  case AArch64::BI__builtin_neon_vqrdmulhh_s16:
-  case AArch64::BI__builtin_neon_vqrdmulhs_s32:
-    Int = Intrinsic::arm_neon_vqrdmulh;
-    s = "vqrdmulh"; OverloadInt = true; break;
-  // Scalar Floating-point Reciprocal Step and
-  case AArch64::BI__builtin_neon_vrecpss_f32:
-  case AArch64::BI__builtin_neon_vrecpsd_f64:
-    Int = Intrinsic::arm_neon_vrecps;
-    s = "vrecps"; OverloadInt = true; break;
-  // Scalar Floating-point Reciprocal Square Root Step
-  case AArch64::BI__builtin_neon_vrsqrtss_f32:
-  case AArch64::BI__builtin_neon_vrsqrtsd_f64:
-    Int = Intrinsic::arm_neon_vrsqrts;
-    s = "vrsqrts"; OverloadInt = true; break;
-  // Scalar Signed Integer Convert To Floating-point
-  case AArch64::BI__builtin_neon_vcvts_f32_s32:
-    Int = Intrinsic::aarch64_neon_vcvtf32_s32,
-    s = "vcvtf"; OverloadInt = false; break;
-  case AArch64::BI__builtin_neon_vcvtd_f64_s64:
-    Int = Intrinsic::aarch64_neon_vcvtf64_s64,
-    s = "vcvtf"; OverloadInt = false; break;
-  // Scalar Unsigned Integer Convert To Floating-point
-  case AArch64::BI__builtin_neon_vcvts_f32_u32:
-    Int = Intrinsic::aarch64_neon_vcvtf32_u32,
-    s = "vcvtf"; OverloadInt = false; break;
-  case AArch64::BI__builtin_neon_vcvtd_f64_u64:
-    Int = Intrinsic::aarch64_neon_vcvtf64_u64,
-    s = "vcvtf"; OverloadInt = false; break;
-  // Scalar Floating-point Converts
-  case AArch64::BI__builtin_neon_vcvtxd_f32_f64:
-    Int = Intrinsic::aarch64_neon_fcvtxn;
-    s = "vcvtxn"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvtas_s32_f32:
-  case AArch64::BI__builtin_neon_vcvtad_s64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtas;
-    s = "vcvtas"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvtas_u32_f32:
-  case AArch64::BI__builtin_neon_vcvtad_u64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtau;
-    s = "vcvtau"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvtms_s32_f32:
-  case AArch64::BI__builtin_neon_vcvtmd_s64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtms;
-    s = "vcvtms"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvtms_u32_f32:
-  case AArch64::BI__builtin_neon_vcvtmd_u64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtmu;
-    s = "vcvtmu"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvtns_s32_f32:
-  case AArch64::BI__builtin_neon_vcvtnd_s64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtns;
-    s = "vcvtns"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvtns_u32_f32:
-  case AArch64::BI__builtin_neon_vcvtnd_u64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtnu;
-    s = "vcvtnu"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvtps_s32_f32:
-  case AArch64::BI__builtin_neon_vcvtpd_s64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtps;
-    s = "vcvtps"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvtps_u32_f32:
-  case AArch64::BI__builtin_neon_vcvtpd_u64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtpu;
-    s = "vcvtpu"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvts_s32_f32:
-  case AArch64::BI__builtin_neon_vcvtd_s64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtzs;
-    s = "vcvtzs"; OverloadCvtInt = true; break;
-  case AArch64::BI__builtin_neon_vcvts_u32_f32:
-  case AArch64::BI__builtin_neon_vcvtd_u64_f64:
-    Int = Intrinsic::aarch64_neon_fcvtzu;
-    s = "vcvtzu"; OverloadCvtInt = true; break;
-  // Scalar Floating-point Reciprocal Estimate
-  case AArch64::BI__builtin_neon_vrecpes_f32:
-  case AArch64::BI__builtin_neon_vrecped_f64:
-    Int = Intrinsic::arm_neon_vrecpe;
-    s = "vrecpe"; OverloadInt = true; break;
-  // Scalar Floating-point Reciprocal Exponent
-  case AArch64::BI__builtin_neon_vrecpxs_f32:
-  case AArch64::BI__builtin_neon_vrecpxd_f64:
-    Int = Intrinsic::aarch64_neon_vrecpx;
-    s = "vrecpx"; OverloadInt = true; break;
-  // Scalar Floating-point Reciprocal Square Root Estimate
-  case AArch64::BI__builtin_neon_vrsqrtes_f32:
-  case AArch64::BI__builtin_neon_vrsqrted_f64:
-    Int = Intrinsic::arm_neon_vrsqrte;
-    s = "vrsqrte"; OverloadInt = true; break;
-  // Scalar Compare Equal
-  case AArch64::BI__builtin_neon_vceqd_s64:
-  case AArch64::BI__builtin_neon_vceqd_u64:
-    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
-    OverloadCmpInt = true; break;
-  // Scalar Compare Equal To Zero
-  case AArch64::BI__builtin_neon_vceqzd_s64:
-  case AArch64::BI__builtin_neon_vceqzd_u64:
-    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
+  case NEON::BI__builtin_neon_vget_lane_i8:
+  case NEON::BI__builtin_neon_vget_lane_i16:
+  case NEON::BI__builtin_neon_vget_lane_i32:
+  case NEON::BI__builtin_neon_vget_lane_i64:
+  case NEON::BI__builtin_neon_vget_lane_f32:
+  case NEON::BI__builtin_neon_vget_lane_f64:
+  case NEON::BI__builtin_neon_vgetq_lane_i8:
+  case NEON::BI__builtin_neon_vgetq_lane_i16:
+  case NEON::BI__builtin_neon_vgetq_lane_i32:
+  case NEON::BI__builtin_neon_vgetq_lane_i64:
+  case NEON::BI__builtin_neon_vgetq_lane_f32:
+  case NEON::BI__builtin_neon_vgetq_lane_f64:
+    return CGF.EmitARMBuiltinExpr(NEON::BI__builtin_neon_vget_lane_i8, E);
+  case NEON::BI__builtin_neon_vset_lane_i8:
+  case NEON::BI__builtin_neon_vset_lane_i16:
+  case NEON::BI__builtin_neon_vset_lane_i32:
+  case NEON::BI__builtin_neon_vset_lane_i64:
+  case NEON::BI__builtin_neon_vset_lane_f32:
+  case NEON::BI__builtin_neon_vset_lane_f64:
+  case NEON::BI__builtin_neon_vsetq_lane_i8:
+  case NEON::BI__builtin_neon_vsetq_lane_i16:
+  case NEON::BI__builtin_neon_vsetq_lane_i32:
+  case NEON::BI__builtin_neon_vsetq_lane_i64:
+  case NEON::BI__builtin_neon_vsetq_lane_f32:
+  case NEON::BI__builtin_neon_vsetq_lane_f64:
+    return CGF.EmitARMBuiltinExpr(NEON::BI__builtin_neon_vset_lane_i8, E);
+
+  case NEON::BI__builtin_neon_vceqzd_s64:
+  case NEON::BI__builtin_neon_vceqzd_u64:
+  case NEON::BI__builtin_neon_vcgezd_s64:
+  case NEON::BI__builtin_neon_vcgtzd_s64:
+  case NEON::BI__builtin_neon_vclezd_s64:
+  case NEON::BI__builtin_neon_vcltzd_s64:
     // Add implicit zero operand.
     Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
-    OverloadCmpInt = true; break;
-  // Scalar Compare Greater Than or Equal
-  case AArch64::BI__builtin_neon_vcged_s64:
-    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
-    OverloadCmpInt = true; break;
-  case AArch64::BI__builtin_neon_vcged_u64:
-    Int = Intrinsic::aarch64_neon_vchs; s = "vcge";
-    OverloadCmpInt = true; break;
-  // Scalar Compare Greater Than or Equal To Zero
-  case AArch64::BI__builtin_neon_vcgezd_s64:
-    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
-    // Add implicit zero operand.
-    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
-    OverloadCmpInt = true; break;
-  // Scalar Compare Greater Than
-  case AArch64::BI__builtin_neon_vcgtd_s64:
-    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
-    OverloadCmpInt = true; break;
-  case AArch64::BI__builtin_neon_vcgtd_u64:
-    Int = Intrinsic::aarch64_neon_vchi; s = "vcgt";
-    OverloadCmpInt = true; break;
-  // Scalar Compare Greater Than Zero
-  case AArch64::BI__builtin_neon_vcgtzd_s64:
-    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
-    // Add implicit zero operand.
-    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
-    OverloadCmpInt = true; break;
-  // Scalar Compare Less Than or Equal
-  case AArch64::BI__builtin_neon_vcled_s64:
-    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
-    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
-  case AArch64::BI__builtin_neon_vcled_u64:
-    Int = Intrinsic::aarch64_neon_vchs; s = "vchs";
-    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
-  // Scalar Compare Less Than or Equal To Zero
-  case AArch64::BI__builtin_neon_vclezd_s64:
-    Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
-    // Add implicit zero operand.
-    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
-    OverloadCmpInt = true; break;
-  // Scalar Compare Less Than
-  case AArch64::BI__builtin_neon_vcltd_s64:
-    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
-    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
-  case AArch64::BI__builtin_neon_vcltd_u64:
-    Int = Intrinsic::aarch64_neon_vchi; s = "vchi";
-    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
-  // Scalar Compare Less Than Zero
-  case AArch64::BI__builtin_neon_vcltzd_s64:
-    Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
-    // Add implicit zero operand.
-    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Equal
-  case AArch64::BI__builtin_neon_vceqs_f32:
-  case AArch64::BI__builtin_neon_vceqd_f64:
-    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Equal To Zero
-  case AArch64::BI__builtin_neon_vceqzs_f32:
-  case AArch64::BI__builtin_neon_vceqzd_f64:
-    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
+    break;
+  case NEON::BI__builtin_neon_vceqzs_f32:
+  case NEON::BI__builtin_neon_vceqzd_f64:
+  case NEON::BI__builtin_neon_vcgezs_f32:
+  case NEON::BI__builtin_neon_vcgezd_f64:
+  case NEON::BI__builtin_neon_vcgtzs_f32:
+  case NEON::BI__builtin_neon_vcgtzd_f64:
+  case NEON::BI__builtin_neon_vclezs_f32:
+  case NEON::BI__builtin_neon_vclezd_f64:
+  case NEON::BI__builtin_neon_vcltzs_f32:
+  case NEON::BI__builtin_neon_vcltzd_f64:
     // Add implicit zero operand.
     Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
-    IsFpCmpZInt = true;
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Greater Than Or Equal
-  case AArch64::BI__builtin_neon_vcges_f32:
-  case AArch64::BI__builtin_neon_vcged_f64:
-    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Greater Than Or Equal To Zero
-  case AArch64::BI__builtin_neon_vcgezs_f32:
-  case AArch64::BI__builtin_neon_vcgezd_f64:
-    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
-    // Add implicit zero operand.
-    Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
-    IsFpCmpZInt = true;
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Greather Than
-  case AArch64::BI__builtin_neon_vcgts_f32:
-  case AArch64::BI__builtin_neon_vcgtd_f64:
-    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Greather Than Zero
-  case AArch64::BI__builtin_neon_vcgtzs_f32:
-  case AArch64::BI__builtin_neon_vcgtzd_f64:
-    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
-    // Add implicit zero operand.
-    Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
-    IsFpCmpZInt = true;
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Less Than or Equal
-  case AArch64::BI__builtin_neon_vcles_f32:
-  case AArch64::BI__builtin_neon_vcled_f64:
-    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Less Than Or Equal To Zero
-  case AArch64::BI__builtin_neon_vclezs_f32:
-  case AArch64::BI__builtin_neon_vclezd_f64:
-    Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
-    // Add implicit zero operand.
-    Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
-    IsFpCmpZInt = true;
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Compare Less Than Zero
-  case AArch64::BI__builtin_neon_vclts_f32:
-  case AArch64::BI__builtin_neon_vcltd_f64:
-    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
-    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
-  // Scalar Floating-point Compare Less Than Zero
-  case AArch64::BI__builtin_neon_vcltzs_f32:
-  case AArch64::BI__builtin_neon_vcltzd_f64:
-    Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
-    // Add implicit zero operand.
-    Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
-    IsFpCmpZInt = true;
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Absolute Compare Greater Than Or Equal
-  case AArch64::BI__builtin_neon_vcages_f32:
-  case AArch64::BI__builtin_neon_vcaged_f64:
-    Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Absolute Compare Greater Than
-  case AArch64::BI__builtin_neon_vcagts_f32:
-  case AArch64::BI__builtin_neon_vcagtd_f64:
-    Int = Intrinsic::aarch64_neon_vcagt; s = "vcagt";
-    OverloadCmpInt = true; break;
-  // Scalar Floating-point Absolute Compare Less Than Or Equal
-  case AArch64::BI__builtin_neon_vcales_f32:
-  case AArch64::BI__builtin_neon_vcaled_f64:
-    Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
-    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
-  // Scalar Floating-point Absolute Compare Less Than
-  case AArch64::BI__builtin_neon_vcalts_f32:
-  case AArch64::BI__builtin_neon_vcaltd_f64:
-    Int = Intrinsic::aarch64_neon_vcagt; s = "vcalt";
-    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
-  // Scalar Compare Bitwise Test Bits
-  case AArch64::BI__builtin_neon_vtstd_s64:
-  case AArch64::BI__builtin_neon_vtstd_u64:
-    Int = Intrinsic::aarch64_neon_vtstd; s = "vtst";
-    OverloadCmpInt = true; break;
-  // Scalar Absolute Value
-  case AArch64::BI__builtin_neon_vabsd_s64:
-    Int = Intrinsic::aarch64_neon_vabs;
-    s = "vabs"; OverloadInt = false; break;
-  // Scalar Absolute Difference
-  case AArch64::BI__builtin_neon_vabds_f32:
-  case AArch64::BI__builtin_neon_vabdd_f64:
-    Int = Intrinsic::aarch64_neon_vabd;
-    s = "vabd"; OverloadInt = true; break;
-  // Scalar Signed Saturating Absolute Value
-  case AArch64::BI__builtin_neon_vqabsb_s8:
-  case AArch64::BI__builtin_neon_vqabsh_s16:
-  case AArch64::BI__builtin_neon_vqabss_s32:
-  case AArch64::BI__builtin_neon_vqabsd_s64:
-    Int = Intrinsic::arm_neon_vqabs;
-    s = "vqabs"; OverloadInt = true; break;
-  // Scalar Negate
-  case AArch64::BI__builtin_neon_vnegd_s64:
-    Int = Intrinsic::aarch64_neon_vneg;
-    s = "vneg"; OverloadInt = false; break;
-  // Scalar Signed Saturating Negate
-  case AArch64::BI__builtin_neon_vqnegb_s8:
-  case AArch64::BI__builtin_neon_vqnegh_s16:
-  case AArch64::BI__builtin_neon_vqnegs_s32:
-  case AArch64::BI__builtin_neon_vqnegd_s64:
-    Int = Intrinsic::arm_neon_vqneg;
-    s = "vqneg"; OverloadInt = true; break;
-  // Scalar Signed Saturating Accumulated of Unsigned Value
-  case AArch64::BI__builtin_neon_vuqaddb_s8:
-  case AArch64::BI__builtin_neon_vuqaddh_s16:
-  case AArch64::BI__builtin_neon_vuqadds_s32:
-  case AArch64::BI__builtin_neon_vuqaddd_s64:
-    Int = Intrinsic::aarch64_neon_vuqadd;
-    s = "vuqadd"; OverloadInt = true; break;
-  // Scalar Unsigned Saturating Accumulated of Signed Value
-  case AArch64::BI__builtin_neon_vsqaddb_u8:
-  case AArch64::BI__builtin_neon_vsqaddh_u16:
-  case AArch64::BI__builtin_neon_vsqadds_u32:
-  case AArch64::BI__builtin_neon_vsqaddd_u64:
-    Int = Intrinsic::aarch64_neon_vsqadd;
-    s = "vsqadd"; OverloadInt = true; break;
-  // Signed Saturating Doubling Multiply-Add Long
-  case AArch64::BI__builtin_neon_vqdmlalh_s16:
-  case AArch64::BI__builtin_neon_vqdmlals_s32:
-    Int = Intrinsic::aarch64_neon_vqdmlal;
-    s = "vqdmlal"; OverloadWideInt = true; break;
-  // Signed Saturating Doubling Multiply-Subtract Long
-  case AArch64::BI__builtin_neon_vqdmlslh_s16:
-  case AArch64::BI__builtin_neon_vqdmlsls_s32:
-    Int = Intrinsic::aarch64_neon_vqdmlsl;
-    s = "vqdmlsl"; OverloadWideInt = true; break;
-  // Signed Saturating Doubling Multiply Long
-  case AArch64::BI__builtin_neon_vqdmullh_s16:
-  case AArch64::BI__builtin_neon_vqdmulls_s32:
-    Int = Intrinsic::arm_neon_vqdmull;
-    s = "vqdmull"; OverloadWideInt = true; break;
-  // Scalar Signed Saturating Extract Unsigned Narrow
-  case AArch64::BI__builtin_neon_vqmovunh_s16:
-  case AArch64::BI__builtin_neon_vqmovuns_s32:
-  case AArch64::BI__builtin_neon_vqmovund_s64:
-    Int = Intrinsic::arm_neon_vqmovnsu;
-    s = "vqmovun"; OverloadNarrowInt = true; break;
-  // Scalar Signed Saturating Extract Narrow
-  case AArch64::BI__builtin_neon_vqmovnh_s16:
-  case AArch64::BI__builtin_neon_vqmovns_s32:
-  case AArch64::BI__builtin_neon_vqmovnd_s64:
-    Int = Intrinsic::arm_neon_vqmovns;
-    s = "vqmovn"; OverloadNarrowInt = true; break;
-  // Scalar Unsigned Saturating Extract Narrow
-  case AArch64::BI__builtin_neon_vqmovnh_u16:
-  case AArch64::BI__builtin_neon_vqmovns_u32:
-  case AArch64::BI__builtin_neon_vqmovnd_u64:
-    Int = Intrinsic::arm_neon_vqmovnu;
-    s = "vqmovn"; OverloadNarrowInt = true; break;
-  // Scalar Signed Shift Right (Immediate)
-  case AArch64::BI__builtin_neon_vshrd_n_s64:
-    Int = Intrinsic::aarch64_neon_vshrds_n;
-    s = "vsshr"; OverloadInt = false; break;
-  // Scalar Unsigned Shift Right (Immediate)
-  case AArch64::BI__builtin_neon_vshrd_n_u64:
-    Int = Intrinsic::aarch64_neon_vshrdu_n;
-    s = "vushr"; OverloadInt = false; break;
-  // Scalar Signed Rounding Shift Right (Immediate)
-  case AArch64::BI__builtin_neon_vrshrd_n_s64:
-    Int = Intrinsic::aarch64_neon_vsrshr;
-    s = "vsrshr"; OverloadInt = true; break;
-  // Scalar Unsigned Rounding Shift Right (Immediate)
-  case AArch64::BI__builtin_neon_vrshrd_n_u64:
-    Int = Intrinsic::aarch64_neon_vurshr;
-    s = "vurshr"; OverloadInt = true; break;
-  // Scalar Signed Shift Right and Accumulate (Immediate)
-  case AArch64::BI__builtin_neon_vsrad_n_s64:
-    Int = Intrinsic::aarch64_neon_vsrads_n;
-    s = "vssra"; OverloadInt = false; break;
-  // Scalar Unsigned Shift Right and Accumulate (Immediate)
-  case AArch64::BI__builtin_neon_vsrad_n_u64:
-    Int = Intrinsic::aarch64_neon_vsradu_n;
-    s = "vusra"; OverloadInt = false; break;
-  // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
-  case AArch64::BI__builtin_neon_vrsrad_n_s64:
-    Int = Intrinsic::aarch64_neon_vrsrads_n;
-    s = "vsrsra"; OverloadInt = false; break;
-  // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
-  case AArch64::BI__builtin_neon_vrsrad_n_u64:
-    Int = Intrinsic::aarch64_neon_vrsradu_n;
-    s = "vursra"; OverloadInt = false; break;
-  // Scalar Signed/Unsigned Shift Left (Immediate)
-  case AArch64::BI__builtin_neon_vshld_n_s64:
-  case AArch64::BI__builtin_neon_vshld_n_u64:
-    Int = Intrinsic::aarch64_neon_vshld_n;
-    s = "vshl"; OverloadInt = false; break;
-  // Signed Saturating Shift Left (Immediate)
-  case AArch64::BI__builtin_neon_vqshlb_n_s8:
-  case AArch64::BI__builtin_neon_vqshlh_n_s16:
-  case AArch64::BI__builtin_neon_vqshls_n_s32:
-  case AArch64::BI__builtin_neon_vqshld_n_s64:
-    Int = Intrinsic::aarch64_neon_vqshls_n;
-    s = "vsqshl"; OverloadInt = true; break;
-  // Unsigned Saturating Shift Left (Immediate)
-  case AArch64::BI__builtin_neon_vqshlb_n_u8:
-  case AArch64::BI__builtin_neon_vqshlh_n_u16:
-  case AArch64::BI__builtin_neon_vqshls_n_u32:
-  case AArch64::BI__builtin_neon_vqshld_n_u64:
-    Int = Intrinsic::aarch64_neon_vqshlu_n;
-    s = "vuqshl"; OverloadInt = true; break;
-  // Signed Saturating Shift Left Unsigned (Immediate)
-  case AArch64::BI__builtin_neon_vqshlub_n_s8:
-  case AArch64::BI__builtin_neon_vqshluh_n_s16:
-  case AArch64::BI__builtin_neon_vqshlus_n_s32:
-  case AArch64::BI__builtin_neon_vqshlud_n_s64:
-    Int = Intrinsic::aarch64_neon_vsqshlu;
-    s = "vsqshlu"; OverloadInt = true; break;
-  // Shift Right And Insert (Immediate)
-  case AArch64::BI__builtin_neon_vsrid_n_s64:
-  case AArch64::BI__builtin_neon_vsrid_n_u64:
-    Int = Intrinsic::aarch64_neon_vsri;
-    s = "vsri"; OverloadInt = true; break;
-  // Shift Left And Insert (Immediate)
-  case AArch64::BI__builtin_neon_vslid_n_s64:
-  case AArch64::BI__builtin_neon_vslid_n_u64:
-    Int = Intrinsic::aarch64_neon_vsli;
-    s = "vsli"; OverloadInt = true; break;
-  // Signed Saturating Shift Right Narrow (Immediate)
-  case AArch64::BI__builtin_neon_vqshrnh_n_s16:
-  case AArch64::BI__builtin_neon_vqshrns_n_s32:
-  case AArch64::BI__builtin_neon_vqshrnd_n_s64:
-    Int = Intrinsic::aarch64_neon_vsqshrn;
-    s = "vsqshrn"; OverloadInt = true; break;
-  // Unsigned Saturating Shift Right Narrow (Immediate)
-  case AArch64::BI__builtin_neon_vqshrnh_n_u16:
-  case AArch64::BI__builtin_neon_vqshrns_n_u32:
-  case AArch64::BI__builtin_neon_vqshrnd_n_u64:
-    Int = Intrinsic::aarch64_neon_vuqshrn;
-    s = "vuqshrn"; OverloadInt = true; break;
-  // Signed Saturating Rounded Shift Right Narrow (Immediate)
-  case AArch64::BI__builtin_neon_vqrshrnh_n_s16:
-  case AArch64::BI__builtin_neon_vqrshrns_n_s32:
-  case AArch64::BI__builtin_neon_vqrshrnd_n_s64:
-    Int = Intrinsic::aarch64_neon_vsqrshrn;
-    s = "vsqrshrn"; OverloadInt = true; break;
-  // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
-  case AArch64::BI__builtin_neon_vqrshrnh_n_u16:
-  case AArch64::BI__builtin_neon_vqrshrns_n_u32:
-  case AArch64::BI__builtin_neon_vqrshrnd_n_u64:
-    Int = Intrinsic::aarch64_neon_vuqrshrn;
-    s = "vuqrshrn"; OverloadInt = true; break;
-  // Signed Saturating Shift Right Unsigned Narrow (Immediate)
-  case AArch64::BI__builtin_neon_vqshrunh_n_s16:
-  case AArch64::BI__builtin_neon_vqshruns_n_s32:
-  case AArch64::BI__builtin_neon_vqshrund_n_s64:
-    Int = Intrinsic::aarch64_neon_vsqshrun;
-    s = "vsqshrun"; OverloadInt = true; break;
-  // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
-  case AArch64::BI__builtin_neon_vqrshrunh_n_s16:
-  case AArch64::BI__builtin_neon_vqrshruns_n_s32:
-  case AArch64::BI__builtin_neon_vqrshrund_n_s64:
-    Int = Intrinsic::aarch64_neon_vsqrshrun;
-    s = "vsqrshrun"; OverloadInt = true; break;
-  // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
-  case AArch64::BI__builtin_neon_vcvts_n_f32_s32:
-    Int = Intrinsic::aarch64_neon_vcvtf32_n_s32;
-    s = "vcvtf"; OverloadInt = false; break;
-  case AArch64::BI__builtin_neon_vcvtd_n_f64_s64:
-    Int = Intrinsic::aarch64_neon_vcvtf64_n_s64;
-    s = "vcvtf"; OverloadInt = false; break;
-  // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
-  case AArch64::BI__builtin_neon_vcvts_n_f32_u32:
-    Int = Intrinsic::aarch64_neon_vcvtf32_n_u32;
-    s = "vcvtf"; OverloadInt = false; break;
-  case AArch64::BI__builtin_neon_vcvtd_n_f64_u64:
-    Int = Intrinsic::aarch64_neon_vcvtf64_n_u64;
-    s = "vcvtf"; OverloadInt = false; break;
-  // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
-  case AArch64::BI__builtin_neon_vcvts_n_s32_f32:
-    Int = Intrinsic::aarch64_neon_vcvts_n_s32_f32;
-    s = "fcvtzs"; OverloadInt = false; break;
-  case AArch64::BI__builtin_neon_vcvtd_n_s64_f64:
-    Int = Intrinsic::aarch64_neon_vcvtd_n_s64_f64;
-    s = "fcvtzs"; OverloadInt = false; break;
-  // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
-  case AArch64::BI__builtin_neon_vcvts_n_u32_f32:
-    Int = Intrinsic::aarch64_neon_vcvts_n_u32_f32;
-    s = "fcvtzu"; OverloadInt = false; break;
-  case AArch64::BI__builtin_neon_vcvtd_n_u64_f64:
-    Int = Intrinsic::aarch64_neon_vcvtd_n_u64_f64;
-    s = "fcvtzu"; OverloadInt = false; break;
+    break;
   }
 
-  if (!Int)
+  // The builtin didn't need any handling specific to the AArch64 backend, so
+  // defer to the common SISD code.
+  return EmitCommonNeonSISDBuiltinExpr(CGF, SISDInfo, Ops, E);
+}
+
+Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
+    unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
+    const char *NameHint, unsigned Modifier, const CallExpr *E,
+    SmallVectorImpl<llvm::Value *> &Ops, llvm::Value *Align) {
+  // Get the last argument, which specifies the vector type.
+  llvm::APSInt NeonTypeConst;
+  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
+  if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
     return 0;
 
-  // AArch64 scalar builtin that returns scalar type
-  // and should be mapped to AArch64 intrinsic that returns
-  // one-element vector type.
-  Function *F = 0;
-  if (AcrossVec) {
-    // Gen arg type
-    const Expr *Arg = E->getArg(E->getNumArgs()-1);
-    llvm::Type *Ty = CGF.ConvertType(Arg->getType());
-    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
-    llvm::Type *ETy = VTy->getElementType();
-    llvm::VectorType *RTy = llvm::VectorType::get(ETy, 1);
-  
-    if (ExtendEle) {
-      assert(!ETy->isFloatingPointTy());
-      RTy = llvm::VectorType::getExtendedElementVectorType(RTy);
+  // Determine the type of this overloaded NEON intrinsic.
+  NeonTypeFlags Type(NeonTypeConst.getZExtValue());
+  bool Usgn = Type.isUnsigned();
+  bool Quad = Type.isQuad();
+
+  llvm::VectorType *VTy = GetNeonType(this, Type);
+  llvm::Type *Ty = VTy;
+  if (!Ty)
+    return 0;
+
+  unsigned Int = LLVMIntrinsic;
+  if ((Modifier & UnsignedAlts) && !Usgn)
+    Int = AltLLVMIntrinsic;
+
+  switch (BuiltinID) {
+  default: break;
+  case NEON::BI__builtin_neon_vabs_v:
+  case NEON::BI__builtin_neon_vabsq_v:
+    if (VTy->getElementType()->isFloatingPointTy())
+      return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
+    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
+  case NEON::BI__builtin_neon_vaddhn_v: {
+    llvm::VectorType *SrcTy =
+        llvm::VectorType::getExtendedElementVectorType(VTy);
+
+    // %sum = add <4 x i32> %lhs, %rhs
+    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
+    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
+
+    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
+    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
+                                          SrcTy->getScalarSizeInBits() / 2);
+    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
+    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
+
+    // %res = trunc <4 x i32> %high to <4 x i16>
+    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
+  }
+  case NEON::BI__builtin_neon_vcale_v:
+  case NEON::BI__builtin_neon_vcaleq_v:
+  case NEON::BI__builtin_neon_vcalt_v:
+  case NEON::BI__builtin_neon_vcaltq_v:
+    std::swap(Ops[0], Ops[1]);
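+    // vcale/vcalt are emitted as vcage/vcagt with the operands swapped, so
+    // fall through.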
+  case NEON::BI__builtin_neon_vcage_v:
+  case NEON::BI__builtin_neon_vcageq_v:
+  case NEON::BI__builtin_neon_vcagt_v:
+  case NEON::BI__builtin_neon_vcagtq_v: {
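+    // The absolute-compare intrinsics are overloaded on both the integer
+    // result vector and the floating-point operand vector.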
+    llvm::Type *VecFlt = llvm::VectorType::get(
+        VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
+        VTy->getNumElements());
+    llvm::Type *Tys[] = { VTy, VecFlt };
+    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
+    return EmitNeonCall(F, Ops, NameHint);
+  }
+  case NEON::BI__builtin_neon_vclz_v:
+  case NEON::BI__builtin_neon_vclzq_v:
+    // We generate a target-independent intrinsic, which needs a second
+    // argument specifying whether or not clz of zero is undefined; on ARM it
+    // isn't.
+    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
+    break;
+  case NEON::BI__builtin_neon_vcvt_f32_v:
+  case NEON::BI__builtin_neon_vcvtq_f32_v:
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
+    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+  case NEON::BI__builtin_neon_vcvt_n_f32_v:
+  case NEON::BI__builtin_neon_vcvt_n_f64_v:
+  case NEON::BI__builtin_neon_vcvtq_n_f32_v:
+  case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *FloatTy =
+        GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                               : NeonTypeFlags::Float32,
+                                        false, Quad));
+    llvm::Type *Tys[2] = { FloatTy, Ty };
+    Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
+    Function *F = CGM.getIntrinsic(Int, Tys);
+    return EmitNeonCall(F, Ops, "vcvt_n");
+  }
+  case NEON::BI__builtin_neon_vcvt_n_s32_v:
+  case NEON::BI__builtin_neon_vcvt_n_u32_v:
+  case NEON::BI__builtin_neon_vcvt_n_s64_v:
+  case NEON::BI__builtin_neon_vcvt_n_u64_v:
+  case NEON::BI__builtin_neon_vcvtq_n_s32_v:
+  case NEON::BI__builtin_neon_vcvtq_n_u32_v:
+  case NEON::BI__builtin_neon_vcvtq_n_s64_v:
+  case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *FloatTy =
+        GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                               : NeonTypeFlags::Float32,
+                                        false, Quad));
+    llvm::Type *Tys[2] = { Ty, FloatTy };
+    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
+    return EmitNeonCall(F, Ops, "vcvt_n");
+  }
+  case NEON::BI__builtin_neon_vcvt_s32_v:
+  case NEON::BI__builtin_neon_vcvt_u32_v:
+  case NEON::BI__builtin_neon_vcvt_s64_v:
+  case NEON::BI__builtin_neon_vcvt_u64_v:
+  case NEON::BI__builtin_neon_vcvtq_s32_v:
+  case NEON::BI__builtin_neon_vcvtq_u32_v:
+  case NEON::BI__builtin_neon_vcvtq_s64_v:
+  case NEON::BI__builtin_neon_vcvtq_u64_v: {
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *FloatTy =
+        GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                               : NeonTypeFlags::Float32,
+                                        false, Quad));
+    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
+    return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
+  }
+  case NEON::BI__builtin_neon_vcvta_s32_v:
+  case NEON::BI__builtin_neon_vcvta_s64_v:
+  case NEON::BI__builtin_neon_vcvta_u32_v:
+  case NEON::BI__builtin_neon_vcvta_u64_v:
+  case NEON::BI__builtin_neon_vcvtaq_s32_v:
+  case NEON::BI__builtin_neon_vcvtaq_s64_v:
+  case NEON::BI__builtin_neon_vcvtaq_u32_v:
+  case NEON::BI__builtin_neon_vcvtaq_u64_v:
+  case NEON::BI__builtin_neon_vcvtn_s32_v:
+  case NEON::BI__builtin_neon_vcvtn_s64_v:
+  case NEON::BI__builtin_neon_vcvtn_u32_v:
+  case NEON::BI__builtin_neon_vcvtn_u64_v:
+  case NEON::BI__builtin_neon_vcvtnq_s32_v:
+  case NEON::BI__builtin_neon_vcvtnq_s64_v:
+  case NEON::BI__builtin_neon_vcvtnq_u32_v:
+  case NEON::BI__builtin_neon_vcvtnq_u64_v:
+  case NEON::BI__builtin_neon_vcvtp_s32_v:
+  case NEON::BI__builtin_neon_vcvtp_s64_v:
+  case NEON::BI__builtin_neon_vcvtp_u32_v:
+  case NEON::BI__builtin_neon_vcvtp_u64_v:
+  case NEON::BI__builtin_neon_vcvtpq_s32_v:
+  case NEON::BI__builtin_neon_vcvtpq_s64_v:
+  case NEON::BI__builtin_neon_vcvtpq_u32_v:
+  case NEON::BI__builtin_neon_vcvtpq_u64_v:
+  case NEON::BI__builtin_neon_vcvtm_s32_v:
+  case NEON::BI__builtin_neon_vcvtm_s64_v:
+  case NEON::BI__builtin_neon_vcvtm_u32_v:
+  case NEON::BI__builtin_neon_vcvtm_u64_v:
+  case NEON::BI__builtin_neon_vcvtmq_s32_v:
+  case NEON::BI__builtin_neon_vcvtmq_s64_v:
+  case NEON::BI__builtin_neon_vcvtmq_u32_v:
+  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *InTy =
+      GetNeonType(this,
+                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                : NeonTypeFlags::Float32, false, Quad));
+    llvm::Type *Tys[2] = { Ty, InTy };
+    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
+  }
+  case NEON::BI__builtin_neon_vext_v:
+  case NEON::BI__builtin_neon_vextq_v: {
+    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
+    SmallVector<Constant*, 16> Indices;
+    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
+
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Value *SV = llvm::ConstantVector::get(Indices);
+    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
+  }
+  case NEON::BI__builtin_neon_vfma_v:
+  case NEON::BI__builtin_neon_vfmaq_v: {
+    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+
+    // The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
+    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vld1_v:
+  case NEON::BI__builtin_neon_vld1q_v:
+    Ops.push_back(Align);
+    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vld1");
+  case NEON::BI__builtin_neon_vld2_v:
+  case NEON::BI__builtin_neon_vld2q_v:
+  case NEON::BI__builtin_neon_vld3_v:
+  case NEON::BI__builtin_neon_vld3q_v:
+  case NEON::BI__builtin_neon_vld4_v:
+  case NEON::BI__builtin_neon_vld4q_v: {
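+    // Call the intrinsic on the source pointer, then store the returned
+    // struct of vectors through the destination pointer in Ops[0].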
+    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
+    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, NameHint);
+    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vld1_dup_v:
+  case NEON::BI__builtin_neon_vld1q_dup_v: {
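+    // Load a single element and splat it into every lane of the result.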
+    Value *V = UndefValue::get(Ty);
+    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
+    return EmitNeonSplat(Ops[0], CI);
+  }
+  case NEON::BI__builtin_neon_vld2_lane_v:
+  case NEON::BI__builtin_neon_vld2q_lane_v:
+  case NEON::BI__builtin_neon_vld3_lane_v:
+  case NEON::BI__builtin_neon_vld3q_lane_v:
+  case NEON::BI__builtin_neon_vld4_lane_v:
+  case NEON::BI__builtin_neon_vld4q_lane_v: {
+    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
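+    // Operands 2 onwards (except the trailing lane index) hold the current
+    // register values; cast them to the overloaded vector type.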
+    for (unsigned I = 2; I < Ops.size() - 1; ++I)
+      Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
+    Ops.push_back(Align);
+    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
+    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vmovl_v: {
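+    // The source has half-width elements; widen with a zero- or sign-extend.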
+    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
+    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+    if (Usgn)
+      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
+    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
+  }
+  case NEON::BI__builtin_neon_vmovn_v: {
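+    // The source has double-width elements; truncate each lane.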
+    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
+    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
+  }
+  case NEON::BI__builtin_neon_vmull_v:
+    // FIXME: the integer vmull operations could be emitted in terms of pure
+    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
+    // hoisting the exts outside loops. Until global ISel comes along that can
+    // see through such movement, this leads to bad CodeGen. So we need an
+    // intrinsic for now.
+    Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
+    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
+  case NEON::BI__builtin_neon_vpadal_v:
+  case NEON::BI__builtin_neon_vpadalq_v: {
+    // The source operand type has twice as many elements of half the size.
+    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+    llvm::Type *EltTy =
+      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+    llvm::Type *NarrowTy =
+      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+    llvm::Type *Tys[2] = { Ty, NarrowTy };
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
+  }
+  case NEON::BI__builtin_neon_vpaddl_v:
+  case NEON::BI__builtin_neon_vpaddlq_v: {
+    // The source operand type has twice as many elements of half the size.
+    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+    llvm::Type *NarrowTy =
+      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+    llvm::Type *Tys[2] = { Ty, NarrowTy };
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
+  }
+  case NEON::BI__builtin_neon_vqdmlal_v:
+  case NEON::BI__builtin_neon_vqdmlsl_v: {
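+    // Emit the saturating doubling multiply of the last two operands, then
+    // accumulate it into the first operand with the alternative intrinsic.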
+    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
+    Value *Mul = EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty),
+                              MulOps, "vqdmlal");
+
+    SmallVector<Value *, 2> AccumOps;
+    AccumOps.push_back(Ops[0]);
+    AccumOps.push_back(Mul);
+    return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty),
+                        AccumOps, NameHint);
+  }
+  case NEON::BI__builtin_neon_vqshl_n_v:
+  case NEON::BI__builtin_neon_vqshlq_n_v:
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
+                        1, false);
+  case NEON::BI__builtin_neon_vrecpe_v:
+  case NEON::BI__builtin_neon_vrecpeq_v:
+  case NEON::BI__builtin_neon_vrsqrte_v:
+  case NEON::BI__builtin_neon_vrsqrteq_v:
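+    // Pick the floating-point or integer flavour of the estimate intrinsic
+    // based on the element type.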
+    Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
+
+  case NEON::BI__builtin_neon_vshl_n_v:
+  case NEON::BI__builtin_neon_vshlq_n_v:
+    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+    return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
+                             "vshl_n");
+  case NEON::BI__builtin_neon_vshll_n_v: {
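+    // Widen the input to the destination element size, then shift left by
+    // the immediate.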
+    llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
+    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+    if (Usgn)
+      Ops[0] = Builder.CreateZExt(Ops[0], VTy);
+    else
+      Ops[0] = Builder.CreateSExt(Ops[0], VTy);
+    Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
+    return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
+  }
+  case NEON::BI__builtin_neon_vshrn_n_v: {
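+    // Shift the double-width source right by the immediate, then truncate to
+    // the narrow result type.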
+    llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+    Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
+    if (Usgn)
+      Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
+    else
+      Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
+    return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
+  }
+  case NEON::BI__builtin_neon_vshr_n_v:
+  case NEON::BI__builtin_neon_vshrq_n_v:
+    return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
+  case NEON::BI__builtin_neon_vst1_v:
+  case NEON::BI__builtin_neon_vst1q_v:
+  case NEON::BI__builtin_neon_vst2_v:
+  case NEON::BI__builtin_neon_vst2q_v:
+  case NEON::BI__builtin_neon_vst3_v:
+  case NEON::BI__builtin_neon_vst3q_v:
+  case NEON::BI__builtin_neon_vst4_v:
+  case NEON::BI__builtin_neon_vst4q_v:
+  case NEON::BI__builtin_neon_vst2_lane_v:
+  case NEON::BI__builtin_neon_vst2q_lane_v:
+  case NEON::BI__builtin_neon_vst3_lane_v:
+  case NEON::BI__builtin_neon_vst3q_lane_v:
+  case NEON::BI__builtin_neon_vst4_lane_v:
+  case NEON::BI__builtin_neon_vst4q_lane_v:
+    Ops.push_back(Align);
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
+  case NEON::BI__builtin_neon_vsubhn_v: {
+    llvm::VectorType *SrcTy =
+        llvm::VectorType::getExtendedElementVectorType(VTy);
+
+    // %diff = sub <4 x i32> %lhs, %rhs
+    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
+    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
+
+    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
+    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
+                                          SrcTy->getScalarSizeInBits() / 2);
+    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
+    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
+
+    // %res = trunc <4 x i32> %high to <4 x i16>
+    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
+  }
+  case NEON::BI__builtin_neon_vtrn_v:
+  case NEON::BI__builtin_neon_vtrnq_v: {
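+    // The builtin returns two vectors through the pointer in Ops[0]; build
+    // each transposed half with a shufflevector and store it to its slot.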
+    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+    Value *SV = 0;
+
+    for (unsigned vi = 0; vi != 2; ++vi) {
+      SmallVector<Constant*, 16> Indices;
+      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+        Indices.push_back(Builder.getInt32(i+vi));
+        Indices.push_back(Builder.getInt32(i+e+vi));
+      }
+      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+      SV = llvm::ConstantVector::get(Indices);
+      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
+      SV = Builder.CreateStore(SV, Addr);
     }
+    return SV;
+  }
+  case NEON::BI__builtin_neon_vtst_v:
+  case NEON::BI__builtin_neon_vtstq_v: {
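+    // Each lane becomes all-ones if (a & b) is non-zero, otherwise zero.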
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
+    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+                                ConstantAggregateZero::get(Ty));
+    return Builder.CreateSExt(Ops[0], Ty, "vtst");
+  }
+  case NEON::BI__builtin_neon_vuzp_v:
+  case NEON::BI__builtin_neon_vuzpq_v: {
+    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+    Value *SV = 0;
 
-    llvm::Type *Tys[2] = {RTy, VTy};
-    F = CGF.CGM.getIntrinsic(Int, Tys);
-    assert(E->getNumArgs() == 1);
-  } else if (OverloadInt) {
-    // Determine the type of this overloaded AArch64 intrinsic
-    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
-    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
-    assert(VTy);
+    for (unsigned vi = 0; vi != 2; ++vi) {
+      SmallVector<Constant*, 16> Indices;
+      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+        Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
 
-    F = CGF.CGM.getIntrinsic(Int, VTy);
-  } else if (OverloadWideInt || OverloadNarrowInt) {
-    // Determine the type of this overloaded AArch64 intrinsic
-    const Expr *Arg = E->getArg(E->getNumArgs()-1);
-    llvm::Type *Ty = CGF.ConvertType(Arg->getType());
-    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
-    llvm::VectorType *RTy = OverloadWideInt ? 
-      llvm::VectorType::getExtendedElementVectorType(VTy) :
-      llvm::VectorType::getTruncatedElementVectorType(VTy);
-    F = CGF.CGM.getIntrinsic(Int, RTy);
-  } else if (OverloadCmpInt) {
-    // Determine the types of this overloaded AArch64 intrinsic
-    SmallVector<llvm::Type *, 3> Tys;
-    const Expr *Arg = E->getArg(E->getNumArgs()-1);
-    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
-    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
-    Tys.push_back(VTy);
-    Ty = CGF.ConvertType(Arg->getType());
-    VTy = llvm::VectorType::get(Ty, 1);
-    Tys.push_back(VTy);
-    if(IsFpCmpZInt)
-      VTy = llvm::VectorType::get(CGF.FloatTy, 1);
-    Tys.push_back(VTy);
+      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+      SV = llvm::ConstantVector::get(Indices);
+      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
+      SV = Builder.CreateStore(SV, Addr);
+    }
+    return SV;
+  }
+  case NEON::BI__builtin_neon_vzip_v:
+  case NEON::BI__builtin_neon_vzipq_v: {
+    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+    Value *SV = 0;
 
-    F = CGF.CGM.getIntrinsic(Int, Tys);
-  } else if (OverloadCvtInt) {
-    // Determine the types of this overloaded AArch64 intrinsic
-    SmallVector<llvm::Type *, 2> Tys;
-    const Expr *Arg = E->getArg(E->getNumArgs()-1);
-    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
-    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
-    Tys.push_back(VTy);
-    Ty = CGF.ConvertType(Arg->getType());
-    VTy = llvm::VectorType::get(Ty, 1);
-    Tys.push_back(VTy);
+    for (unsigned vi = 0; vi != 2; ++vi) {
+      SmallVector<Constant*, 16> Indices;
+      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+        Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
+        Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
+      }
+      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+      SV = llvm::ConstantVector::get(Indices);
+      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
+      SV = Builder.CreateStore(SV, Addr);
+    }
+    return SV;
+  }
+  }
 
-    F = CGF.CGM.getIntrinsic(Int, Tys);
-  } else
-    F = CGF.CGM.getIntrinsic(Int);
+  assert(Int && "Expected valid intrinsic number");
 
-  Value *Result = CGF.EmitNeonCall(F, Ops, s);
-  llvm::Type *ResultType = CGF.ConvertType(E->getType());
+  // Determine the type(s) of this overloaded NEON intrinsic.
+  Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
+
+  Value *Result = EmitNeonCall(F, Ops, NameHint);
+  llvm::Type *ResultType = ConvertType(E->getType());
   // AArch64 intrinsic one-element vector type cast to
   // scalar type expected by the builtin
-  return CGF.Builder.CreateBitCast(Result, ResultType, s);
+  return Builder.CreateBitCast(Result, ResultType, NameHint);
 }
 
 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
     Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
     const CmpInst::Predicate Ip, const Twine &Name) {
-  llvm::Type *OTy = ((llvm::User *)Op)->getOperand(0)->getType();
-  if (OTy->isPointerTy())
-    OTy = Ty;
+  llvm::Type *OTy = Op->getType();
+
+  // FIXME: this is utterly horrific. We should not be looking at previous
+  // codegen context to find out what needs doing. Unfortunately TableGen
+  // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
+  // (etc).
+  if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
+    OTy = BI->getOperand(0)->getType();
+
   Op = Builder.CreateBitCast(Op, OTy);
-  if (((llvm::VectorType *)OTy)->getElementType()->isFloatingPointTy()) {
-    Op = Builder.CreateFCmp(Fp, Op, ConstantAggregateZero::get(OTy));
+  if (OTy->getScalarType()->isFloatingPointTy()) {
+    Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
   } else {
-    Op = Builder.CreateICmp(Ip, Op, ConstantAggregateZero::get(OTy));
+    Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
   }
-  return Builder.CreateZExt(Op, Ty, Name);
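+  // Sign-extend the i1 compare result so that true lanes become all-ones,
+  // matching the NEON compare semantics.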
+  return Builder.CreateSExt(Op, Ty, Name);
 }
 
 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
@@ -2681,13 +3526,9 @@
                                                      ZeroTbl, SV, Name));
   }
 
-  TblTy = llvm::VectorType::get(TblTy->getElementType(),
-                                2*TblTy->getNumElements());
-  llvm::Type *Tys[2] = { ResTy, TblTy };
-
   Function *TblF;
   TblOps.push_back(IndexOp);
-  TblF = CGF.CGM.getIntrinsic(IntID, Tys);
+  TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
   
   return CGF.EmitNeonCall(TblF, TblOps, Name);
 }
@@ -2698,37 +3539,33 @@
   unsigned int Int = 0;
   const char *s = NULL;
 
-  unsigned TblPos;
   switch (BuiltinID) {
   default:
     return 0;
-  case AArch64::BI__builtin_neon_vtbl1_v:
-  case AArch64::BI__builtin_neon_vqtbl1_v:
-  case AArch64::BI__builtin_neon_vqtbl1q_v:
-  case AArch64::BI__builtin_neon_vtbl2_v:
-  case AArch64::BI__builtin_neon_vqtbl2_v:
-  case AArch64::BI__builtin_neon_vqtbl2q_v:
-  case AArch64::BI__builtin_neon_vtbl3_v:
-  case AArch64::BI__builtin_neon_vqtbl3_v:
-  case AArch64::BI__builtin_neon_vqtbl3q_v:
-  case AArch64::BI__builtin_neon_vtbl4_v:
-  case AArch64::BI__builtin_neon_vqtbl4_v:
-  case AArch64::BI__builtin_neon_vqtbl4q_v:
-    TblPos = 0;
-    break;
-  case AArch64::BI__builtin_neon_vtbx1_v:
-  case AArch64::BI__builtin_neon_vqtbx1_v:
-  case AArch64::BI__builtin_neon_vqtbx1q_v:
-  case AArch64::BI__builtin_neon_vtbx2_v:
-  case AArch64::BI__builtin_neon_vqtbx2_v:
-  case AArch64::BI__builtin_neon_vqtbx2q_v:
-  case AArch64::BI__builtin_neon_vtbx3_v:
-  case AArch64::BI__builtin_neon_vqtbx3_v:
-  case AArch64::BI__builtin_neon_vqtbx3q_v:
-  case AArch64::BI__builtin_neon_vtbx4_v:
-  case AArch64::BI__builtin_neon_vqtbx4_v:
-  case AArch64::BI__builtin_neon_vqtbx4q_v:
-    TblPos = 1;
+  case NEON::BI__builtin_neon_vtbl1_v:
+  case NEON::BI__builtin_neon_vqtbl1_v:
+  case NEON::BI__builtin_neon_vqtbl1q_v:
+  case NEON::BI__builtin_neon_vtbl2_v:
+  case NEON::BI__builtin_neon_vqtbl2_v:
+  case NEON::BI__builtin_neon_vqtbl2q_v:
+  case NEON::BI__builtin_neon_vtbl3_v:
+  case NEON::BI__builtin_neon_vqtbl3_v:
+  case NEON::BI__builtin_neon_vqtbl3q_v:
+  case NEON::BI__builtin_neon_vtbl4_v:
+  case NEON::BI__builtin_neon_vqtbl4_v:
+  case NEON::BI__builtin_neon_vqtbl4q_v:
+  case NEON::BI__builtin_neon_vtbx1_v:
+  case NEON::BI__builtin_neon_vqtbx1_v:
+  case NEON::BI__builtin_neon_vqtbx1q_v:
+  case NEON::BI__builtin_neon_vtbx2_v:
+  case NEON::BI__builtin_neon_vqtbx2_v:
+  case NEON::BI__builtin_neon_vqtbx2q_v:
+  case NEON::BI__builtin_neon_vtbx3_v:
+  case NEON::BI__builtin_neon_vqtbx3_v:
+  case NEON::BI__builtin_neon_vqtbx3q_v:
+  case NEON::BI__builtin_neon_vtbx4_v:
+  case NEON::BI__builtin_neon_vqtbx4_v:
+  case NEON::BI__builtin_neon_vqtbx4q_v:
     break;
   }
 
@@ -2752,35 +3589,31 @@
     Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
   }
 
-  Arg = E->getArg(TblPos);
-  llvm::Type *TblTy = CGF.ConvertType(Arg->getType());
-  llvm::VectorType *VTblTy = cast<llvm::VectorType>(TblTy);
-  llvm::Type *Tys[2] = { Ty, VTblTy };
   unsigned nElts = VTy->getNumElements();  
 
   // AArch64 scalar builtins are not overloaded, they do not have an extra
   // argument that specifies the vector type, need to handle each case.
   SmallVector<Value *, 2> TblOps;
   switch (BuiltinID) {
-  case AArch64::BI__builtin_neon_vtbl1_v: {
+  case NEON::BI__builtin_neon_vtbl1_v: {
     TblOps.push_back(Ops[0]);
     return packTBLDVectorList(CGF, TblOps, 0, Ops[1], Ty,
                               Intrinsic::aarch64_neon_vtbl1, "vtbl1");
   }
-  case AArch64::BI__builtin_neon_vtbl2_v: {
+  case NEON::BI__builtin_neon_vtbl2_v: {
     TblOps.push_back(Ops[0]);
     TblOps.push_back(Ops[1]);
     return packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
                               Intrinsic::aarch64_neon_vtbl1, "vtbl1");
   }
-  case AArch64::BI__builtin_neon_vtbl3_v: {
+  case NEON::BI__builtin_neon_vtbl3_v: {
     TblOps.push_back(Ops[0]);
     TblOps.push_back(Ops[1]);
     TblOps.push_back(Ops[2]);
     return packTBLDVectorList(CGF, TblOps, 0, Ops[3], Ty,
                               Intrinsic::aarch64_neon_vtbl2, "vtbl2");
   }
-  case AArch64::BI__builtin_neon_vtbl4_v: {
+  case NEON::BI__builtin_neon_vtbl4_v: {
     TblOps.push_back(Ops[0]);
     TblOps.push_back(Ops[1]);
     TblOps.push_back(Ops[2]);
@@ -2788,7 +3621,7 @@
     return packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
                               Intrinsic::aarch64_neon_vtbl2, "vtbl2");
   }
-  case AArch64::BI__builtin_neon_vtbx1_v: {
+  case NEON::BI__builtin_neon_vtbx1_v: {
     TblOps.push_back(Ops[1]);
     Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
                                     Intrinsic::aarch64_neon_vtbl1, "vtbl1");
@@ -2805,13 +3638,13 @@
     Function *BslF = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty);
     return CGF.EmitNeonCall(BslF, BslOps, "vbsl");
   }
-  case AArch64::BI__builtin_neon_vtbx2_v: {
+  case NEON::BI__builtin_neon_vtbx2_v: {
     TblOps.push_back(Ops[1]);
     TblOps.push_back(Ops[2]);
     return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
                               Intrinsic::aarch64_neon_vtbx1, "vtbx1");
   }
-  case AArch64::BI__builtin_neon_vtbx3_v: {
+  case NEON::BI__builtin_neon_vtbx3_v: {
     TblOps.push_back(Ops[1]);
     TblOps.push_back(Ops[2]);
     TblOps.push_back(Ops[3]);
@@ -2831,7 +3664,7 @@
     Function *BslF = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty);
     return CGF.EmitNeonCall(BslF, BslOps, "vbsl");
   }
-  case AArch64::BI__builtin_neon_vtbx4_v: {
+  case NEON::BI__builtin_neon_vtbx4_v: {
     TblOps.push_back(Ops[1]);
     TblOps.push_back(Ops[2]);
     TblOps.push_back(Ops[3]);
@@ -2839,29 +3672,29 @@
     return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
                               Intrinsic::aarch64_neon_vtbx2, "vtbx2");
   }
-  case AArch64::BI__builtin_neon_vqtbl1_v:
-  case AArch64::BI__builtin_neon_vqtbl1q_v:
+  case NEON::BI__builtin_neon_vqtbl1_v:
+  case NEON::BI__builtin_neon_vqtbl1q_v:
     Int = Intrinsic::aarch64_neon_vtbl1; s = "vtbl1"; break;
-  case AArch64::BI__builtin_neon_vqtbl2_v:
-  case AArch64::BI__builtin_neon_vqtbl2q_v: {
+  case NEON::BI__builtin_neon_vqtbl2_v:
+  case NEON::BI__builtin_neon_vqtbl2q_v: {
     Int = Intrinsic::aarch64_neon_vtbl2; s = "vtbl2"; break;
-  case AArch64::BI__builtin_neon_vqtbl3_v:
-  case AArch64::BI__builtin_neon_vqtbl3q_v:
+  case NEON::BI__builtin_neon_vqtbl3_v:
+  case NEON::BI__builtin_neon_vqtbl3q_v:
     Int = Intrinsic::aarch64_neon_vtbl3; s = "vtbl3"; break;
-  case AArch64::BI__builtin_neon_vqtbl4_v:
-  case AArch64::BI__builtin_neon_vqtbl4q_v:
+  case NEON::BI__builtin_neon_vqtbl4_v:
+  case NEON::BI__builtin_neon_vqtbl4q_v:
     Int = Intrinsic::aarch64_neon_vtbl4; s = "vtbl4"; break;
-  case AArch64::BI__builtin_neon_vqtbx1_v:
-  case AArch64::BI__builtin_neon_vqtbx1q_v:
+  case NEON::BI__builtin_neon_vqtbx1_v:
+  case NEON::BI__builtin_neon_vqtbx1q_v:
     Int = Intrinsic::aarch64_neon_vtbx1; s = "vtbx1"; break;
-  case AArch64::BI__builtin_neon_vqtbx2_v:
-  case AArch64::BI__builtin_neon_vqtbx2q_v:
+  case NEON::BI__builtin_neon_vqtbx2_v:
+  case NEON::BI__builtin_neon_vqtbx2q_v:
     Int = Intrinsic::aarch64_neon_vtbx2; s = "vtbx2"; break;
-  case AArch64::BI__builtin_neon_vqtbx3_v:
-  case AArch64::BI__builtin_neon_vqtbx3q_v:
+  case NEON::BI__builtin_neon_vqtbx3_v:
+  case NEON::BI__builtin_neon_vqtbx3q_v:
     Int = Intrinsic::aarch64_neon_vtbx3; s = "vtbx3"; break;
-  case AArch64::BI__builtin_neon_vqtbx4_v:
-  case AArch64::BI__builtin_neon_vqtbx4q_v:
+  case NEON::BI__builtin_neon_vqtbx4_v:
+  case NEON::BI__builtin_neon_vqtbx4q_v:
     Int = Intrinsic::aarch64_neon_vtbx4; s = "vtbx4"; break;
   }
   }
@@ -2869,15 +3702,23 @@
   if (!Int)
     return 0;
 
-  Function *F = CGF.CGM.getIntrinsic(Int, Tys);
+  Function *F = CGF.CGM.getIntrinsic(Int, Ty);
   return CGF.EmitNeonCall(F, Ops, s);
 }
 
 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                                const CallExpr *E) {
+
   // Process AArch64 scalar builtins
-  if (Value *Result = EmitAArch64ScalarBuiltinExpr(*this, BuiltinID, E))
+  llvm::ArrayRef<NeonIntrinsicInfo> SISDInfo(AArch64SISDIntrinsicInfo);
+  const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+      SISDInfo, BuiltinID, AArch64SISDIntrinsicInfoProvenSorted);
+
+  if (Builtin) {
+    Value *Result = EmitAArch64ScalarBuiltinExpr(*this, *Builtin, E);
+    assert(Result && "SISD intrinsic should have been handled");
     return Result;
+  }
 
   // Process AArch64 table lookup builtins
   if (Value *Result = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E))
@@ -2899,20 +3740,60 @@
 
   SmallVector<Value *, 4> Ops;
   llvm::Value *Align = 0; // Alignment for load/store
+
+  if (BuiltinID == NEON::BI__builtin_neon_vldrq_p128) {
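+    // Load the value through an fp128 pointer and bitcast the result to i128
+    // (poly128_t).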
+    Value *Op = EmitScalarExpr(E->getArg(0));
+    unsigned addressSpace =
+      cast<llvm::PointerType>(Op->getType())->getAddressSpace();
+    llvm::Type *Ty = llvm::Type::getFP128PtrTy(getLLVMContext(), addressSpace);
+    Op = Builder.CreateBitCast(Op, Ty);
+    Op = Builder.CreateLoad(Op);
+    Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
+    return Builder.CreateBitCast(Op, Ty);
+  }
+  if (BuiltinID == NEON::BI__builtin_neon_vstrq_p128) {
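+    // Bitcast the i128 (poly128_t) value to fp128 and store it through an
+    // fp128 pointer.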
+    Value *Op0 = EmitScalarExpr(E->getArg(0));
+    unsigned addressSpace =
+      cast<llvm::PointerType>(Op0->getType())->getAddressSpace();
+    llvm::Type *PTy = llvm::Type::getFP128PtrTy(getLLVMContext(), addressSpace);
+    Op0 = Builder.CreateBitCast(Op0, PTy);
+    Value *Op1 = EmitScalarExpr(E->getArg(1));
+    llvm::Type *Ty = llvm::Type::getFP128Ty(getLLVMContext());
+    Op1 = Builder.CreateBitCast(Op1, Ty);
+    return Builder.CreateStore(Op1, Op0);
+  }
   for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
     if (i == 0) {
       switch (BuiltinID) {
-      case AArch64::BI__builtin_neon_vst1_x2_v:
-      case AArch64::BI__builtin_neon_vst1q_x2_v:
-      case AArch64::BI__builtin_neon_vst1_x3_v:
-      case AArch64::BI__builtin_neon_vst1q_x3_v:
-      case AArch64::BI__builtin_neon_vst1_x4_v:
-      case AArch64::BI__builtin_neon_vst1q_x4_v:
+      case NEON::BI__builtin_neon_vld1_v:
+      case NEON::BI__builtin_neon_vld1q_v:
+      case NEON::BI__builtin_neon_vst1_v:
+      case NEON::BI__builtin_neon_vst1q_v:
+      case NEON::BI__builtin_neon_vst2_v:
+      case NEON::BI__builtin_neon_vst2q_v:
+      case NEON::BI__builtin_neon_vst3_v:
+      case NEON::BI__builtin_neon_vst3q_v:
+      case NEON::BI__builtin_neon_vst4_v:
+      case NEON::BI__builtin_neon_vst4q_v:
+      case NEON::BI__builtin_neon_vst1_x2_v:
+      case NEON::BI__builtin_neon_vst1q_x2_v:
+      case NEON::BI__builtin_neon_vst1_x3_v:
+      case NEON::BI__builtin_neon_vst1q_x3_v:
+      case NEON::BI__builtin_neon_vst1_x4_v:
+      case NEON::BI__builtin_neon_vst1q_x4_v:
       // Handle ld1/st1 lane in this function a little different from ARM.
-      case AArch64::BI__builtin_neon_vld1_lane_v:
-      case AArch64::BI__builtin_neon_vld1q_lane_v:
-      case AArch64::BI__builtin_neon_vst1_lane_v:
-      case AArch64::BI__builtin_neon_vst1q_lane_v:
+      case NEON::BI__builtin_neon_vld1_lane_v:
+      case NEON::BI__builtin_neon_vld1q_lane_v:
+      case NEON::BI__builtin_neon_vst1_lane_v:
+      case NEON::BI__builtin_neon_vst1q_lane_v:
+      case NEON::BI__builtin_neon_vst2_lane_v:
+      case NEON::BI__builtin_neon_vst2q_lane_v:
+      case NEON::BI__builtin_neon_vst3_lane_v:
+      case NEON::BI__builtin_neon_vst3q_lane_v:
+      case NEON::BI__builtin_neon_vst4_lane_v:
+      case NEON::BI__builtin_neon_vst4q_lane_v:
+      case NEON::BI__builtin_neon_vld1_dup_v:
+      case NEON::BI__builtin_neon_vld1q_dup_v:
         // Get the alignment for the argument in addition to the value;
         // we'll use it later.
         std::pair<llvm::Value *, unsigned> Src =
@@ -2924,21 +3805,31 @@
     }
     if (i == 1) {
       switch (BuiltinID) {
-      case AArch64::BI__builtin_neon_vld1_x2_v:
-      case AArch64::BI__builtin_neon_vld1q_x2_v:
-      case AArch64::BI__builtin_neon_vld1_x3_v:
-      case AArch64::BI__builtin_neon_vld1q_x3_v:
-      case AArch64::BI__builtin_neon_vld1_x4_v:
-      case AArch64::BI__builtin_neon_vld1q_x4_v:
+      case NEON::BI__builtin_neon_vld2_v:
+      case NEON::BI__builtin_neon_vld2q_v:
+      case NEON::BI__builtin_neon_vld3_v:
+      case NEON::BI__builtin_neon_vld3q_v:
+      case NEON::BI__builtin_neon_vld4_v:
+      case NEON::BI__builtin_neon_vld4q_v:
+      case NEON::BI__builtin_neon_vld1_x2_v:
+      case NEON::BI__builtin_neon_vld1q_x2_v:
+      case NEON::BI__builtin_neon_vld1_x3_v:
+      case NEON::BI__builtin_neon_vld1q_x3_v:
+      case NEON::BI__builtin_neon_vld1_x4_v:
+      case NEON::BI__builtin_neon_vld1q_x4_v:
       // Handle ld1/st1 dup lane in this function a little different from ARM.
-      case AArch64::BI__builtin_neon_vld2_dup_v:
-      case AArch64::BI__builtin_neon_vld2q_dup_v:
-      case AArch64::BI__builtin_neon_vld3_dup_v:
-      case AArch64::BI__builtin_neon_vld3q_dup_v:
-      case AArch64::BI__builtin_neon_vld4_dup_v:
-      case AArch64::BI__builtin_neon_vld4q_dup_v:
-      case AArch64::BI__builtin_neon_vld2_lane_v:
-      case AArch64::BI__builtin_neon_vld2q_lane_v:
+      case NEON::BI__builtin_neon_vld2_dup_v:
+      case NEON::BI__builtin_neon_vld2q_dup_v:
+      case NEON::BI__builtin_neon_vld3_dup_v:
+      case NEON::BI__builtin_neon_vld3q_dup_v:
+      case NEON::BI__builtin_neon_vld4_dup_v:
+      case NEON::BI__builtin_neon_vld4q_dup_v:
+      case NEON::BI__builtin_neon_vld2_lane_v:
+      case NEON::BI__builtin_neon_vld2q_lane_v:
+      case NEON::BI__builtin_neon_vld3_lane_v:
+      case NEON::BI__builtin_neon_vld3q_lane_v:
+      case NEON::BI__builtin_neon_vld4_lane_v:
+      case NEON::BI__builtin_neon_vld4q_lane_v:
         // Get the alignment for the argument in addition to the value;
         // we'll use it later.
         std::pair<llvm::Value *, unsigned> Src =
@@ -2967,6 +3858,17 @@
   if (!Ty)
     return 0;
 
+
+  // Many NEON builtins have identical semantics and uses in ARM and
+  // AArch64. Emit these in a single function.
+  llvm::ArrayRef<NeonIntrinsicInfo> IntrinsicMap(ARMSIMDIntrinsicMap);
+  Builtin = findNeonIntrinsicInMap(IntrinsicMap, BuiltinID,
+                                   NEONSIMDIntrinsicsProvenSorted);
+  if (Builtin)
+    return EmitCommonNeonBuiltinExpr(
+        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
+        Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);
+
   unsigned Int;
   switch (BuiltinID) {
   default:
@@ -2975,208 +3877,30 @@
   // AArch64 builtins mapping to legacy ARM v7 builtins.
   // FIXME: the mapped builtins listed correspond to what has been tested
   // in aarch64-neon-intrinsics.c so far.
-  case AArch64::BI__builtin_neon_vuzp_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vuzp_v, E);
-  case AArch64::BI__builtin_neon_vuzpq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vuzpq_v, E);
-  case AArch64::BI__builtin_neon_vzip_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vzip_v, E);
-  case AArch64::BI__builtin_neon_vzipq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vzipq_v, E);
-  case AArch64::BI__builtin_neon_vtrn_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtrn_v, E);
-  case AArch64::BI__builtin_neon_vtrnq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtrnq_v, E);
-  case AArch64::BI__builtin_neon_vext_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vext_v, E);
-  case AArch64::BI__builtin_neon_vextq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vextq_v, E);
-  case AArch64::BI__builtin_neon_vmul_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmul_v, E);
-  case AArch64::BI__builtin_neon_vmulq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmulq_v, E);
-  case AArch64::BI__builtin_neon_vabd_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabd_v, E);
-  case AArch64::BI__builtin_neon_vabdq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabdq_v, E);
-  case AArch64::BI__builtin_neon_vfma_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfma_v, E);
-  case AArch64::BI__builtin_neon_vfmaq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfmaq_v, E);
-  case AArch64::BI__builtin_neon_vbsl_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbsl_v, E);
-  case AArch64::BI__builtin_neon_vbslq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbslq_v, E);
-  case AArch64::BI__builtin_neon_vrsqrts_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrts_v, E);
-  case AArch64::BI__builtin_neon_vrsqrtsq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrtsq_v, E);
-  case AArch64::BI__builtin_neon_vrecps_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecps_v, E);
-  case AArch64::BI__builtin_neon_vrecpsq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpsq_v, E);
-  case AArch64::BI__builtin_neon_vcale_v:
-    if (VTy->getVectorNumElements() == 1) {
-      std::swap(Ops[0], Ops[1]);
-    } else {
-      return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcale_v, E);
-    }
-  case AArch64::BI__builtin_neon_vcage_v:
-    if (VTy->getVectorNumElements() == 1) {
-      // Determine the types of this overloaded AArch64 intrinsic
-      SmallVector<llvm::Type *, 3> Tys;
-      Tys.push_back(VTy);
-      VTy = llvm::VectorType::get(DoubleTy, 1);
-      Tys.push_back(VTy);
-      Tys.push_back(VTy);
-      Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vcage, Tys);
-      return EmitNeonCall(F, Ops, "vcage");
-    }
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcage_v, E);
-  case AArch64::BI__builtin_neon_vcaleq_v:
-    std::swap(Ops[0], Ops[1]);
-  case AArch64::BI__builtin_neon_vcageq_v: {
-    Function *F;
-    if (VTy->getElementType()->isIntegerTy(64))
-      F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgeq);
-    else
-      F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
-    return EmitNeonCall(F, Ops, "vcage");
-  }
-  case AArch64::BI__builtin_neon_vcalt_v:
-    if (VTy->getVectorNumElements() == 1) {
-      std::swap(Ops[0], Ops[1]);
-    } else {
-      return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcalt_v, E);
-    }
-  case AArch64::BI__builtin_neon_vcagt_v:
-    if (VTy->getVectorNumElements() == 1) {
-      // Determine the types of this overloaded AArch64 intrinsic
-      SmallVector<llvm::Type *, 3> Tys;
-      Tys.push_back(VTy);
-      VTy = llvm::VectorType::get(DoubleTy, 1);
-      Tys.push_back(VTy);
-      Tys.push_back(VTy);
-      Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vcagt, Tys);
-      return EmitNeonCall(F, Ops, "vcagt");
-    }
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcagt_v, E);
-  case AArch64::BI__builtin_neon_vcaltq_v:
-    std::swap(Ops[0], Ops[1]);
-  case AArch64::BI__builtin_neon_vcagtq_v: {
-    Function *F;
-    if (VTy->getElementType()->isIntegerTy(64))
-      F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgtq);
-    else
-      F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
-    return EmitNeonCall(F, Ops, "vcagt");
-  }
-  case AArch64::BI__builtin_neon_vtst_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtst_v, E);
-  case AArch64::BI__builtin_neon_vtstq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtstq_v, E);
-  case AArch64::BI__builtin_neon_vhadd_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhadd_v, E);
-  case AArch64::BI__builtin_neon_vhaddq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhaddq_v, E);
-  case AArch64::BI__builtin_neon_vhsub_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsub_v, E);
-  case AArch64::BI__builtin_neon_vhsubq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsubq_v, E);
-  case AArch64::BI__builtin_neon_vrhadd_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhadd_v, E);
-  case AArch64::BI__builtin_neon_vrhaddq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhaddq_v, E);
-  case AArch64::BI__builtin_neon_vqadd_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqadd_v, E);
-  case AArch64::BI__builtin_neon_vqaddq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqaddq_v, E);
-  case AArch64::BI__builtin_neon_vqsub_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsub_v, E);
-  case AArch64::BI__builtin_neon_vqsubq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsubq_v, E);
-  case AArch64::BI__builtin_neon_vshl_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_v, E);
-  case AArch64::BI__builtin_neon_vshlq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_v, E);
-  case AArch64::BI__builtin_neon_vqshl_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_v, E);
-  case AArch64::BI__builtin_neon_vqshlq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_v, E);
-  case AArch64::BI__builtin_neon_vrshl_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshl_v, E);
-  case AArch64::BI__builtin_neon_vrshlq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshlq_v, E);
-  case AArch64::BI__builtin_neon_vqrshl_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshl_v, E);
-  case AArch64::BI__builtin_neon_vqrshlq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshlq_v, E);
-  case AArch64::BI__builtin_neon_vaddhn_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vaddhn_v, E);
-  case AArch64::BI__builtin_neon_vraddhn_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vraddhn_v, E);
-  case AArch64::BI__builtin_neon_vsubhn_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsubhn_v, E);
-  case AArch64::BI__builtin_neon_vrsubhn_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsubhn_v, E);
-  case AArch64::BI__builtin_neon_vmull_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmull_v, E);
-  case AArch64::BI__builtin_neon_vqdmull_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmull_v, E);
-  case AArch64::BI__builtin_neon_vqdmlal_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlal_v, E);
-  case AArch64::BI__builtin_neon_vqdmlsl_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlsl_v, E);
-  case AArch64::BI__builtin_neon_vmax_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmax_v, E);
-  case AArch64::BI__builtin_neon_vmaxq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmaxq_v, E);
-  case AArch64::BI__builtin_neon_vmin_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmin_v, E);
-  case AArch64::BI__builtin_neon_vminq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vminq_v, E);
-  case AArch64::BI__builtin_neon_vpmax_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmax_v, E);
-  case AArch64::BI__builtin_neon_vpmin_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmin_v, E);
-  case AArch64::BI__builtin_neon_vpadd_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadd_v, E);
-  case AArch64::BI__builtin_neon_vqdmulh_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulh_v, E);
-  case AArch64::BI__builtin_neon_vqdmulhq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulhq_v, E);
-  case AArch64::BI__builtin_neon_vqrdmulh_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulh_v, E);
-  case AArch64::BI__builtin_neon_vqrdmulhq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulhq_v, E);
 
   // Shift by immediate
-  case AArch64::BI__builtin_neon_vshr_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshr_n_v, E);
-  case AArch64::BI__builtin_neon_vshrq_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshrq_n_v, E);
-  case AArch64::BI__builtin_neon_vrshr_n_v:
-  case AArch64::BI__builtin_neon_vrshrq_n_v:
+  case NEON::BI__builtin_neon_vrshr_n_v:
+  case NEON::BI__builtin_neon_vrshrq_n_v:
     Int = usgn ? Intrinsic::aarch64_neon_vurshr
                : Intrinsic::aarch64_neon_vsrshr;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n");
-  case AArch64::BI__builtin_neon_vsra_n_v:
+  case NEON::BI__builtin_neon_vsra_n_v:
     if (VTy->getElementType()->isIntegerTy(64)) {
       Int = usgn ? Intrinsic::aarch64_neon_vsradu_n
                  : Intrinsic::aarch64_neon_vsrads_n;
       return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vsra_n");
     }
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsra_n_v, E);
-  case AArch64::BI__builtin_neon_vsraq_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsraq_n_v, E);
-  case AArch64::BI__builtin_neon_vrsra_n_v:
+    return EmitARMBuiltinExpr(NEON::BI__builtin_neon_vsra_n_v, E);
+  case NEON::BI__builtin_neon_vsraq_n_v:
+    return EmitARMBuiltinExpr(NEON::BI__builtin_neon_vsraq_n_v, E);
+  case NEON::BI__builtin_neon_vrsra_n_v:
     if (VTy->getElementType()->isIntegerTy(64)) {
       Int = usgn ? Intrinsic::aarch64_neon_vrsradu_n
                  : Intrinsic::aarch64_neon_vrsrads_n;
       return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vrsra_n");
     }
     // fall through
-  case AArch64::BI__builtin_neon_vrsraq_n_v: {
+  case NEON::BI__builtin_neon_vrsraq_n_v: {
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Int = usgn ? Intrinsic::aarch64_neon_vurshr
@@ -3184,73 +3908,39 @@
     Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
     return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
   }
-  case AArch64::BI__builtin_neon_vshl_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_n_v, E);
-  case AArch64::BI__builtin_neon_vshlq_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_n_v, E);
-  case AArch64::BI__builtin_neon_vqshl_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_n_v, E);
-  case AArch64::BI__builtin_neon_vqshlq_n_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_n_v, E);
-  case AArch64::BI__builtin_neon_vqshlu_n_v:
-  case AArch64::BI__builtin_neon_vqshluq_n_v:
+  case NEON::BI__builtin_neon_vqshlu_n_v:
+  case NEON::BI__builtin_neon_vqshluq_n_v:
     Int = Intrinsic::aarch64_neon_vsqshlu;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n");
-  case AArch64::BI__builtin_neon_vsri_n_v:
-  case AArch64::BI__builtin_neon_vsriq_n_v:
+  case NEON::BI__builtin_neon_vsri_n_v:
+  case NEON::BI__builtin_neon_vsriq_n_v:
     Int = Intrinsic::aarch64_neon_vsri;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsri_n");
-  case AArch64::BI__builtin_neon_vsli_n_v:
-  case AArch64::BI__builtin_neon_vsliq_n_v:
+  case NEON::BI__builtin_neon_vsli_n_v:
+  case NEON::BI__builtin_neon_vsliq_n_v:
     Int = Intrinsic::aarch64_neon_vsli;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsli_n");
-  case AArch64::BI__builtin_neon_vshll_n_v: {
-    llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
-    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
-    if (usgn)
-      Ops[0] = Builder.CreateZExt(Ops[0], VTy);
-    else
-      Ops[0] = Builder.CreateSExt(Ops[0], VTy);
-    Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
-    return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
-  }
-  case AArch64::BI__builtin_neon_vshrn_n_v: {
-    llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
-    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
-    Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
-    if (usgn)
-      Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
-    else
-      Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
-    return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
-  }
-  case AArch64::BI__builtin_neon_vqshrun_n_v:
+  case NEON::BI__builtin_neon_vqshrun_n_v:
     Int = Intrinsic::aarch64_neon_vsqshrun;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
-  case AArch64::BI__builtin_neon_vrshrn_n_v:
+  case NEON::BI__builtin_neon_vrshrn_n_v:
     Int = Intrinsic::aarch64_neon_vrshrn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
-  case AArch64::BI__builtin_neon_vqrshrun_n_v:
+  case NEON::BI__builtin_neon_vqrshrun_n_v:
     Int = Intrinsic::aarch64_neon_vsqrshrun;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
-  case AArch64::BI__builtin_neon_vqshrn_n_v:
+  case NEON::BI__builtin_neon_vqshrn_n_v:
     Int = usgn ? Intrinsic::aarch64_neon_vuqshrn
                : Intrinsic::aarch64_neon_vsqshrn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
-  case AArch64::BI__builtin_neon_vqrshrn_n_v:
+  case NEON::BI__builtin_neon_vqrshrn_n_v:
     Int = usgn ? Intrinsic::aarch64_neon_vuqrshrn
                : Intrinsic::aarch64_neon_vsqrshrn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
 
   // Convert
-  case AArch64::BI__builtin_neon_vmovl_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovl_v, E);
-  case AArch64::BI__builtin_neon_vcvt_n_f32_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_f32_v, E);
-  case AArch64::BI__builtin_neon_vcvtq_n_f32_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_f32_v, E);
-  case AArch64::BI__builtin_neon_vcvt_n_f64_v:
-  case AArch64::BI__builtin_neon_vcvtq_n_f64_v: {
+  case NEON::BI__builtin_neon_vcvt_n_f64_v:
+  case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
     llvm::Type *FloatTy =
         GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
     llvm::Type *Tys[2] = { FloatTy, Ty };
@@ -3259,78 +3949,26 @@
     Function *F = CGM.getIntrinsic(Int, Tys);
     return EmitNeonCall(F, Ops, "vcvt_n");
   }
-  case AArch64::BI__builtin_neon_vcvt_n_s32_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_s32_v, E);
-  case AArch64::BI__builtin_neon_vcvtq_n_s32_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_s32_v, E);
-  case AArch64::BI__builtin_neon_vcvt_n_u32_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_u32_v, E);
-  case AArch64::BI__builtin_neon_vcvtq_n_u32_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_u32_v, E);
-  case AArch64::BI__builtin_neon_vcvt_n_s64_v:
-  case AArch64::BI__builtin_neon_vcvt_n_u64_v:
-  case AArch64::BI__builtin_neon_vcvtq_n_s64_v:
-  case AArch64::BI__builtin_neon_vcvtq_n_u64_v: {
-    llvm::Type *FloatTy =
-        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
-    llvm::Type *Tys[2] = { Ty, FloatTy };
-    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
-               : Intrinsic::arm_neon_vcvtfp2fxs;
-    Function *F = CGM.getIntrinsic(Int, Tys);
-    return EmitNeonCall(F, Ops, "vcvt_n");
-  }
 
   // Load/Store
-  case AArch64::BI__builtin_neon_vld1_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_v, E);
-  case AArch64::BI__builtin_neon_vld1q_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_v, E);
-  case AArch64::BI__builtin_neon_vld2_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2_v, E);
-  case AArch64::BI__builtin_neon_vld2q_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_v, E);
-  case AArch64::BI__builtin_neon_vld3_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_v, E);
-  case AArch64::BI__builtin_neon_vld3q_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_v, E);
-  case AArch64::BI__builtin_neon_vld4_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_v, E);
-  case AArch64::BI__builtin_neon_vld4q_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_v, E);
-  case AArch64::BI__builtin_neon_vst1_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1_v, E);
-  case AArch64::BI__builtin_neon_vst1q_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1q_v, E);
-  case AArch64::BI__builtin_neon_vst2_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_v, E);
-  case AArch64::BI__builtin_neon_vst2q_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_v, E);
-  case AArch64::BI__builtin_neon_vst3_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_v, E);
-  case AArch64::BI__builtin_neon_vst3q_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_v, E);
-  case AArch64::BI__builtin_neon_vst4_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_v, E);
-  case AArch64::BI__builtin_neon_vst4q_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_v, E);
-  case AArch64::BI__builtin_neon_vld1_x2_v:
-  case AArch64::BI__builtin_neon_vld1q_x2_v:
-  case AArch64::BI__builtin_neon_vld1_x3_v:
-  case AArch64::BI__builtin_neon_vld1q_x3_v:
-  case AArch64::BI__builtin_neon_vld1_x4_v:
-  case AArch64::BI__builtin_neon_vld1q_x4_v: {
+  case NEON::BI__builtin_neon_vld1_x2_v:
+  case NEON::BI__builtin_neon_vld1q_x2_v:
+  case NEON::BI__builtin_neon_vld1_x3_v:
+  case NEON::BI__builtin_neon_vld1q_x3_v:
+  case NEON::BI__builtin_neon_vld1_x4_v:
+  case NEON::BI__builtin_neon_vld1q_x4_v: {
     unsigned Int;
     switch (BuiltinID) {
-    case AArch64::BI__builtin_neon_vld1_x2_v:
-    case AArch64::BI__builtin_neon_vld1q_x2_v:
+    case NEON::BI__builtin_neon_vld1_x2_v:
+    case NEON::BI__builtin_neon_vld1q_x2_v:
       Int = Intrinsic::aarch64_neon_vld1x2;
       break;
-    case AArch64::BI__builtin_neon_vld1_x3_v:
-    case AArch64::BI__builtin_neon_vld1q_x3_v:
+    case NEON::BI__builtin_neon_vld1_x3_v:
+    case NEON::BI__builtin_neon_vld1q_x3_v:
       Int = Intrinsic::aarch64_neon_vld1x3;
       break;
-    case AArch64::BI__builtin_neon_vld1_x4_v:
-    case AArch64::BI__builtin_neon_vld1q_x4_v:
+    case NEON::BI__builtin_neon_vld1_x4_v:
+    case NEON::BI__builtin_neon_vld1q_x4_v:
       Int = Intrinsic::aarch64_neon_vld1x4;
       break;
     }
@@ -3340,32 +3978,32 @@
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
-  case AArch64::BI__builtin_neon_vst1_x2_v:
-  case AArch64::BI__builtin_neon_vst1q_x2_v:
-  case AArch64::BI__builtin_neon_vst1_x3_v:
-  case AArch64::BI__builtin_neon_vst1q_x3_v:
-  case AArch64::BI__builtin_neon_vst1_x4_v:
-  case AArch64::BI__builtin_neon_vst1q_x4_v: {
+  case NEON::BI__builtin_neon_vst1_x2_v:
+  case NEON::BI__builtin_neon_vst1q_x2_v:
+  case NEON::BI__builtin_neon_vst1_x3_v:
+  case NEON::BI__builtin_neon_vst1q_x3_v:
+  case NEON::BI__builtin_neon_vst1_x4_v:
+  case NEON::BI__builtin_neon_vst1q_x4_v: {
     Ops.push_back(Align);
     unsigned Int;
     switch (BuiltinID) {
-    case AArch64::BI__builtin_neon_vst1_x2_v:
-    case AArch64::BI__builtin_neon_vst1q_x2_v:
+    case NEON::BI__builtin_neon_vst1_x2_v:
+    case NEON::BI__builtin_neon_vst1q_x2_v:
       Int = Intrinsic::aarch64_neon_vst1x2;
       break;
-    case AArch64::BI__builtin_neon_vst1_x3_v:
-    case AArch64::BI__builtin_neon_vst1q_x3_v:
+    case NEON::BI__builtin_neon_vst1_x3_v:
+    case NEON::BI__builtin_neon_vst1q_x3_v:
       Int = Intrinsic::aarch64_neon_vst1x3;
       break;
-    case AArch64::BI__builtin_neon_vst1_x4_v:
-    case AArch64::BI__builtin_neon_vst1q_x4_v:
+    case NEON::BI__builtin_neon_vst1_x4_v:
+    case NEON::BI__builtin_neon_vst1q_x4_v:
       Int = Intrinsic::aarch64_neon_vst1x4;
       break;
     }
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
   }
-  case AArch64::BI__builtin_neon_vld1_lane_v:
-  case AArch64::BI__builtin_neon_vld1q_lane_v: {
+  case NEON::BI__builtin_neon_vld1_lane_v:
+  case NEON::BI__builtin_neon_vld1q_lane_v: {
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ty = llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -3373,20 +4011,8 @@
     Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
     return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
   }
-  case AArch64::BI__builtin_neon_vld2_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_lane_v, E);
-  case AArch64::BI__builtin_neon_vld2q_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_lane_v, E);
-  case AArch64::BI__builtin_neon_vld3_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_lane_v, E);
-  case AArch64::BI__builtin_neon_vld3q_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_lane_v, E);
-  case AArch64::BI__builtin_neon_vld4_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_lane_v, E);
-  case AArch64::BI__builtin_neon_vld4q_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_lane_v, E);
-  case AArch64::BI__builtin_neon_vst1_lane_v:
-  case AArch64::BI__builtin_neon_vst1q_lane_v: {
+  case NEON::BI__builtin_neon_vst1_lane_v:
+  case NEON::BI__builtin_neon_vst1q_lane_v: {
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -3395,39 +4021,23 @@
     St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
     return St;
   }
-  case AArch64::BI__builtin_neon_vst2_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_lane_v, E);
-  case AArch64::BI__builtin_neon_vst2q_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_lane_v, E);
-  case AArch64::BI__builtin_neon_vst3_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_lane_v, E);
-  case AArch64::BI__builtin_neon_vst3q_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_lane_v, E);
-  case AArch64::BI__builtin_neon_vst4_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_lane_v, E);
-  case AArch64::BI__builtin_neon_vst4q_lane_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_lane_v, E);
-  case AArch64::BI__builtin_neon_vld1_dup_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_dup_v, E);
-  case AArch64::BI__builtin_neon_vld1q_dup_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_dup_v, E);
-  case AArch64::BI__builtin_neon_vld2_dup_v:
-  case AArch64::BI__builtin_neon_vld2q_dup_v:
-  case AArch64::BI__builtin_neon_vld3_dup_v:
-  case AArch64::BI__builtin_neon_vld3q_dup_v:
-  case AArch64::BI__builtin_neon_vld4_dup_v:
-  case AArch64::BI__builtin_neon_vld4q_dup_v: {
+  case NEON::BI__builtin_neon_vld2_dup_v:
+  case NEON::BI__builtin_neon_vld2q_dup_v:
+  case NEON::BI__builtin_neon_vld3_dup_v:
+  case NEON::BI__builtin_neon_vld3q_dup_v:
+  case NEON::BI__builtin_neon_vld4_dup_v:
+  case NEON::BI__builtin_neon_vld4q_dup_v: {
     // Handle 64-bit x 1 elements as a special-case.  There is no "dup" needed.
     if (VTy->getElementType()->getPrimitiveSizeInBits() == 64 &&
         VTy->getNumElements() == 1) {
       switch (BuiltinID) {
-      case AArch64::BI__builtin_neon_vld2_dup_v:
+      case NEON::BI__builtin_neon_vld2_dup_v:
         Int = Intrinsic::arm_neon_vld2;
         break;
-      case AArch64::BI__builtin_neon_vld3_dup_v:
+      case NEON::BI__builtin_neon_vld3_dup_v:
         Int = Intrinsic::arm_neon_vld3;
         break;
-      case AArch64::BI__builtin_neon_vld4_dup_v:
+      case NEON::BI__builtin_neon_vld4_dup_v:
         Int = Intrinsic::arm_neon_vld4;
         break;
       default:
@@ -3440,16 +4050,16 @@
       return Builder.CreateStore(Ops[1], Ops[0]);
     }
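+    // General case: load a single lane group with the vldNlane intrinsic
+    // selected below and splat it across all lanes of each result vector.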
     switch (BuiltinID) {
-    case AArch64::BI__builtin_neon_vld2_dup_v:
-    case AArch64::BI__builtin_neon_vld2q_dup_v:
+    case NEON::BI__builtin_neon_vld2_dup_v:
+    case NEON::BI__builtin_neon_vld2q_dup_v:
       Int = Intrinsic::arm_neon_vld2lane;
       break;
-    case AArch64::BI__builtin_neon_vld3_dup_v:
-    case AArch64::BI__builtin_neon_vld3q_dup_v:
+    case NEON::BI__builtin_neon_vld3_dup_v:
+    case NEON::BI__builtin_neon_vld3q_dup_v:
       Int = Intrinsic::arm_neon_vld3lane;
       break;
-    case AArch64::BI__builtin_neon_vld4_dup_v:
-    case AArch64::BI__builtin_neon_vld4q_dup_v:
+    case NEON::BI__builtin_neon_vld4_dup_v:
+    case NEON::BI__builtin_neon_vld4q_dup_v:
       Int = Intrinsic::arm_neon_vld4lane;
       break;
     }
@@ -3478,42 +4088,11 @@
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
 
-  // Crypto
-  case AArch64::BI__builtin_neon_vaeseq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aese, Ty),
-                        Ops, "aese");
-  case AArch64::BI__builtin_neon_vaesdq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesd, Ty),
-                        Ops, "aesd");
-  case AArch64::BI__builtin_neon_vaesmcq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesmc, Ty),
-                        Ops, "aesmc");
-  case AArch64::BI__builtin_neon_vaesimcq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesimc, Ty),
-                        Ops, "aesimc");
-  case AArch64::BI__builtin_neon_vsha1su1q_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su1, Ty),
-                        Ops, "sha1su1");
-  case AArch64::BI__builtin_neon_vsha256su0q_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su0, Ty),
-                        Ops, "sha256su0");
-  case AArch64::BI__builtin_neon_vsha1su0q_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su0, Ty),
-                        Ops, "sha1su0");
-  case AArch64::BI__builtin_neon_vsha256hq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h, Ty),
-                        Ops, "sha256h");
-  case AArch64::BI__builtin_neon_vsha256h2q_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h2, Ty),
-                        Ops, "sha256h2");
-  case AArch64::BI__builtin_neon_vsha256su1q_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su1, Ty),
-                        Ops, "sha256su1");
-  case AArch64::BI__builtin_neon_vmul_lane_v:
-  case AArch64::BI__builtin_neon_vmul_laneq_v: {
+  case NEON::BI__builtin_neon_vmul_lane_v:
+  case NEON::BI__builtin_neon_vmul_laneq_v: {
     // v1f64 vmul_lane should be mapped to Neon scalar mul lane
     bool Quad = false;
-    if (BuiltinID == AArch64::BI__builtin_neon_vmul_laneq_v)
+    if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
       Quad = true;
     Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
     llvm::Type *VTy = GetNeonType(this,
@@ -3525,7 +4104,7 @@
   }
 
   // AArch64-only builtins
-  case AArch64::BI__builtin_neon_vfmaq_laneq_v: {
+  case NEON::BI__builtin_neon_vfmaq_laneq_v: {
     Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -3534,7 +4113,7 @@
     Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
     return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
   }
-  case AArch64::BI__builtin_neon_vfmaq_lane_v: {
+  case NEON::BI__builtin_neon_vfmaq_lane_v: {
     Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -3549,7 +4128,7 @@
 
     return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
   }
-  case AArch64::BI__builtin_neon_vfma_lane_v: {
+  case NEON::BI__builtin_neon_vfma_lane_v: {
     llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
     // v1f64 fma should be mapped to Neon scalar f64 fma
     if (VTy && VTy->getElementType() == DoubleTy) {
@@ -3571,7 +4150,7 @@
     Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
     return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
   }
-  case AArch64::BI__builtin_neon_vfma_laneq_v: {
+  case NEON::BI__builtin_neon_vfma_laneq_v: {
     llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
     // v1f64 fma should be mapped to Neon scalar f64 fma
     if (VTy && VTy->getElementType() == DoubleTy) {
@@ -3598,8 +4177,8 @@
 
     return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
   }
-  case AArch64::BI__builtin_neon_vfms_v:
-  case AArch64::BI__builtin_neon_vfmsq_v: {
+  case NEON::BI__builtin_neon_vfms_v:
+  case NEON::BI__builtin_neon_vfmsq_v: {
     Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -3610,314 +4189,136 @@
     // AArch64 intrinsic has it first.
     return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
   }
-  case AArch64::BI__builtin_neon_vmaxnm_v:
-  case AArch64::BI__builtin_neon_vmaxnmq_v: {
+  case NEON::BI__builtin_neon_vmaxnm_v:
+  case NEON::BI__builtin_neon_vmaxnmq_v: {
     Int = Intrinsic::aarch64_neon_vmaxnm;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
   }
-  case AArch64::BI__builtin_neon_vminnm_v:
-  case AArch64::BI__builtin_neon_vminnmq_v: {
+  case NEON::BI__builtin_neon_vminnm_v:
+  case NEON::BI__builtin_neon_vminnmq_v: {
     Int = Intrinsic::aarch64_neon_vminnm;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
   }
-  case AArch64::BI__builtin_neon_vpmaxnm_v:
-  case AArch64::BI__builtin_neon_vpmaxnmq_v: {
+  case NEON::BI__builtin_neon_vpmaxnm_v:
+  case NEON::BI__builtin_neon_vpmaxnmq_v: {
     Int = Intrinsic::aarch64_neon_vpmaxnm;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
   }
-  case AArch64::BI__builtin_neon_vpminnm_v:
-  case AArch64::BI__builtin_neon_vpminnmq_v: {
+  case NEON::BI__builtin_neon_vpminnm_v:
+  case NEON::BI__builtin_neon_vpminnmq_v: {
     Int = Intrinsic::aarch64_neon_vpminnm;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
   }
-  case AArch64::BI__builtin_neon_vpmaxq_v: {
+  case NEON::BI__builtin_neon_vpmaxq_v: {
     Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
   }
-  case AArch64::BI__builtin_neon_vpminq_v: {
+  case NEON::BI__builtin_neon_vpminq_v: {
     Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
   }
-  case AArch64::BI__builtin_neon_vpaddq_v: {
-    Int = Intrinsic::arm_neon_vpadd;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpadd");
-  }
-  case AArch64::BI__builtin_neon_vmulx_v:
-  case AArch64::BI__builtin_neon_vmulxq_v: {
+  case NEON::BI__builtin_neon_vmulx_v:
+  case NEON::BI__builtin_neon_vmulxq_v: {
     Int = Intrinsic::aarch64_neon_vmulx;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
   }
-  case AArch64::BI__builtin_neon_vpaddl_v:
-  case AArch64::BI__builtin_neon_vpaddlq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpaddl_v, E);
-  case AArch64::BI__builtin_neon_vpadal_v:
-  case AArch64::BI__builtin_neon_vpadalq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadal_v, E);
-  case AArch64::BI__builtin_neon_vqabs_v:
-  case AArch64::BI__builtin_neon_vqabsq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqabs_v, E);
-  case AArch64::BI__builtin_neon_vqneg_v:
-  case AArch64::BI__builtin_neon_vqnegq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqneg_v, E);
-  case AArch64::BI__builtin_neon_vabs_v:
-  case AArch64::BI__builtin_neon_vabsq_v: {
-    if (VTy->getElementType()->isFloatingPointTy()) {
-      return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
-    }
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabs_v, E);
-  }
-  case AArch64::BI__builtin_neon_vsqadd_v:
-  case AArch64::BI__builtin_neon_vsqaddq_v: {
+  case NEON::BI__builtin_neon_vsqadd_v:
+  case NEON::BI__builtin_neon_vsqaddq_v: {
     Int = Intrinsic::aarch64_neon_usqadd;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
   }
-  case AArch64::BI__builtin_neon_vuqadd_v:
-  case AArch64::BI__builtin_neon_vuqaddq_v: {
+  case NEON::BI__builtin_neon_vuqadd_v:
+  case NEON::BI__builtin_neon_vuqaddq_v: {
     Int = Intrinsic::aarch64_neon_suqadd;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
   }
-  case AArch64::BI__builtin_neon_vcls_v:
-  case AArch64::BI__builtin_neon_vclsq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcls_v, E);
-  case AArch64::BI__builtin_neon_vclz_v:
-  case AArch64::BI__builtin_neon_vclzq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vclz_v, E);
-  case AArch64::BI__builtin_neon_vcnt_v:
-  case AArch64::BI__builtin_neon_vcntq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcnt_v, E);
-  case AArch64::BI__builtin_neon_vrbit_v:
-  case AArch64::BI__builtin_neon_vrbitq_v:
+  case NEON::BI__builtin_neon_vrbit_v:
+  case NEON::BI__builtin_neon_vrbitq_v:
     Int = Intrinsic::aarch64_neon_rbit;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
-  case AArch64::BI__builtin_neon_vmovn_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovn_v, E);
-  case AArch64::BI__builtin_neon_vqmovun_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqmovun_v, E);
-  case AArch64::BI__builtin_neon_vqmovn_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqmovn_v, E);
-  case AArch64::BI__builtin_neon_vcvt_f16_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f16_v, E);
-  case AArch64::BI__builtin_neon_vcvt_f32_f16:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f32_f16, E);
-  case AArch64::BI__builtin_neon_vcvt_f32_f64: {
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, false));
+  case NEON::BI__builtin_neon_vcvt_f32_f64: {
+    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
+    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
     return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
   }
-  case AArch64::BI__builtin_neon_vcvtx_f32_v: {
+  case NEON::BI__builtin_neon_vcvtx_f32_v: {
     llvm::Type *EltTy = FloatTy;
     llvm::Type *ResTy = llvm::VectorType::get(EltTy, 2);
     llvm::Type *Tys[2] = { ResTy, Ty };
-    Int = Intrinsic::aarch64_neon_fcvtxn;
+    Int = Intrinsic::aarch64_neon_vcvtxn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtx_f32_f64");
   }
-  case AArch64::BI__builtin_neon_vcvt_f64_f32: {
+  case NEON::BI__builtin_neon_vcvt_f64_f32: {
     llvm::Type *OpTy =
         GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, false));
     Ops[0] = Builder.CreateBitCast(Ops[0], OpTy);
     return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
   }
-  case AArch64::BI__builtin_neon_vcvt_f64_v:
-  case AArch64::BI__builtin_neon_vcvtq_f64_v: {
+  case NEON::BI__builtin_neon_vcvt_f64_v:
+  case NEON::BI__builtin_neon_vcvtq_f64_v: {
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
     return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
   }
-  case AArch64::BI__builtin_neon_vrndn_v:
-  case AArch64::BI__builtin_neon_vrndnq_v: {
+  case NEON::BI__builtin_neon_vrndn_v:
+  case NEON::BI__builtin_neon_vrndnq_v: {
     Int = Intrinsic::aarch64_neon_frintn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
   }
-  case AArch64::BI__builtin_neon_vrnda_v:
-  case AArch64::BI__builtin_neon_vrndaq_v: {
+  case NEON::BI__builtin_neon_vrnda_v:
+  case NEON::BI__builtin_neon_vrndaq_v: {
     Int = Intrinsic::round;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
   }
-  case AArch64::BI__builtin_neon_vrndp_v:
-  case AArch64::BI__builtin_neon_vrndpq_v: {
+  case NEON::BI__builtin_neon_vrndp_v:
+  case NEON::BI__builtin_neon_vrndpq_v: {
     Int = Intrinsic::ceil;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
   }
-  case AArch64::BI__builtin_neon_vrndm_v:
-  case AArch64::BI__builtin_neon_vrndmq_v: {
+  case NEON::BI__builtin_neon_vrndm_v:
+  case NEON::BI__builtin_neon_vrndmq_v: {
     Int = Intrinsic::floor;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
   }
-  case AArch64::BI__builtin_neon_vrndx_v:
-  case AArch64::BI__builtin_neon_vrndxq_v: {
+  case NEON::BI__builtin_neon_vrndx_v:
+  case NEON::BI__builtin_neon_vrndxq_v: {
     Int = Intrinsic::rint;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
   }
-  case AArch64::BI__builtin_neon_vrnd_v:
-  case AArch64::BI__builtin_neon_vrndq_v: {
+  case NEON::BI__builtin_neon_vrnd_v:
+  case NEON::BI__builtin_neon_vrndq_v: {
     Int = Intrinsic::trunc;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd");
   }
-  case AArch64::BI__builtin_neon_vrndi_v:
-  case AArch64::BI__builtin_neon_vrndiq_v: {
+  case NEON::BI__builtin_neon_vrndi_v:
+  case NEON::BI__builtin_neon_vrndiq_v: {
     Int = Intrinsic::nearbyint;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
   }
-  case AArch64::BI__builtin_neon_vcvt_s32_v:
-  case AArch64::BI__builtin_neon_vcvt_u32_v:
-  case AArch64::BI__builtin_neon_vcvtq_s32_v:
-  case AArch64::BI__builtin_neon_vcvtq_u32_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_u32_v, E);
-  case AArch64::BI__builtin_neon_vcvt_s64_v:
-  case AArch64::BI__builtin_neon_vcvt_u64_v:
-  case AArch64::BI__builtin_neon_vcvtq_s64_v:
-  case AArch64::BI__builtin_neon_vcvtq_u64_v: {
-    llvm::Type *DoubleTy =
-        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
-    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
-    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
-                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
-  }
-  case AArch64::BI__builtin_neon_vcvtn_s32_v:
-  case AArch64::BI__builtin_neon_vcvtnq_s32_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtns;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtns_f32");
-  }
-  case AArch64::BI__builtin_neon_vcvtn_s64_v:
-  case AArch64::BI__builtin_neon_vcvtnq_s64_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtns;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtns_f64");
-  }
-  case AArch64::BI__builtin_neon_vcvtn_u32_v:
-  case AArch64::BI__builtin_neon_vcvtnq_u32_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtnu;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtnu_f32");
-  }
-  case AArch64::BI__builtin_neon_vcvtn_u64_v:
-  case AArch64::BI__builtin_neon_vcvtnq_u64_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtnu;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtnu_f64");
-  }
-  case AArch64::BI__builtin_neon_vcvtp_s32_v:
-  case AArch64::BI__builtin_neon_vcvtpq_s32_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtps;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtps_f32");
-  }
-  case AArch64::BI__builtin_neon_vcvtp_s64_v:
-  case AArch64::BI__builtin_neon_vcvtpq_s64_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtps;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtps_f64");
-  }
-  case AArch64::BI__builtin_neon_vcvtp_u32_v:
-  case AArch64::BI__builtin_neon_vcvtpq_u32_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtpu;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtpu_f32");
-  }
-  case AArch64::BI__builtin_neon_vcvtp_u64_v:
-  case AArch64::BI__builtin_neon_vcvtpq_u64_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtpu;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtpu_f64");
-  }
-  case AArch64::BI__builtin_neon_vcvtm_s32_v:
-  case AArch64::BI__builtin_neon_vcvtmq_s32_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtms;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtms_f32");
-  }
-  case AArch64::BI__builtin_neon_vcvtm_s64_v:
-  case AArch64::BI__builtin_neon_vcvtmq_s64_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtms;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtms_f64");
-  }
-  case AArch64::BI__builtin_neon_vcvtm_u32_v:
-  case AArch64::BI__builtin_neon_vcvtmq_u32_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtmu;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtmu_f32");
-  }
-  case AArch64::BI__builtin_neon_vcvtm_u64_v:
-  case AArch64::BI__builtin_neon_vcvtmq_u64_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtmu;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtmu_f64");
-  }
-  case AArch64::BI__builtin_neon_vcvta_s32_v:
-  case AArch64::BI__builtin_neon_vcvtaq_s32_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtas;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtas_f32");
-  }
-  case AArch64::BI__builtin_neon_vcvta_s64_v:
-  case AArch64::BI__builtin_neon_vcvtaq_s64_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtas;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtas_f64");
-  }
-  case AArch64::BI__builtin_neon_vcvta_u32_v:
-  case AArch64::BI__builtin_neon_vcvtaq_u32_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtau;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtau_f32");
-  }
-  case AArch64::BI__builtin_neon_vcvta_u64_v:
-  case AArch64::BI__builtin_neon_vcvtaq_u64_v: {
-    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
-    llvm::Type *Tys[2] = { Ty, OpTy };
-    Int = Intrinsic::aarch64_neon_fcvtau;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtau_f64");
-  }
-  case AArch64::BI__builtin_neon_vrecpe_v:
-  case AArch64::BI__builtin_neon_vrecpeq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpe_v, E);
-  case AArch64::BI__builtin_neon_vrsqrte_v:
-  case AArch64::BI__builtin_neon_vrsqrteq_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrte_v, E);
-  case AArch64::BI__builtin_neon_vsqrt_v:
-  case AArch64::BI__builtin_neon_vsqrtq_v: {
+  case NEON::BI__builtin_neon_vsqrt_v:
+  case NEON::BI__builtin_neon_vsqrtq_v: {
     Int = Intrinsic::sqrt;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
   }
-  case AArch64::BI__builtin_neon_vcvt_f32_v:
-  case AArch64::BI__builtin_neon_vcvtq_f32_v:
-    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f32_v, E);
-  case AArch64::BI__builtin_neon_vceqz_v:
-  case AArch64::BI__builtin_neon_vceqzq_v:
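+  // The zero-compare builtins below all go through
+  // EmitAArch64CompareBuiltinExpr, which picks the FP predicate for
+  // floating-point element types and the signed integer predicate otherwise,
+  // then widens each i1 lane back to an all-ones/all-zeros mask.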
+  case NEON::BI__builtin_neon_vceqz_v:
+  case NEON::BI__builtin_neon_vceqzq_v:
     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
                                          ICmpInst::ICMP_EQ, "vceqz");
-  case AArch64::BI__builtin_neon_vcgez_v:
-  case AArch64::BI__builtin_neon_vcgezq_v:
+  case NEON::BI__builtin_neon_vcgez_v:
+  case NEON::BI__builtin_neon_vcgezq_v:
     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
                                          ICmpInst::ICMP_SGE, "vcgez");
-  case AArch64::BI__builtin_neon_vclez_v:
-  case AArch64::BI__builtin_neon_vclezq_v:
+  case NEON::BI__builtin_neon_vclez_v:
+  case NEON::BI__builtin_neon_vclezq_v:
     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
                                          ICmpInst::ICMP_SLE, "vclez");
-  case AArch64::BI__builtin_neon_vcgtz_v:
-  case AArch64::BI__builtin_neon_vcgtzq_v:
+  case NEON::BI__builtin_neon_vcgtz_v:
+  case NEON::BI__builtin_neon_vcgtzq_v:
     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
                                          ICmpInst::ICMP_SGT, "vcgtz");
-  case AArch64::BI__builtin_neon_vcltz_v:
-  case AArch64::BI__builtin_neon_vcltzq_v:
+  case NEON::BI__builtin_neon_vcltz_v:
+  case NEON::BI__builtin_neon_vcltzq_v:
     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
                                          ICmpInst::ICMP_SLT, "vcltz");
   }
@@ -4074,28 +4475,28 @@
   for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
     if (i == 0) {
       switch (BuiltinID) {
-      case ARM::BI__builtin_neon_vld1_v:
-      case ARM::BI__builtin_neon_vld1q_v:
-      case ARM::BI__builtin_neon_vld1q_lane_v:
-      case ARM::BI__builtin_neon_vld1_lane_v:
-      case ARM::BI__builtin_neon_vld1_dup_v:
-      case ARM::BI__builtin_neon_vld1q_dup_v:
-      case ARM::BI__builtin_neon_vst1_v:
-      case ARM::BI__builtin_neon_vst1q_v:
-      case ARM::BI__builtin_neon_vst1q_lane_v:
-      case ARM::BI__builtin_neon_vst1_lane_v:
-      case ARM::BI__builtin_neon_vst2_v:
-      case ARM::BI__builtin_neon_vst2q_v:
-      case ARM::BI__builtin_neon_vst2_lane_v:
-      case ARM::BI__builtin_neon_vst2q_lane_v:
-      case ARM::BI__builtin_neon_vst3_v:
-      case ARM::BI__builtin_neon_vst3q_v:
-      case ARM::BI__builtin_neon_vst3_lane_v:
-      case ARM::BI__builtin_neon_vst3q_lane_v:
-      case ARM::BI__builtin_neon_vst4_v:
-      case ARM::BI__builtin_neon_vst4q_v:
-      case ARM::BI__builtin_neon_vst4_lane_v:
-      case ARM::BI__builtin_neon_vst4q_lane_v:
+      case NEON::BI__builtin_neon_vld1_v:
+      case NEON::BI__builtin_neon_vld1q_v:
+      case NEON::BI__builtin_neon_vld1q_lane_v:
+      case NEON::BI__builtin_neon_vld1_lane_v:
+      case NEON::BI__builtin_neon_vld1_dup_v:
+      case NEON::BI__builtin_neon_vld1q_dup_v:
+      case NEON::BI__builtin_neon_vst1_v:
+      case NEON::BI__builtin_neon_vst1q_v:
+      case NEON::BI__builtin_neon_vst1q_lane_v:
+      case NEON::BI__builtin_neon_vst1_lane_v:
+      case NEON::BI__builtin_neon_vst2_v:
+      case NEON::BI__builtin_neon_vst2q_v:
+      case NEON::BI__builtin_neon_vst2_lane_v:
+      case NEON::BI__builtin_neon_vst2q_lane_v:
+      case NEON::BI__builtin_neon_vst3_v:
+      case NEON::BI__builtin_neon_vst3q_v:
+      case NEON::BI__builtin_neon_vst3_lane_v:
+      case NEON::BI__builtin_neon_vst3q_lane_v:
+      case NEON::BI__builtin_neon_vst4_v:
+      case NEON::BI__builtin_neon_vst4q_v:
+      case NEON::BI__builtin_neon_vst4_lane_v:
+      case NEON::BI__builtin_neon_vst4q_lane_v:
         // Get the alignment for the argument in addition to the value;
         // we'll use it later.
         std::pair<llvm::Value*, unsigned> Src =
@@ -4107,21 +4508,21 @@
     }
     if (i == 1) {
       switch (BuiltinID) {
-      case ARM::BI__builtin_neon_vld2_v:
-      case ARM::BI__builtin_neon_vld2q_v:
-      case ARM::BI__builtin_neon_vld3_v:
-      case ARM::BI__builtin_neon_vld3q_v:
-      case ARM::BI__builtin_neon_vld4_v:
-      case ARM::BI__builtin_neon_vld4q_v:
-      case ARM::BI__builtin_neon_vld2_lane_v:
-      case ARM::BI__builtin_neon_vld2q_lane_v:
-      case ARM::BI__builtin_neon_vld3_lane_v:
-      case ARM::BI__builtin_neon_vld3q_lane_v:
-      case ARM::BI__builtin_neon_vld4_lane_v:
-      case ARM::BI__builtin_neon_vld4q_lane_v:
-      case ARM::BI__builtin_neon_vld2_dup_v:
-      case ARM::BI__builtin_neon_vld3_dup_v:
-      case ARM::BI__builtin_neon_vld4_dup_v:
+      case NEON::BI__builtin_neon_vld2_v:
+      case NEON::BI__builtin_neon_vld2q_v:
+      case NEON::BI__builtin_neon_vld3_v:
+      case NEON::BI__builtin_neon_vld3q_v:
+      case NEON::BI__builtin_neon_vld4_v:
+      case NEON::BI__builtin_neon_vld4q_v:
+      case NEON::BI__builtin_neon_vld2_lane_v:
+      case NEON::BI__builtin_neon_vld2q_lane_v:
+      case NEON::BI__builtin_neon_vld3_lane_v:
+      case NEON::BI__builtin_neon_vld3q_lane_v:
+      case NEON::BI__builtin_neon_vld4_lane_v:
+      case NEON::BI__builtin_neon_vld4q_lane_v:
+      case NEON::BI__builtin_neon_vld2_dup_v:
+      case NEON::BI__builtin_neon_vld3_dup_v:
+      case NEON::BI__builtin_neon_vld4_dup_v:
         // Get the alignment for the argument in addition to the value;
         // we'll use it later.
         std::pair<llvm::Value*, unsigned> Src =
@@ -4134,34 +4535,52 @@
     Ops.push_back(EmitScalarExpr(E->getArg(i)));
   }
 
-  // vget_lane and vset_lane are not overloaded and do not have an extra
-  // argument that specifies the vector type.
   switch (BuiltinID) {
   default: break;
-  case ARM::BI__builtin_neon_vget_lane_i8:
-  case ARM::BI__builtin_neon_vget_lane_i16:
-  case ARM::BI__builtin_neon_vget_lane_i32:
-  case ARM::BI__builtin_neon_vget_lane_i64:
-  case ARM::BI__builtin_neon_vget_lane_f32:
-  case ARM::BI__builtin_neon_vgetq_lane_i8:
-  case ARM::BI__builtin_neon_vgetq_lane_i16:
-  case ARM::BI__builtin_neon_vgetq_lane_i32:
-  case ARM::BI__builtin_neon_vgetq_lane_i64:
-  case ARM::BI__builtin_neon_vgetq_lane_f32:
+  // vget_lane and vset_lane are not overloaded and do not have an extra
+  // argument that specifies the vector type.
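+  // For example, __builtin_neon_vgetq_lane_i32(v, 1) is emitted as a plain
+  // 'extractelement <4 x i32>' of lane 1, and vsetq_lane as the matching
+  // 'insertelement'; neither needs a NEON intrinsic call.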
+  case NEON::BI__builtin_neon_vget_lane_i8:
+  case NEON::BI__builtin_neon_vget_lane_i16:
+  case NEON::BI__builtin_neon_vget_lane_i32:
+  case NEON::BI__builtin_neon_vget_lane_i64:
+  case NEON::BI__builtin_neon_vget_lane_f32:
+  case NEON::BI__builtin_neon_vgetq_lane_i8:
+  case NEON::BI__builtin_neon_vgetq_lane_i16:
+  case NEON::BI__builtin_neon_vgetq_lane_i32:
+  case NEON::BI__builtin_neon_vgetq_lane_i64:
+  case NEON::BI__builtin_neon_vgetq_lane_f32:
     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                         "vget_lane");
-  case ARM::BI__builtin_neon_vset_lane_i8:
-  case ARM::BI__builtin_neon_vset_lane_i16:
-  case ARM::BI__builtin_neon_vset_lane_i32:
-  case ARM::BI__builtin_neon_vset_lane_i64:
-  case ARM::BI__builtin_neon_vset_lane_f32:
-  case ARM::BI__builtin_neon_vsetq_lane_i8:
-  case ARM::BI__builtin_neon_vsetq_lane_i16:
-  case ARM::BI__builtin_neon_vsetq_lane_i32:
-  case ARM::BI__builtin_neon_vsetq_lane_i64:
-  case ARM::BI__builtin_neon_vsetq_lane_f32:
+  case NEON::BI__builtin_neon_vset_lane_i8:
+  case NEON::BI__builtin_neon_vset_lane_i16:
+  case NEON::BI__builtin_neon_vset_lane_i32:
+  case NEON::BI__builtin_neon_vset_lane_i64:
+  case NEON::BI__builtin_neon_vset_lane_f32:
+  case NEON::BI__builtin_neon_vsetq_lane_i8:
+  case NEON::BI__builtin_neon_vsetq_lane_i16:
+  case NEON::BI__builtin_neon_vsetq_lane_i32:
+  case NEON::BI__builtin_neon_vsetq_lane_i64:
+  case NEON::BI__builtin_neon_vsetq_lane_f32:
     Ops.push_back(EmitScalarExpr(E->getArg(2)));
     return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+
+  // Non-polymorphic crypto instructions are also not overloaded.
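+  // The argument loop above only collected the first getNumArgs() - 1
+  // operands, so each case below still pushes the final argument before
+  // emitting the intrinsic call directly.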
+  case NEON::BI__builtin_neon_vsha1h_u32:
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
+                        "vsha1h");
+  case NEON::BI__builtin_neon_vsha1cq_u32:
+    Ops.push_back(EmitScalarExpr(E->getArg(2)));
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
+                        "vsha1h");
+  case NEON::BI__builtin_neon_vsha1pq_u32:
+    Ops.push_back(EmitScalarExpr(E->getArg(2)));
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
+                        "vsha1h");
+  case NEON::BI__builtin_neon_vsha1mq_u32:
+    Ops.push_back(EmitScalarExpr(E->getArg(2)));
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
+                        "vsha1h");
   }
 
   // Get the last argument, which specifies the vector type.
@@ -4191,7 +4610,6 @@
   // Determine the type of this overloaded NEON intrinsic.
   NeonTypeFlags Type(Result.getZExtValue());
   bool usgn = Type.isUnsigned();
-  bool quad = Type.isQuad();
   bool rightShift = false;
 
   llvm::VectorType *VTy = GetNeonType(this, Type);
@@ -4199,158 +4617,20 @@
   if (!Ty)
     return 0;
 
+  // Many NEON builtins have identical semantics and uses in ARM and
+  // AArch64. Emit these in a single function.
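+  // A minimal sketch of the assumed table shape: ARMSIMDIntrinsicMap is an
+  // array of NeonIntrinsicInfo entries kept sorted by BuiltinID, each pairing
+  // a builtin with its candidate LLVM intrinsic(s), a name hint for the
+  // emitted value and a type-modifier mask, e.g. roughly
+  //   { NEON::BI__builtin_neon_vabd_v, <signed intr>, <unsigned intr>,
+  //     "vabd", <type modifier> }
+  // so that findNeonIntrinsicInMap can binary-search it once sortedness has
+  // been verified (the NEONSIMDIntrinsicsProvenSorted flag).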
+  llvm::ArrayRef<NeonIntrinsicInfo> IntrinsicMap(ARMSIMDIntrinsicMap);
+  const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+      IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
+  if (Builtin)
+    return EmitCommonNeonBuiltinExpr(
+        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
+        Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);
+
   unsigned Int;
   switch (BuiltinID) {
   default: return 0;
-  case ARM::BI__builtin_neon_vbsl_v:
-  case ARM::BI__builtin_neon_vbslq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty),
-                        Ops, "vbsl");
-  case ARM::BI__builtin_neon_vabd_v:
-  case ARM::BI__builtin_neon_vabdq_v:
-    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
-  case ARM::BI__builtin_neon_vabs_v:
-  case ARM::BI__builtin_neon_vabsq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
-                        Ops, "vabs");
-  case ARM::BI__builtin_neon_vaddhn_v: {
-    llvm::VectorType *SrcTy =
-        llvm::VectorType::getExtendedElementVectorType(VTy);
-
-    // %sum = add <4 x i32> %lhs, %rhs
-    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
-    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
-    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
-
-    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
-    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
-                                       SrcTy->getScalarSizeInBits() / 2);
-    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
-    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
-
-    // %res = trunc <4 x i32> %high to <4 x i16>
-    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
-  }
-  case ARM::BI__builtin_neon_vcale_v:
-    std::swap(Ops[0], Ops[1]);
-  case ARM::BI__builtin_neon_vcage_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
-    return EmitNeonCall(F, Ops, "vcage");
-  }
-  case ARM::BI__builtin_neon_vcaleq_v:
-    std::swap(Ops[0], Ops[1]);
-  case ARM::BI__builtin_neon_vcageq_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
-    return EmitNeonCall(F, Ops, "vcage");
-  }
-  case ARM::BI__builtin_neon_vcalt_v:
-    std::swap(Ops[0], Ops[1]);
-  case ARM::BI__builtin_neon_vcagt_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
-    return EmitNeonCall(F, Ops, "vcagt");
-  }
-  case ARM::BI__builtin_neon_vcaltq_v:
-    std::swap(Ops[0], Ops[1]);
-  case ARM::BI__builtin_neon_vcagtq_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
-    return EmitNeonCall(F, Ops, "vcagt");
-  }
-  case ARM::BI__builtin_neon_vcls_v:
-  case ARM::BI__builtin_neon_vclsq_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
-    return EmitNeonCall(F, Ops, "vcls");
-  }
-  case ARM::BI__builtin_neon_vclz_v:
-  case ARM::BI__builtin_neon_vclzq_v: {
-    // Generate target-independent intrinsic; also need to add second argument
-    // for whether or not clz of zero is undefined; on ARM it isn't.
-    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
-    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
-    return EmitNeonCall(F, Ops, "vclz");
-  }
-  case ARM::BI__builtin_neon_vcnt_v:
-  case ARM::BI__builtin_neon_vcntq_v: {
-    // generate target-independent intrinsic
-    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty);
-    return EmitNeonCall(F, Ops, "vctpop");
-  }
-  case ARM::BI__builtin_neon_vcvt_f16_v: {
-    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
-           "unexpected vcvt_f16_v builtin");
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
-    return EmitNeonCall(F, Ops, "vcvt");
-  }
-  case ARM::BI__builtin_neon_vcvt_f32_f16: {
-    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
-           "unexpected vcvt_f32_f16 builtin");
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
-    return EmitNeonCall(F, Ops, "vcvt");
-  }
-  case ARM::BI__builtin_neon_vcvt_f32_v:
-  case ARM::BI__builtin_neon_vcvtq_f32_v:
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
-    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
-                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
-  case ARM::BI__builtin_neon_vcvt_s32_v:
-  case ARM::BI__builtin_neon_vcvt_u32_v:
-  case ARM::BI__builtin_neon_vcvtq_s32_v:
-  case ARM::BI__builtin_neon_vcvtq_u32_v: {
-    llvm::Type *FloatTy =
-      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
-    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
-    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
-                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
-  }
-  case ARM::BI__builtin_neon_vcvt_n_f32_v:
-  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
-    llvm::Type *FloatTy =
-      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
-    llvm::Type *Tys[2] = { FloatTy, Ty };
-    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
-               : Intrinsic::arm_neon_vcvtfxs2fp;
-    Function *F = CGM.getIntrinsic(Int, Tys);
-    return EmitNeonCall(F, Ops, "vcvt_n");
-  }
-  case ARM::BI__builtin_neon_vcvt_n_s32_v:
-  case ARM::BI__builtin_neon_vcvt_n_u32_v:
-  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
-  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
-    llvm::Type *FloatTy =
-      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
-    llvm::Type *Tys[2] = { Ty, FloatTy };
-    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
-               : Intrinsic::arm_neon_vcvtfp2fxs;
-    Function *F = CGM.getIntrinsic(Int, Tys);
-    return EmitNeonCall(F, Ops, "vcvt_n");
-  }
-  case ARM::BI__builtin_neon_vext_v:
-  case ARM::BI__builtin_neon_vextq_v: {
-    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
-    SmallVector<Constant*, 16> Indices;
-    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
-      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
-
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
-    Value *SV = llvm::ConstantVector::get(Indices);
-    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
-  }
-  case ARM::BI__builtin_neon_vhadd_v:
-  case ARM::BI__builtin_neon_vhaddq_v:
-    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
-  case ARM::BI__builtin_neon_vhsub_v:
-  case ARM::BI__builtin_neon_vhsubq_v:
-    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
-  case ARM::BI__builtin_neon_vld1_v:
-  case ARM::BI__builtin_neon_vld1q_v:
-    Ops.push_back(Align);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
-                        Ops, "vld1");
-  case ARM::BI__builtin_neon_vld1q_lane_v:
+  case NEON::BI__builtin_neon_vld1q_lane_v:
     // Handle 64-bit integer elements as a special case.  Use shuffles of
     // one-element vectors to avoid poor code for i64 in the backend.
     if (VTy->getElementType()->isIntegerTy(64)) {
@@ -4371,7 +4651,7 @@
       return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
     }
     // fall through
-  case ARM::BI__builtin_neon_vld1_lane_v: {
+  case NEON::BI__builtin_neon_vld1_lane_v: {
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ty = llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -4379,90 +4659,19 @@
     Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
     return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
   }
-  case ARM::BI__builtin_neon_vld1_dup_v:
-  case ARM::BI__builtin_neon_vld1q_dup_v: {
-    Value *V = UndefValue::get(Ty);
-    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
-    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
-    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
-    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
-    return EmitNeonSplat(Ops[0], CI);
-  }
-  case ARM::BI__builtin_neon_vld2_v:
-  case ARM::BI__builtin_neon_vld2q_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
-    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
-    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    return Builder.CreateStore(Ops[1], Ops[0]);
-  }
-  case ARM::BI__builtin_neon_vld3_v:
-  case ARM::BI__builtin_neon_vld3q_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
-    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
-    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    return Builder.CreateStore(Ops[1], Ops[0]);
-  }
-  case ARM::BI__builtin_neon_vld4_v:
-  case ARM::BI__builtin_neon_vld4q_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
-    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
-    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    return Builder.CreateStore(Ops[1], Ops[0]);
-  }
-  case ARM::BI__builtin_neon_vld2_lane_v:
-  case ARM::BI__builtin_neon_vld2q_lane_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
-    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
-    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
-    Ops.push_back(Align);
-    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
-    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    return Builder.CreateStore(Ops[1], Ops[0]);
-  }
-  case ARM::BI__builtin_neon_vld3_lane_v:
-  case ARM::BI__builtin_neon_vld3q_lane_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
-    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
-    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
-    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
-    Ops.push_back(Align);
-    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
-    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    return Builder.CreateStore(Ops[1], Ops[0]);
-  }
-  case ARM::BI__builtin_neon_vld4_lane_v:
-  case ARM::BI__builtin_neon_vld4q_lane_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
-    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
-    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
-    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
-    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
-    Ops.push_back(Align);
-    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
-    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    return Builder.CreateStore(Ops[1], Ops[0]);
-  }
-  case ARM::BI__builtin_neon_vld2_dup_v:
-  case ARM::BI__builtin_neon_vld3_dup_v:
-  case ARM::BI__builtin_neon_vld4_dup_v: {
+  case NEON::BI__builtin_neon_vld2_dup_v:
+  case NEON::BI__builtin_neon_vld3_dup_v:
+  case NEON::BI__builtin_neon_vld4_dup_v: {
     // Handle 64-bit elements as a special-case.  There is no "dup" needed.
     if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
       switch (BuiltinID) {
-      case ARM::BI__builtin_neon_vld2_dup_v:
+      case NEON::BI__builtin_neon_vld2_dup_v:
         Int = Intrinsic::arm_neon_vld2;
         break;
-      case ARM::BI__builtin_neon_vld3_dup_v:
+      case NEON::BI__builtin_neon_vld3_dup_v:
         Int = Intrinsic::arm_neon_vld3;
         break;
-      case ARM::BI__builtin_neon_vld4_dup_v:
+      case NEON::BI__builtin_neon_vld4_dup_v:
         Int = Intrinsic::arm_neon_vld4;
         break;
       default: llvm_unreachable("unknown vld_dup intrinsic?");
@@ -4474,13 +4683,13 @@
       return Builder.CreateStore(Ops[1], Ops[0]);
     }
     switch (BuiltinID) {
-    case ARM::BI__builtin_neon_vld2_dup_v:
+    case NEON::BI__builtin_neon_vld2_dup_v:
       Int = Intrinsic::arm_neon_vld2lane;
       break;
-    case ARM::BI__builtin_neon_vld3_dup_v:
+    case NEON::BI__builtin_neon_vld3_dup_v:
       Int = Intrinsic::arm_neon_vld3lane;
       break;
-    case ARM::BI__builtin_neon_vld4_dup_v:
+    case NEON::BI__builtin_neon_vld4_dup_v:
       Int = Intrinsic::arm_neon_vld4lane;
       break;
     default: llvm_unreachable("unknown vld_dup intrinsic?");
@@ -4509,251 +4718,58 @@
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
-  case ARM::BI__builtin_neon_vmax_v:
-  case ARM::BI__builtin_neon_vmaxq_v:
-    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
-  case ARM::BI__builtin_neon_vmin_v:
-  case ARM::BI__builtin_neon_vminq_v:
-    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
-  case ARM::BI__builtin_neon_vmovl_v: {
-    llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
-    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
-    if (usgn)
-      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
-    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
-  }
-  case ARM::BI__builtin_neon_vmovn_v: {
-    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
-    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
-    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
-  }
-  case ARM::BI__builtin_neon_vmul_v:
-  case ARM::BI__builtin_neon_vmulq_v:
-    assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
-                        Ops, "vmul");
-  case ARM::BI__builtin_neon_vmull_v:
-    // FIXME: the integer vmull operations could be emitted in terms of pure
-    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
-    // hoisting the exts outside loops. Until global ISel comes along that can
-    // see through such movement this leads to bad CodeGen. So we need an
-    // intrinsic for now.
-    Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
-    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
-  case ARM::BI__builtin_neon_vfma_v:
-  case ARM::BI__builtin_neon_vfmaq_v: {
-    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
-    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
-    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
-
-    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
-    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
-  }
-  case ARM::BI__builtin_neon_vpadal_v:
-  case ARM::BI__builtin_neon_vpadalq_v: {
-    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
-    // The source operand type has twice as many elements of half the size.
-    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
-    llvm::Type *EltTy =
-      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
-    llvm::Type *NarrowTy =
-      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
-    llvm::Type *Tys[2] = { Ty, NarrowTy };
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
-  }
-  case ARM::BI__builtin_neon_vpadd_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
-                        Ops, "vpadd");
-  case ARM::BI__builtin_neon_vpaddl_v:
-  case ARM::BI__builtin_neon_vpaddlq_v: {
-    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
-    // The source operand type has twice as many elements of half the size.
-    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
-    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
-    llvm::Type *NarrowTy =
-      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
-    llvm::Type *Tys[2] = { Ty, NarrowTy };
-    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
-  }
-  case ARM::BI__builtin_neon_vpmax_v:
-    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
-  case ARM::BI__builtin_neon_vpmin_v:
-    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
-  case ARM::BI__builtin_neon_vqabs_v:
-  case ARM::BI__builtin_neon_vqabsq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
-                        Ops, "vqabs");
-  case ARM::BI__builtin_neon_vqadd_v:
-  case ARM::BI__builtin_neon_vqaddq_v:
-    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
-  case ARM::BI__builtin_neon_vqdmlal_v: {
-    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
-    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
-                              MulOps, "vqdmlal");
-
-    SmallVector<Value *, 2> AddOps;
-    AddOps.push_back(Ops[0]);
-    AddOps.push_back(Mul);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqadds, Ty),
-                        AddOps, "vqdmlal");
-  }
-  case ARM::BI__builtin_neon_vqdmlsl_v: {
-    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
-    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
-                              MulOps, "vqdmlsl");
-
-    SmallVector<Value *, 2> SubOps;
-    SubOps.push_back(Ops[0]);
-    SubOps.push_back(Mul);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqsubs, Ty),
-                        SubOps, "vqdmlsl");
-  }
-  case ARM::BI__builtin_neon_vqdmulh_v:
-  case ARM::BI__builtin_neon_vqdmulhq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
-                        Ops, "vqdmulh");
-  case ARM::BI__builtin_neon_vqdmull_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
-                        Ops, "vqdmull");
-  case ARM::BI__builtin_neon_vqmovn_v:
-    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
-  case ARM::BI__builtin_neon_vqmovun_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
-                        Ops, "vqdmull");
-  case ARM::BI__builtin_neon_vqneg_v:
-  case ARM::BI__builtin_neon_vqnegq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
-                        Ops, "vqneg");
-  case ARM::BI__builtin_neon_vqrdmulh_v:
-  case ARM::BI__builtin_neon_vqrdmulhq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
-                        Ops, "vqrdmulh");
-  case ARM::BI__builtin_neon_vqrshl_v:
-  case ARM::BI__builtin_neon_vqrshlq_v:
-    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
-  case ARM::BI__builtin_neon_vqrshrn_n_v:
+  case NEON::BI__builtin_neon_vqrshrn_n_v:
     Int =
       usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                         1, true);
-  case ARM::BI__builtin_neon_vqrshrun_n_v:
+  case NEON::BI__builtin_neon_vqrshrun_n_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                         Ops, "vqrshrun_n", 1, true);
-  case ARM::BI__builtin_neon_vqshl_v:
-  case ARM::BI__builtin_neon_vqshlq_v:
-    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
-  case ARM::BI__builtin_neon_vqshl_n_v:
-  case ARM::BI__builtin_neon_vqshlq_n_v:
-    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
-                        1, false);
-  case ARM::BI__builtin_neon_vqshlu_n_v:
-  case ARM::BI__builtin_neon_vqshluq_n_v:
+  case NEON::BI__builtin_neon_vqshlu_n_v:
+  case NEON::BI__builtin_neon_vqshluq_n_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
                         Ops, "vqshlu", 1, false);
-  case ARM::BI__builtin_neon_vqshrn_n_v:
+  case NEON::BI__builtin_neon_vqshrn_n_v:
     Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                         1, true);
-  case ARM::BI__builtin_neon_vqshrun_n_v:
+  case NEON::BI__builtin_neon_vqshrun_n_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                         Ops, "vqshrun_n", 1, true);
-  case ARM::BI__builtin_neon_vqsub_v:
-  case ARM::BI__builtin_neon_vqsubq_v:
-    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
-  case ARM::BI__builtin_neon_vraddhn_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
-                        Ops, "vraddhn");
-  case ARM::BI__builtin_neon_vrecpe_v:
-  case ARM::BI__builtin_neon_vrecpeq_v:
+  case NEON::BI__builtin_neon_vrecpe_v:
+  case NEON::BI__builtin_neon_vrecpeq_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                         Ops, "vrecpe");
-  case ARM::BI__builtin_neon_vrecps_v:
-  case ARM::BI__builtin_neon_vrecpsq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
-                        Ops, "vrecps");
-  case ARM::BI__builtin_neon_vrhadd_v:
-  case ARM::BI__builtin_neon_vrhaddq_v:
-    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
-  case ARM::BI__builtin_neon_vrshl_v:
-  case ARM::BI__builtin_neon_vrshlq_v:
-    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
-  case ARM::BI__builtin_neon_vrshrn_n_v:
+  case NEON::BI__builtin_neon_vrshrn_n_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                         Ops, "vrshrn_n", 1, true);
-  case ARM::BI__builtin_neon_vrshr_n_v:
-  case ARM::BI__builtin_neon_vrshrq_n_v:
+  case NEON::BI__builtin_neon_vrshr_n_v:
+  case NEON::BI__builtin_neon_vrshrq_n_v:
     Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
-  case ARM::BI__builtin_neon_vrsqrte_v:
-  case ARM::BI__builtin_neon_vrsqrteq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
-                        Ops, "vrsqrte");
-  case ARM::BI__builtin_neon_vrsqrts_v:
-  case ARM::BI__builtin_neon_vrsqrtsq_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
-                        Ops, "vrsqrts");
-  case ARM::BI__builtin_neon_vrsra_n_v:
-  case ARM::BI__builtin_neon_vrsraq_n_v:
+  case NEON::BI__builtin_neon_vrsra_n_v:
+  case NEON::BI__builtin_neon_vrsraq_n_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
     Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
     Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
     return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
-  case ARM::BI__builtin_neon_vrsubhn_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
-                        Ops, "vrsubhn");
-  case ARM::BI__builtin_neon_vshl_v:
-  case ARM::BI__builtin_neon_vshlq_v:
-    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
-  case ARM::BI__builtin_neon_vshll_n_v:
-    Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
-  case ARM::BI__builtin_neon_vshl_n_v:
-  case ARM::BI__builtin_neon_vshlq_n_v:
-    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
-    return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
-                             "vshl_n");
-  case ARM::BI__builtin_neon_vshrn_n_v:
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
-                        Ops, "vshrn_n", 1, true);
-  case ARM::BI__builtin_neon_vshr_n_v:
-  case ARM::BI__builtin_neon_vshrq_n_v:
-    return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, usgn, "vshr_n");
-  case ARM::BI__builtin_neon_vsri_n_v:
-  case ARM::BI__builtin_neon_vsriq_n_v:
+  case NEON::BI__builtin_neon_vsri_n_v:
+  case NEON::BI__builtin_neon_vsriq_n_v:
     rightShift = true;
-  case ARM::BI__builtin_neon_vsli_n_v:
-  case ARM::BI__builtin_neon_vsliq_n_v:
+  case NEON::BI__builtin_neon_vsli_n_v:
+  case NEON::BI__builtin_neon_vsliq_n_v:
     Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                         Ops, "vsli_n");
-  case ARM::BI__builtin_neon_vsra_n_v:
-  case ARM::BI__builtin_neon_vsraq_n_v:
+  case NEON::BI__builtin_neon_vsra_n_v:
+  case NEON::BI__builtin_neon_vsraq_n_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
     return Builder.CreateAdd(Ops[0], Ops[1]);
-  case ARM::BI__builtin_neon_vst1_v:
-  case ARM::BI__builtin_neon_vst1q_v:
-    Ops.push_back(Align);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
-                        Ops, "");
-  case ARM::BI__builtin_neon_vst1q_lane_v:
+  case NEON::BI__builtin_neon_vst1q_lane_v:
     // Handle 64-bit integer elements as a special case.  Use a shuffle to get
     // a one-element vector and avoid poor code for i64 in the backend.
     if (VTy->getElementType()->isIntegerTy(64)) {
@@ -4765,7 +4781,7 @@
                                                  Ops[1]->getType()), Ops);
     }
     // fall through
-  case ARM::BI__builtin_neon_vst1_lane_v: {
+  case NEON::BI__builtin_neon_vst1_lane_v: {
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -4774,89 +4790,1922 @@
     St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
     return St;
   }
-  case ARM::BI__builtin_neon_vst2_v:
-  case ARM::BI__builtin_neon_vst2q_v:
-    Ops.push_back(Align);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
-                        Ops, "");
-  case ARM::BI__builtin_neon_vst2_lane_v:
-  case ARM::BI__builtin_neon_vst2q_lane_v:
-    Ops.push_back(Align);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
-                        Ops, "");
-  case ARM::BI__builtin_neon_vst3_v:
-  case ARM::BI__builtin_neon_vst3q_v:
-    Ops.push_back(Align);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
-                        Ops, "");
-  case ARM::BI__builtin_neon_vst3_lane_v:
-  case ARM::BI__builtin_neon_vst3q_lane_v:
-    Ops.push_back(Align);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
-                        Ops, "");
-  case ARM::BI__builtin_neon_vst4_v:
-  case ARM::BI__builtin_neon_vst4q_v:
-    Ops.push_back(Align);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
-                        Ops, "");
-  case ARM::BI__builtin_neon_vst4_lane_v:
-  case ARM::BI__builtin_neon_vst4q_lane_v:
-    Ops.push_back(Align);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
-                        Ops, "");
-  case ARM::BI__builtin_neon_vsubhn_v: {
-    llvm::VectorType *SrcTy =
-        llvm::VectorType::getExtendedElementVectorType(VTy);
-
-    // %sum = add <4 x i32> %lhs, %rhs
-    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
-    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
-    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
-
-    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
-    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
-                                       SrcTy->getScalarSizeInBits() / 2);
-    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
-    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
-
-    // %res = trunc <4 x i32> %high to <4 x i16>
-    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
-  }
-  case ARM::BI__builtin_neon_vtbl1_v:
+  case NEON::BI__builtin_neon_vtbl1_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                         Ops, "vtbl1");
-  case ARM::BI__builtin_neon_vtbl2_v:
+  case NEON::BI__builtin_neon_vtbl2_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                         Ops, "vtbl2");
-  case ARM::BI__builtin_neon_vtbl3_v:
+  case NEON::BI__builtin_neon_vtbl3_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                         Ops, "vtbl3");
-  case ARM::BI__builtin_neon_vtbl4_v:
+  case NEON::BI__builtin_neon_vtbl4_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                         Ops, "vtbl4");
-  case ARM::BI__builtin_neon_vtbx1_v:
+  case NEON::BI__builtin_neon_vtbx1_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                         Ops, "vtbx1");
-  case ARM::BI__builtin_neon_vtbx2_v:
+  case NEON::BI__builtin_neon_vtbx2_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                         Ops, "vtbx2");
-  case ARM::BI__builtin_neon_vtbx3_v:
+  case NEON::BI__builtin_neon_vtbx3_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                         Ops, "vtbx3");
-  case ARM::BI__builtin_neon_vtbx4_v:
+  case NEON::BI__builtin_neon_vtbx4_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                         Ops, "vtbx4");
-  case ARM::BI__builtin_neon_vtst_v:
-  case ARM::BI__builtin_neon_vtstq_v: {
+  }
+}
+
+static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
+                                      const CallExpr *E,
+                                      SmallVectorImpl<Value *> &Ops) {
+  unsigned int Int = 0;
+  const char *s = NULL;
+
+  unsigned TblPos;
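+  // Operand index of the first table register: the vtbx/vqtbx forms take the
+  // vector that supplies out-of-range lanes as operand 0, so their tables
+  // start at operand 1.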
+  switch (BuiltinID) {
+  default:
+    return 0;
+  case NEON::BI__builtin_neon_vtbl1_v:
+  case NEON::BI__builtin_neon_vqtbl1_v:
+  case NEON::BI__builtin_neon_vqtbl1q_v:
+  case NEON::BI__builtin_neon_vtbl2_v:
+  case NEON::BI__builtin_neon_vqtbl2_v:
+  case NEON::BI__builtin_neon_vqtbl2q_v:
+  case NEON::BI__builtin_neon_vtbl3_v:
+  case NEON::BI__builtin_neon_vqtbl3_v:
+  case NEON::BI__builtin_neon_vqtbl3q_v:
+  case NEON::BI__builtin_neon_vtbl4_v:
+  case NEON::BI__builtin_neon_vqtbl4_v:
+  case NEON::BI__builtin_neon_vqtbl4q_v:
+    TblPos = 0;
+    break;
+  case NEON::BI__builtin_neon_vtbx1_v:
+  case NEON::BI__builtin_neon_vqtbx1_v:
+  case NEON::BI__builtin_neon_vqtbx1q_v:
+  case NEON::BI__builtin_neon_vtbx2_v:
+  case NEON::BI__builtin_neon_vqtbx2_v:
+  case NEON::BI__builtin_neon_vqtbx2q_v:
+  case NEON::BI__builtin_neon_vtbx3_v:
+  case NEON::BI__builtin_neon_vqtbx3_v:
+  case NEON::BI__builtin_neon_vqtbx3q_v:
+  case NEON::BI__builtin_neon_vtbx4_v:
+  case NEON::BI__builtin_neon_vqtbx4_v:
+  case NEON::BI__builtin_neon_vqtbx4q_v:
+    TblPos = 1;
+    break;
+  }
+
+  assert(E->getNumArgs() >= 3);
+
+  // Get the last argument, which specifies the vector type.
+  llvm::APSInt Result;
+  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
+  if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
+    return 0;
+
+  // Determine the type of this overloaded NEON intrinsic.
+  NeonTypeFlags Type(Result.getZExtValue());
+  llvm::VectorType *VTy = GetNeonType(&CGF, Type);
+  llvm::Type *Ty = VTy;
+  if (!Ty)
+    return 0;
+
+  Arg = E->getArg(TblPos);
+  unsigned nElts = VTy->getNumElements();
+
+  CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+  // AArch64 scalar builtins are not overloaded; they do not have an extra
+  // argument that specifies the vector type, so each case must be handled
+  // individually.
+  SmallVector<Value *, 2> TblOps;
+  switch (BuiltinID) {
+  case NEON::BI__builtin_neon_vtbl1_v: {
+    TblOps.push_back(Ops[0]);
+    return packTBLDVectorList(CGF, TblOps, 0, Ops[1], Ty,
+                              Intrinsic::arm64_neon_tbl1, "vtbl1");
+  }
+  case NEON::BI__builtin_neon_vtbl2_v: {
+    TblOps.push_back(Ops[0]);
+    TblOps.push_back(Ops[1]);
+    return packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
+                              Intrinsic::arm64_neon_tbl1, "vtbl1");
+  }
+  case NEON::BI__builtin_neon_vtbl3_v: {
+    TblOps.push_back(Ops[0]);
+    TblOps.push_back(Ops[1]);
+    TblOps.push_back(Ops[2]);
+    return packTBLDVectorList(CGF, TblOps, 0, Ops[3], Ty,
+                              Intrinsic::arm64_neon_tbl2, "vtbl2");
+  }
+  case NEON::BI__builtin_neon_vtbl4_v: {
+    TblOps.push_back(Ops[0]);
+    TblOps.push_back(Ops[1]);
+    TblOps.push_back(Ops[2]);
+    TblOps.push_back(Ops[3]);
+    return packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
+                              Intrinsic::arm64_neon_tbl2, "vtbl2");
+  }
+  case NEON::BI__builtin_neon_vtbx1_v: {
+    TblOps.push_back(Ops[1]);
+    Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
+                                    Intrinsic::arm64_neon_tbl1, "vtbl1");
+
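+    // tbl1 zeroes any lane whose index is out of range, but vtbx1 must keep
+    // the corresponding lane of the first operand instead.  Build a mask of
+    // lanes whose index is >= 8 (one D-register table has 8 byte entries) and
+    // merge the table result with the original vector.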
+    llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
+    Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
+    Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
+    CmpRes = Builder.CreateSExt(CmpRes, Ty);
+
+    Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
+    Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
+    return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
+  }
+  case NEON::BI__builtin_neon_vtbx2_v: {
+    TblOps.push_back(Ops[1]);
+    TblOps.push_back(Ops[2]);
+    return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
+                              Intrinsic::arm64_neon_tbx1, "vtbx1");
+  }
+  case NEON::BI__builtin_neon_vtbx3_v: {
+    TblOps.push_back(Ops[1]);
+    TblOps.push_back(Ops[2]);
+    TblOps.push_back(Ops[3]);
+    Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
+                                       Intrinsic::arm64_neon_tbl2, "vtbl2");
+
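+    // Three D-register tables give 24 valid byte indices; as with vtbx1,
+    // lanes whose index is 24 or more must come from the first operand rather
+    // than the zero-filled tbl2 result.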
+    llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
+    Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
+    Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
+                                           TwentyFourV);
+    CmpRes = Builder.CreateSExt(CmpRes, Ty);
+
+    Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
+    Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
+    return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
+  }
+  case NEON::BI__builtin_neon_vtbx4_v: {
+    TblOps.push_back(Ops[1]);
+    TblOps.push_back(Ops[2]);
+    TblOps.push_back(Ops[3]);
+    TblOps.push_back(Ops[4]);
+    return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
+                              Intrinsic::arm64_neon_tbx2, "vtbx2");
+  }
+  case NEON::BI__builtin_neon_vqtbl1_v:
+  case NEON::BI__builtin_neon_vqtbl1q_v:
+    Int = Intrinsic::arm64_neon_tbl1; s = "vtbl1"; break;
+  case NEON::BI__builtin_neon_vqtbl2_v:
+  case NEON::BI__builtin_neon_vqtbl2q_v:
+    Int = Intrinsic::arm64_neon_tbl2; s = "vtbl2"; break;
+  case NEON::BI__builtin_neon_vqtbl3_v:
+  case NEON::BI__builtin_neon_vqtbl3q_v:
+    Int = Intrinsic::arm64_neon_tbl3; s = "vtbl3"; break;
+  case NEON::BI__builtin_neon_vqtbl4_v:
+  case NEON::BI__builtin_neon_vqtbl4q_v:
+    Int = Intrinsic::arm64_neon_tbl4; s = "vtbl4"; break;
+  case NEON::BI__builtin_neon_vqtbx1_v:
+  case NEON::BI__builtin_neon_vqtbx1q_v:
+    Int = Intrinsic::arm64_neon_tbx1; s = "vtbx1"; break;
+  case NEON::BI__builtin_neon_vqtbx2_v:
+  case NEON::BI__builtin_neon_vqtbx2q_v:
+    Int = Intrinsic::arm64_neon_tbx2; s = "vtbx2"; break;
+  case NEON::BI__builtin_neon_vqtbx3_v:
+  case NEON::BI__builtin_neon_vqtbx3q_v:
+    Int = Intrinsic::arm64_neon_tbx3; s = "vtbx3"; break;
+  case NEON::BI__builtin_neon_vqtbx4_v:
+  case NEON::BI__builtin_neon_vqtbx4q_v:
+    Int = Intrinsic::arm64_neon_tbx4; s = "vtbx4"; break;
+  }
+
+  if (!Int)
+    return 0;
+
+  Function *F = CGF.CGM.getIntrinsic(Int, Ty);
+  return CGF.EmitNeonCall(F, Ops, s);
+}
+
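+// Insert a scalar i16 into lane 0 of a v4i16 vector (and likewise i8 into a
+// v8i8 vector below) so that the 64-bit vector form of an intrinsic can be
+// used; callers extract lane 0 of the result to recover the scalar value.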
+Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
+  llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
+  Op = Builder.CreateBitCast(Op, Int16Ty);
+  Value *V = UndefValue::get(VTy);
+  llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+  Op = Builder.CreateInsertElement(V, Op, CI);
+  return Op;
+}
+
+Value *CodeGenFunction::vectorWrapScalar8(Value *Op) {
+  llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
+  Op = Builder.CreateBitCast(Op, Int8Ty);
+  Value *V = UndefValue::get(VTy);
+  llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+  Op = Builder.CreateInsertElement(V, Op, CI);
+  return Op;
+}
+
+Value *CodeGenFunction::
+emitVectorWrappedScalar8Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
+                                  const char *Name) {
+  // i8 is not a legal type for ARM64, so we can't just use
+  // a normal overloaded intrinsic call for these scalar types. Instead
+  // we'll build 64-bit vectors w/ lane zero being our input values and
+  // perform the operation on that. The back end can pattern match directly
+  // to the scalar instruction.
+  Ops[0] = vectorWrapScalar8(Ops[0]);
+  Ops[1] = vectorWrapScalar8(Ops[1]);
+  llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
+  Value *V = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, Name);
+  Constant *CI = ConstantInt::get(Int32Ty, 0);
+  return Builder.CreateExtractElement(V, CI, "lane0");
+}
+
+Value *CodeGenFunction::
+emitVectorWrappedScalar16Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
+                                   const char *Name) {
+  // i16 is not a legal type for ARM64, so we can't just use
+  // a normal overloaded intrinsic call for these scalar types. Instead
+  // we'll build 64-bit vectors w/ lane zero being our input values and
+  // perform the operation on that. The back end can pattern match directly
+  // to the scalar instruction.
+  Ops[0] = vectorWrapScalar16(Ops[0]);
+  Ops[1] = vectorWrapScalar16(Ops[1]);
+  llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
+  Value *V = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, Name);
+  Constant *CI = ConstantInt::get(Int32Ty, 0);
+  return Builder.CreateExtractElement(V, CI, "lane0");
+}
+
+Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID,
+                                             const CallExpr *E) {
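+  // __clear_cache is not an LLVM intrinsic: emit it as a call to the runtime
+  // function of the same name, forwarding the two pointer arguments.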
+  if (BuiltinID == ARM64::BI__clear_cache) {
+    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
+    const FunctionDecl *FD = E->getDirectCallee();
+    SmallVector<Value*, 2> Ops;
+    for (unsigned i = 0; i < 2; i++)
+      Ops.push_back(EmitScalarExpr(E->getArg(i)));
+    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+    StringRef Name = FD->getName();
+    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
+  }
+
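+  // A 128-bit ldrex is emitted through ldxp, which returns two i64 halves;
+  // they are zero-extended, shifted and or'd back together into an i128.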
+  if (BuiltinID == ARM64::BI__builtin_arm_ldrex &&
+      getContext().getTypeSize(E->getType()) == 128) {
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_ldxp);
+
+    Value *LdPtr = EmitScalarExpr(E->getArg(0));
+    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
+                                    "ldxp");
+
+    Value *Val0 = Builder.CreateExtractValue(Val, 1);
+    Value *Val1 = Builder.CreateExtractValue(Val, 0);
+    llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
+    Val0 = Builder.CreateZExt(Val0, Int128Ty);
+    Val1 = Builder.CreateZExt(Val1, Int128Ty);
+
+    Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
+    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
+    Val = Builder.CreateOr(Val, Val1);
+    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
+  } else if (BuiltinID == ARM64::BI__builtin_arm_ldrex) {
+    Value *LoadAddr = EmitScalarExpr(E->getArg(0));
+
+    QualType Ty = E->getType();
+    llvm::Type *RealResTy = ConvertType(Ty);
+    llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
+                                                  getContext().getTypeSize(Ty));
+    LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_ldxr, LoadAddr->getType());
+    Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+
+    if (RealResTy->isPointerTy())
+      return Builder.CreateIntToPtr(Val, RealResTy);
+
+    Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
+    return Builder.CreateBitCast(Val, RealResTy);
+  }
+
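+  // Similarly, a 128-bit strex spills the value to a temporary, reloads it as
+  // a pair of i64 halves and stores them with stxp.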
+  if (BuiltinID == ARM64::BI__builtin_arm_strex &&
+      getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_stxp);
+    llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, NULL);
+
+    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+    Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
+                                      One);
+    Value *Val = EmitScalarExpr(E->getArg(0));
+    Builder.CreateStore(Val, Tmp);
+
+    Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
+    Val = Builder.CreateLoad(LdPtr);
+
+    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
+    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
+    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
+                                         Int8PtrTy);
+    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "stxp");
+  } else if (BuiltinID == ARM64::BI__builtin_arm_strex) {
+    Value *StoreVal = EmitScalarExpr(E->getArg(0));
+    Value *StoreAddr = EmitScalarExpr(E->getArg(1));
+
+    QualType Ty = E->getArg(0)->getType();
+    llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
+                                                 getContext().getTypeSize(Ty));
+    StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+
+    if (StoreVal->getType()->isPointerTy())
+      StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
+    else {
+      StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
+    }
+
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_stxr, StoreAddr->getType());
+    return Builder.CreateCall2(F, StoreVal, StoreAddr, "stxr");
+  }
+
+  if (BuiltinID == ARM64::BI__builtin_arm_clrex) {
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_clrex);
+    return Builder.CreateCall(F);
+  }
+
+  // CRC32
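+  // Note that the 64-bit forms (crc32d/crc32cd) map onto the arm64 crc32x and
+  // crc32cx intrinsics, and the data operand is widened to the intrinsic's
+  // parameter type before the call.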
+  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
+  switch (BuiltinID) {
+  case ARM64::BI__builtin_arm_crc32b:
+    CRCIntrinsicID = Intrinsic::arm64_crc32b; break;
+  case ARM64::BI__builtin_arm_crc32cb:
+    CRCIntrinsicID = Intrinsic::arm64_crc32cb; break;
+  case ARM64::BI__builtin_arm_crc32h:
+    CRCIntrinsicID = Intrinsic::arm64_crc32h; break;
+  case ARM64::BI__builtin_arm_crc32ch:
+    CRCIntrinsicID = Intrinsic::arm64_crc32ch; break;
+  case ARM64::BI__builtin_arm_crc32w:
+    CRCIntrinsicID = Intrinsic::arm64_crc32w; break;
+  case ARM64::BI__builtin_arm_crc32cw:
+    CRCIntrinsicID = Intrinsic::arm64_crc32cw; break;
+  case ARM64::BI__builtin_arm_crc32d:
+    CRCIntrinsicID = Intrinsic::arm64_crc32x; break;
+  case ARM64::BI__builtin_arm_crc32cd:
+    CRCIntrinsicID = Intrinsic::arm64_crc32cx; break;
+  }
+
+  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
+    Value *Arg0 = EmitScalarExpr(E->getArg(0));
+    Value *Arg1 = EmitScalarExpr(E->getArg(1));
+    Function *F = CGM.getIntrinsic(CRCIntrinsicID);
+
+    llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
+    Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
+
+    return Builder.CreateCall2(F, Arg0, Arg1);
+  }
+
+  llvm::SmallVector<Value*, 4> Ops;
+  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
+    Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
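+  // Scalar (SISD) builtins are table driven: look the ID up in the sorted
+  // ARM64SISDIntrinsicMap and, if found, emit it through the common SISD path.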
+  llvm::ArrayRef<NeonIntrinsicInfo> SISDMap(ARM64SISDIntrinsicMap);
+  const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+      SISDMap, BuiltinID, ARM64SISDIntrinsicsProvenSorted);
+
+  if (Builtin) {
+    Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
+    Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
+    assert(Result && "SISD intrinsic should have been handled");
+    return Result;
+  }
+
+  llvm::APSInt Result;
+  const Expr *Arg = E->getArg(E->getNumArgs()-1);
+  NeonTypeFlags Type(0);
+  if (Arg->isIntegerConstantExpr(Result, getContext()))
+    // Determine the type of this overloaded NEON intrinsic.
+    Type = NeonTypeFlags(Result.getZExtValue());
+
+  bool usgn = Type.isUnsigned();
+  bool quad = Type.isQuad();
+
+  // Handle non-overloaded intrinsics first.
+  switch (BuiltinID) {
+  default: break;
+  case NEON::BI__builtin_neon_vldrq_p128: {
+    llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
+    Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
+    return Builder.CreateLoad(Ptr);
+  }
+  case NEON::BI__builtin_neon_vstrq_p128: {
+    llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
+    Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
+    return Builder.CreateStore(EmitScalarExpr(E->getArg(1)), Ptr);
+  }
+  case NEON::BI__builtin_neon_vcvts_u32_f32:
+  case NEON::BI__builtin_neon_vcvtd_u64_f64:
+    usgn = true;
+    // FALL THROUGH
+  case NEON::BI__builtin_neon_vcvts_s32_f32:
+  case NEON::BI__builtin_neon_vcvtd_s64_f64: {
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
+    llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
+    llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
+    Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
+    if (usgn)
+      return Builder.CreateFPToUI(Ops[0], InTy);
+    return Builder.CreateFPToSI(Ops[0], InTy);
+  }
+  case NEON::BI__builtin_neon_vcvts_f32_u32:
+  case NEON::BI__builtin_neon_vcvtd_f64_u64:
+    usgn = true;
+    // FALL THROUGH
+  case NEON::BI__builtin_neon_vcvts_f32_s32:
+  case NEON::BI__builtin_neon_vcvtd_f64_s64: {
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
+    llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
+    llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
+    Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
+    if (usgn)
+      return Builder.CreateUIToFP(Ops[0], FTy);
+    return Builder.CreateSIToFP(Ops[0], FTy);
+  }
+  case NEON::BI__builtin_neon_vpaddd_s64: {
+    llvm::Type *Ty =
+      llvm::VectorType::get(llvm::Type::getInt64Ty(getLLVMContext()), 2);
+    Value *Vec = EmitScalarExpr(E->getArg(0));
+    // The vector is v2i64, so make sure it's bitcast to that.
+    Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
+    llvm::Value *Idx0 = llvm::ConstantInt::get(Int32Ty, 0);
+    llvm::Value *Idx1 = llvm::ConstantInt::get(Int32Ty, 1);
+    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
+    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
+    // Pairwise addition of a v2i64 into a scalar i64.
+    return Builder.CreateAdd(Op0, Op1, "vpaddd");
+  }
+  case NEON::BI__builtin_neon_vpaddd_f64: {
+    llvm::Type *Ty =
+      llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2);
+    Value *Vec = EmitScalarExpr(E->getArg(0));
+    // The vector is v2f64, so make sure it's bitcast to that.
+    Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
+    llvm::Value *Idx0 = llvm::ConstantInt::get(Int32Ty, 0);
+    llvm::Value *Idx1 = llvm::ConstantInt::get(Int32Ty, 1);
+    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
+    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
+    // Pairwise addition of a v2f64 into a scalar f64.
+    return Builder.CreateFAdd(Op0, Op1, "vpaddd");
+  }
+  case NEON::BI__builtin_neon_vpadds_f32: {
+    llvm::Type *Ty =
+      llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2);
+    Value *Vec = EmitScalarExpr(E->getArg(0));
+    // The vector is v2f32, so make sure it's bitcast to that.
+    Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
+    llvm::Value *Idx0 = llvm::ConstantInt::get(Int32Ty, 0);
+    llvm::Value *Idx1 = llvm::ConstantInt::get(Int32Ty, 1);
+    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
+    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
+    // Pairwise addition of a v2f32 into a scalar f32.
+    return Builder.CreateFAdd(Op0, Op1, "vpaddd");
+  }
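+  // The scalar compare-against-zero builtins all go through
+  // EmitAArch64CompareBuiltinExpr, passing both the floating-point and the
+  // integer form of the comparison predicate.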
+  case NEON::BI__builtin_neon_vceqzd_s64:
+  case NEON::BI__builtin_neon_vceqzd_f64:
+  case NEON::BI__builtin_neon_vceqzs_f32:
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitAArch64CompareBuiltinExpr(
+        Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OEQ,
+        ICmpInst::ICMP_EQ, "vceqz");
+  case NEON::BI__builtin_neon_vcgezd_s64:
+  case NEON::BI__builtin_neon_vcgezd_f64:
+  case NEON::BI__builtin_neon_vcgezs_f32:
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitAArch64CompareBuiltinExpr(
+        Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OGE,
+        ICmpInst::ICMP_SGE, "vcgez");
+  case NEON::BI__builtin_neon_vclezd_s64:
+  case NEON::BI__builtin_neon_vclezd_f64:
+  case NEON::BI__builtin_neon_vclezs_f32:
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitAArch64CompareBuiltinExpr(
+        Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OLE,
+        ICmpInst::ICMP_SLE, "vclez");
+  case NEON::BI__builtin_neon_vcgtzd_s64:
+  case NEON::BI__builtin_neon_vcgtzd_f64:
+  case NEON::BI__builtin_neon_vcgtzs_f32:
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitAArch64CompareBuiltinExpr(
+        Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OGT,
+        ICmpInst::ICMP_SGT, "vcgtz");
+  case NEON::BI__builtin_neon_vcltzd_s64:
+  case NEON::BI__builtin_neon_vcltzd_f64:
+  case NEON::BI__builtin_neon_vcltzs_f32:
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitAArch64CompareBuiltinExpr(
+        Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OLT,
+        ICmpInst::ICMP_SLT, "vcltz");
+
+  case NEON::BI__builtin_neon_vceqzd_u64: {
+    llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[0] = Builder.CreateICmp(llvm::ICmpInst::ICMP_EQ, Ops[0],
+                                llvm::Constant::getNullValue(Ty));
+    return Builder.CreateSExt(Ops[0], Ty, "vceqzd");
+  }
+  case NEON::BI__builtin_neon_vceqd_f64:
+  case NEON::BI__builtin_neon_vcled_f64:
+  case NEON::BI__builtin_neon_vcltd_f64:
+  case NEON::BI__builtin_neon_vcged_f64:
+  case NEON::BI__builtin_neon_vcgtd_f64: {
+    llvm::CmpInst::Predicate P;
+    switch (BuiltinID) {
+    default: llvm_unreachable("missing builtin ID in switch!");
+    case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
+    case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
+    case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
+    case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
+    case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
+    }
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
+    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+    Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
+    Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+    return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
+  }
+  case NEON::BI__builtin_neon_vceqs_f32:
+  case NEON::BI__builtin_neon_vcles_f32:
+  case NEON::BI__builtin_neon_vclts_f32:
+  case NEON::BI__builtin_neon_vcges_f32:
+  case NEON::BI__builtin_neon_vcgts_f32: {
+    llvm::CmpInst::Predicate P;
+    switch (BuiltinID) {
+    default: llvm_unreachable("missing builtin ID in switch!");
+    case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
+    case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
+    case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
+    case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
+    case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
+    }
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
+    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
+    Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
+    Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+    return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
+  }
+  case NEON::BI__builtin_neon_vceqd_s64:
+  case NEON::BI__builtin_neon_vceqd_u64:
+  case NEON::BI__builtin_neon_vcgtd_s64:
+  case NEON::BI__builtin_neon_vcgtd_u64:
+  case NEON::BI__builtin_neon_vcltd_s64:
+  case NEON::BI__builtin_neon_vcltd_u64:
+  case NEON::BI__builtin_neon_vcged_u64:
+  case NEON::BI__builtin_neon_vcged_s64:
+  case NEON::BI__builtin_neon_vcled_u64:
+  case NEON::BI__builtin_neon_vcled_s64: {
+    llvm::CmpInst::Predicate P;
+    switch (BuiltinID) {
+    default: llvm_unreachable("missing builtin ID in switch!");
+    case NEON::BI__builtin_neon_vceqd_s64:
+    case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
+    case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
+    case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
+    case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
+    case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
+    case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
+    case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
+    case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
+    case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
+    }
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
+    Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
+    Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
+    return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
+  }
+  case NEON::BI__builtin_neon_vtstd_s64:
+  case NEON::BI__builtin_neon_vtstd_u64: {
+    llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
     Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
-                                ConstantAggregateZero::get(Ty));
-    return Builder.CreateSExt(Ops[0], Ty, "vtst");
+                                llvm::Constant::getNullValue(Ty));
+    return Builder.CreateSExt(Ops[0], Ty, "vtstd");
   }
-  case ARM::BI__builtin_neon_vtrn_v:
-  case ARM::BI__builtin_neon_vtrnq_v: {
+  case NEON::BI__builtin_neon_vset_lane_i8:
+  case NEON::BI__builtin_neon_vset_lane_i16:
+  case NEON::BI__builtin_neon_vset_lane_i32:
+  case NEON::BI__builtin_neon_vset_lane_i64:
+  case NEON::BI__builtin_neon_vset_lane_f32:
+  case NEON::BI__builtin_neon_vsetq_lane_i8:
+  case NEON::BI__builtin_neon_vsetq_lane_i16:
+  case NEON::BI__builtin_neon_vsetq_lane_i32:
+  case NEON::BI__builtin_neon_vsetq_lane_i64:
+  case NEON::BI__builtin_neon_vsetq_lane_f32:
+    Ops.push_back(EmitScalarExpr(E->getArg(2)));
+    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+  case NEON::BI__builtin_neon_vset_lane_f64:
+    // The vector type needs a cast for the v1f64 variant.
+    Ops[1] = Builder.CreateBitCast(Ops[1],
+                                   llvm::VectorType::get(DoubleTy, 1));
+    Ops.push_back(EmitScalarExpr(E->getArg(2)));
+    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+  case NEON::BI__builtin_neon_vsetq_lane_f64:
+    // The vector type needs a cast for the v2f64 variant.
+    Ops[1] = Builder.CreateBitCast(Ops[1],
+        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
+    Ops.push_back(EmitScalarExpr(E->getArg(2)));
+    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+
+  case NEON::BI__builtin_neon_vget_lane_i8:
+  case NEON::BI__builtin_neon_vdupb_lane_i8:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vget_lane");
+  case NEON::BI__builtin_neon_vgetq_lane_i8:
+  case NEON::BI__builtin_neon_vdupb_laneq_i8:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vgetq_lane");
+  case NEON::BI__builtin_neon_vget_lane_i16:
+  case NEON::BI__builtin_neon_vduph_lane_i16:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vget_lane");
+  case NEON::BI__builtin_neon_vgetq_lane_i16:
+  case NEON::BI__builtin_neon_vduph_laneq_i16:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vgetq_lane");
+  case NEON::BI__builtin_neon_vget_lane_i32:
+  case NEON::BI__builtin_neon_vdups_lane_i32:
+    Ops[0] = Builder.CreateBitCast(
+        Ops[0],
+        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 2));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vget_lane");
+  case NEON::BI__builtin_neon_vdups_lane_f32:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vdups_lane");
+  case NEON::BI__builtin_neon_vgetq_lane_i32:
+  case NEON::BI__builtin_neon_vdups_laneq_i32:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 4));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vgetq_lane");
+  case NEON::BI__builtin_neon_vget_lane_i64:
+  case NEON::BI__builtin_neon_vdupd_lane_i64:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 1));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vget_lane");
+  case NEON::BI__builtin_neon_vdupd_lane_f64:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vdupd_lane");
+  case NEON::BI__builtin_neon_vgetq_lane_i64:
+  case NEON::BI__builtin_neon_vdupd_laneq_i64:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 2));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vgetq_lane");
+  case NEON::BI__builtin_neon_vget_lane_f32:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vget_lane");
+  case NEON::BI__builtin_neon_vget_lane_f64:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vget_lane");
+  case NEON::BI__builtin_neon_vgetq_lane_f32:
+  case NEON::BI__builtin_neon_vdups_laneq_f32:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 4));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vgetq_lane");
+  case NEON::BI__builtin_neon_vgetq_lane_f64:
+  case NEON::BI__builtin_neon_vdupd_laneq_f64:
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
+    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+                                        "vgetq_lane");
+  case NEON::BI__builtin_neon_vaddd_s64:
+  case NEON::BI__builtin_neon_vaddd_u64:
+    return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
+  case NEON::BI__builtin_neon_vsubd_s64:
+  case NEON::BI__builtin_neon_vsubd_u64:
+    return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
+  case NEON::BI__builtin_neon_vqdmlalh_s16:
+  case NEON::BI__builtin_neon_vqdmlslh_s16: {
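+    // i16 is not a legal type on ARM64, so the saturating multiply is done on
+    // v4i16 vectors; lane 0 of the v4i32 product is then accumulated with a
+    // scalar saturating add or subtract.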
+    SmallVector<Value *, 2> ProductOps;
+    ProductOps.push_back(vectorWrapScalar16(Ops[1]));
+    ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
+    llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
+    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmull, VTy),
+                          ProductOps, "vqdmlXl");
+    Constant *CI = ConstantInt::get(Int32Ty, 0);
+    Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
+
+    unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
+                                        ? Intrinsic::arm64_neon_sqadd
+                                        : Intrinsic::arm64_neon_sqsub;
+    return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
+  }
+  case NEON::BI__builtin_neon_vqshlud_n_s64: {
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
+    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
+    llvm::Type *VTy = llvm::VectorType::get(Int64Ty, 1);
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqshlu, VTy),
+                          Ops, "vqshlu_n");
+    return Builder.CreateBitCast(Ops[0], Int64Ty);
+  }
+  case NEON::BI__builtin_neon_vqshld_n_u64:
+  case NEON::BI__builtin_neon_vqshld_n_s64: {
+    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
+                                   ? Intrinsic::arm64_neon_uqshl
+                                   : Intrinsic::arm64_neon_sqshl;
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
+    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
+    llvm::Type *VTy = llvm::VectorType::get(Int64Ty, 1);
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, "vqshl_n");
+    return Builder.CreateBitCast(Ops[0], Int64Ty);
+  }
+  case NEON::BI__builtin_neon_vrshrd_n_u64:
+  case NEON::BI__builtin_neon_vrshrd_n_s64: {
+    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
+                                   ? Intrinsic::arm64_neon_urshl
+                                   : Intrinsic::arm64_neon_srshl;
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
+    llvm::Type *VTy = llvm::VectorType::get(Int64Ty, 1);
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, "vrshr_n", 1, true);
+    return Builder.CreateBitCast(Ops[0], Int64Ty);
+  }
+  case NEON::BI__builtin_neon_vrsrad_n_u64:
+  case NEON::BI__builtin_neon_vrsrad_n_s64: {
+    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
+                                   ? Intrinsic::arm64_neon_urshl
+                                   : Intrinsic::arm64_neon_srshl;
+    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
+    Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
+    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Int64Ty), Ops[1],
+                                 Builder.CreateSExt(Ops[2], Int64Ty));
+    return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
+  }
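+  // Plain scalar shifts by an immediate are emitted as ordinary IR shifts,
+  // with the constant amount clamped to 63 (shifting an i64 by 64 or more
+  // would be undefined in the IR).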
+  case NEON::BI__builtin_neon_vshld_n_s64:
+  case NEON::BI__builtin_neon_vshld_n_u64: {
+    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+    return Builder.CreateShl(
+        Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
+                                                   Amt->getZExtValue())),
+        "vshr_n");
+  }
+  case NEON::BI__builtin_neon_vshrd_n_s64: {
+    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+    return Builder.CreateAShr(
+        Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
+                                                   Amt->getZExtValue())),
+        "vshr_n");
+  }
+  case NEON::BI__builtin_neon_vshrd_n_u64: {
+    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+    return Builder.CreateLShr(
+        Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
+                                                   Amt->getZExtValue())),
+        "vshr_n");
+  }
+  case NEON::BI__builtin_neon_vsrad_n_s64: {
+    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
+    Ops[1] = Builder.CreateAShr(
+        Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
+                                                   Amt->getZExtValue())),
+        "vshr_n");
+    return Builder.CreateAdd(Ops[0], Ops[1]);
+  }
+  case NEON::BI__builtin_neon_vsrad_n_u64: {
+    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
+    Ops[1] = Builder.CreateLShr(
+        Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
+                                                   Amt->getZExtValue())),
+        "vshr_n");
+    return Builder.CreateAdd(Ops[0], Ops[1]);
+  }
+  case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
+  case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
+  case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
+  case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
+    Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
+                                          "lane");
+    SmallVector<Value *, 2> ProductOps;
+    ProductOps.push_back(vectorWrapScalar16(Ops[1]));
+    ProductOps.push_back(vectorWrapScalar16(Ops[2]));
+    llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
+    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmull, VTy),
+                          ProductOps, "vqdmlXl");
+    Constant *CI = ConstantInt::get(Int32Ty, 0);
+    Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
+    Ops.pop_back();
+
+    unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
+                       BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
+                          ? Intrinsic::arm64_neon_sqadd
+                          : Intrinsic::arm64_neon_sqsub;
+    return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
+  }
+  case NEON::BI__builtin_neon_vqdmlals_s32:
+  case NEON::BI__builtin_neon_vqdmlsls_s32: {
+    SmallVector<Value *, 2> ProductOps;
+    ProductOps.push_back(Ops[1]);
+    ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
+    Ops[1] =
+        EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmulls_scalar),
+                     ProductOps, "vqdmlXl");
+
+    unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
+                                        ? Intrinsic::arm64_neon_sqadd
+                                        : Intrinsic::arm64_neon_sqsub;
+    return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
+  }
+  case NEON::BI__builtin_neon_vqdmlals_lane_s32:
+  case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
+  case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
+  case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
+    Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
+                                          "lane");
+    SmallVector<Value *, 2> ProductOps;
+    ProductOps.push_back(Ops[1]);
+    ProductOps.push_back(Ops[2]);
+    Ops[1] =
+        EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmulls_scalar),
+                     ProductOps, "vqdmlXl");
+    Ops.pop_back();
+
+    unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
+                       BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
+                          ? Intrinsic::arm64_neon_sqadd
+                          : Intrinsic::arm64_neon_sqsub;
+    return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
+  }
+  }
+
+  llvm::VectorType *VTy = GetNeonType(this, Type);
+  llvm::Type *Ty = VTy;
+  if (!Ty)
+    return 0;
+
+  // Not all intrinsics handled by the common case work for ARM64 yet, so only
+  // defer to common code if it's been added to our special map.
+  Builtin = findNeonIntrinsicInMap(ARM64SIMDIntrinsicMap, BuiltinID,
+                                   ARM64SIMDIntrinsicsProvenSorted);
+
+  if (Builtin)
+    return EmitCommonNeonBuiltinExpr(
+        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
+        Builtin->NameHint, Builtin->TypeModifier, E, Ops, 0);
+
+  if (Value *V = EmitARM64TblBuiltinExpr(*this, BuiltinID, E, Ops))
+    return V;
+
+  unsigned Int;
+  switch (BuiltinID) {
+  default: return 0;
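+  // vbsl is open-coded on the integer vector type as (a & b) | (~a & c)
+  // rather than calling a target intrinsic.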
+  case NEON::BI__builtin_neon_vbsl_v:
+  case NEON::BI__builtin_neon_vbslq_v: {
+    llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
+    Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
+    Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
+    Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
+
+    Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
+    Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
+    Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
+    return Builder.CreateBitCast(Ops[0], Ty);
+  }
+  case NEON::BI__builtin_neon_vfma_lane_v:
+  case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
+    // The ARM builtins (and instructions) have the addend as the first
+    // operand, but the 'fma' intrinsics have it last. Swap it around here.
+    Value *Addend = Ops[0];
+    Value *Multiplicand = Ops[1];
+    Value *LaneSource = Ops[2];
+    Ops[0] = Multiplicand;
+    Ops[1] = LaneSource;
+    Ops[2] = Addend;
+
+    // Now adjust things to handle the lane access.
+    llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
+      llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
+      VTy;
+    llvm::Constant *cst = cast<Constant>(Ops[3]);
+    Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
+    Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
+    Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
+
+    Ops.pop_back();
+    Int = Intrinsic::fma;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
+  }
+  case NEON::BI__builtin_neon_vfma_laneq_v: {
+    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+    // v1f64 fma should be mapped to Neon scalar f64 fma
+    if (VTy && VTy->getElementType() == DoubleTy) {
+      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
+      llvm::Type *VTy = GetNeonType(this,
+        NeonTypeFlags(NeonTypeFlags::Float64, false, true));
+      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
+      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+      Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
+      Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+      return Builder.CreateBitCast(Result, Ty);
+    }
+    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
+    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
+                                            VTy->getNumElements() * 2);
+    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
+    Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
+                                               cast<ConstantInt>(Ops[3]));
+    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
+
+    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vfmaq_laneq_v: {
+    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
+    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vfmas_lane_f32:
+  case NEON::BI__builtin_neon_vfmas_laneq_f32:
+  case NEON::BI__builtin_neon_vfmad_lane_f64:
+  case NEON::BI__builtin_neon_vfmad_laneq_f64: {
+    Ops.push_back(EmitScalarExpr(E->getArg(3)));
+    llvm::Type *Ty = ConvertType(E->getCallReturnType());
+    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+    Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vfms_v:
+  case NEON::BI__builtin_neon_vfmsq_v: {  // Only used for FP types
+    // FIXME: probably remove when we no longer support aarch64_simd.h
+    // (arm_neon.h delegates to vfma).
+
+    // The ARM builtins (and instructions) have the addend as the first
+    // operand, but the 'fma' intrinsics have it last. Swap it around here.
+    Value *Subtrahend = Ops[0];
+    Value *Multiplicand = Ops[2];
+    Ops[0] = Multiplicand;
+    Ops[2] = Subtrahend;
+    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+    Ops[1] = Builder.CreateFNeg(Ops[1]);
+    Int = Intrinsic::fma;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmls");
+  }
+  case NEON::BI__builtin_neon_vmull_v:
+    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+    Int = usgn ? Intrinsic::arm64_neon_umull : Intrinsic::arm64_neon_smull;
+    if (Type.isPoly()) Int = Intrinsic::arm64_neon_pmull;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
+  case NEON::BI__builtin_neon_vmax_v:
+  case NEON::BI__builtin_neon_vmaxq_v:
+    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+    Int = usgn ? Intrinsic::arm64_neon_umax : Intrinsic::arm64_neon_smax;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmax;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
+  case NEON::BI__builtin_neon_vmin_v:
+  case NEON::BI__builtin_neon_vminq_v:
+    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+    Int = usgn ? Intrinsic::arm64_neon_umin : Intrinsic::arm64_neon_smin;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmin;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
+  case NEON::BI__builtin_neon_vabd_v:
+  case NEON::BI__builtin_neon_vabdq_v:
+    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+    Int = usgn ? Intrinsic::arm64_neon_uabd : Intrinsic::arm64_neon_sabd;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fabd;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
+  case NEON::BI__builtin_neon_vpadal_v:
+  case NEON::BI__builtin_neon_vpadalq_v: {
+    unsigned ArgElts = VTy->getNumElements();
+    llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
+    unsigned BitWidth = EltTy->getBitWidth();
+    llvm::Type *ArgTy = llvm::VectorType::get(
+        llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
+    llvm::Type* Tys[2] = { VTy, ArgTy };
+    Int = usgn ? Intrinsic::arm64_neon_uaddlp : Intrinsic::arm64_neon_saddlp;
+    SmallVector<llvm::Value*, 1> TmpOps;
+    TmpOps.push_back(Ops[1]);
+    Function *F = CGM.getIntrinsic(Int, Tys);
+    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
+    llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
+    return Builder.CreateAdd(tmp, addend);
+  }
+  case NEON::BI__builtin_neon_vpmin_v:
+  case NEON::BI__builtin_neon_vpminq_v:
+    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+    Int = usgn ? Intrinsic::arm64_neon_uminp : Intrinsic::arm64_neon_sminp;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fminp;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
+  case NEON::BI__builtin_neon_vpmax_v:
+  case NEON::BI__builtin_neon_vpmaxq_v:
+    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+    Int = usgn ? Intrinsic::arm64_neon_umaxp : Intrinsic::arm64_neon_smaxp;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmaxp;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
+  case NEON::BI__builtin_neon_vminnm_v:
+  case NEON::BI__builtin_neon_vminnmq_v:
+    Int = Intrinsic::arm64_neon_fminnm;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
+  case NEON::BI__builtin_neon_vmaxnm_v:
+  case NEON::BI__builtin_neon_vmaxnmq_v:
+    Int = Intrinsic::arm64_neon_fmaxnm;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
+  case NEON::BI__builtin_neon_vrecpss_f32: {
+    llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext());
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_frecps, f32Type),
+                        Ops, "vrecps");
+  }
+  case NEON::BI__builtin_neon_vrecpsd_f64: {
+    llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext());
+    Ops.push_back(EmitScalarExpr(E->getArg(1)));
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_frecps, f64Type),
+                        Ops, "vrecps");
+  }
+  case NEON::BI__builtin_neon_vrshr_n_v:
+  case NEON::BI__builtin_neon_vrshrq_n_v:
+    // FIXME: this can be shared with 32-bit ARM, but not AArch64 at the
+    // moment. After the final merge it should be added to
+    // EmitCommonNeonBuiltinExpr.
+    Int = usgn ? Intrinsic::arm64_neon_urshl : Intrinsic::arm64_neon_srshl;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
+  case NEON::BI__builtin_neon_vqshlu_n_v:
+  case NEON::BI__builtin_neon_vqshluq_n_v:
+    // FIXME: AArch64 and ARM use different intrinsics for this, but are
+    // essentially compatible. It should be in EmitCommonNeonBuiltinExpr after
+    // the final merge.
+    Int = Intrinsic::arm64_neon_sqshlu;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false);
+  case NEON::BI__builtin_neon_vqshrun_n_v:
+    // FIXME: as above
+    Int = Intrinsic::arm64_neon_sqshrun;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
+  case NEON::BI__builtin_neon_vqrshrun_n_v:
+    // FIXME: and again.
+    Int = Intrinsic::arm64_neon_sqrshrun;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
+  case NEON::BI__builtin_neon_vqshrn_n_v:
+    // FIXME: same as above; share via EmitCommonNeonBuiltinExpr after the merge.
+    Int = usgn ? Intrinsic::arm64_neon_uqshrn : Intrinsic::arm64_neon_sqshrn;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
+  case NEON::BI__builtin_neon_vrshrn_n_v:
+    // FIXME: there might be a pattern here.
+    Int = Intrinsic::arm64_neon_rshrn;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
+  case NEON::BI__builtin_neon_vqrshrn_n_v:
+    // FIXME: same as above.
+    Int = usgn ? Intrinsic::arm64_neon_uqrshrn : Intrinsic::arm64_neon_sqrshrn;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
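+  // The vrnd* builtins map onto the generic rounding intrinsics (round,
+  // nearbyint, floor, ceil, rint, trunc); only vrndn needs the target
+  // frintn intrinsic.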
+  case NEON::BI__builtin_neon_vrnda_v:
+  case NEON::BI__builtin_neon_vrndaq_v: {
+    Int = Intrinsic::round;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
+  }
+  case NEON::BI__builtin_neon_vrndi_v:
+  case NEON::BI__builtin_neon_vrndiq_v: {
+    Int = Intrinsic::nearbyint;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
+  }
+  case NEON::BI__builtin_neon_vrndm_v:
+  case NEON::BI__builtin_neon_vrndmq_v: {
+    Int = Intrinsic::floor;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
+  }
+  case NEON::BI__builtin_neon_vrndn_v:
+  case NEON::BI__builtin_neon_vrndnq_v: {
+    Int = Intrinsic::arm64_neon_frintn;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
+  }
+  case NEON::BI__builtin_neon_vrndp_v:
+  case NEON::BI__builtin_neon_vrndpq_v: {
+    Int = Intrinsic::ceil;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
+  }
+  case NEON::BI__builtin_neon_vrndx_v:
+  case NEON::BI__builtin_neon_vrndxq_v: {
+    Int = Intrinsic::rint;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
+  }
+  case NEON::BI__builtin_neon_vrnd_v:
+  case NEON::BI__builtin_neon_vrndq_v: {
+    Int = Intrinsic::trunc;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
+  }
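+  // The compare-against-zero builtins all go through
+  // EmitAArch64CompareBuiltinExpr, passing both the FP and the integer
+  // predicate for the comparison.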
+  case NEON::BI__builtin_neon_vceqz_v:
+  case NEON::BI__builtin_neon_vceqzq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
+                                         ICmpInst::ICMP_EQ, "vceqz");
+  case NEON::BI__builtin_neon_vcgez_v:
+  case NEON::BI__builtin_neon_vcgezq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
+                                         ICmpInst::ICMP_SGE, "vcgez");
+  case NEON::BI__builtin_neon_vclez_v:
+  case NEON::BI__builtin_neon_vclezq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
+                                         ICmpInst::ICMP_SLE, "vclez");
+  case NEON::BI__builtin_neon_vcgtz_v:
+  case NEON::BI__builtin_neon_vcgtzq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
+                                         ICmpInst::ICMP_SGT, "vcgtz");
+  case NEON::BI__builtin_neon_vcltz_v:
+  case NEON::BI__builtin_neon_vcltzq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
+                                         ICmpInst::ICMP_SLT, "vcltz");
+  case NEON::BI__builtin_neon_vcvt_f64_v:
+  case NEON::BI__builtin_neon_vcvtq_f64_v:
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
+    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+  case NEON::BI__builtin_neon_vcvt_f64_f32: {
+    assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
+           "unexpected vcvt_f64_f32 builtin");
+    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
+    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
+
+    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
+  }
+  case NEON::BI__builtin_neon_vcvt_f32_f64: {
+    assert(Type.getEltType() == NeonTypeFlags::Float32 &&
+           "unexpected vcvt_f32_f64 builtin");
+    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
+    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
+
+    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
+  }
+  case NEON::BI__builtin_neon_vcvt_s32_v:
+  case NEON::BI__builtin_neon_vcvt_u32_v:
+  case NEON::BI__builtin_neon_vcvt_s64_v:
+  case NEON::BI__builtin_neon_vcvt_u64_v:
+  case NEON::BI__builtin_neon_vcvtq_s32_v:
+  case NEON::BI__builtin_neon_vcvtq_u32_v:
+  case NEON::BI__builtin_neon_vcvtq_s64_v:
+  case NEON::BI__builtin_neon_vcvtq_u64_v: {
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *InTy =
+      GetNeonType(this,
+                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                : NeonTypeFlags::Float32, false, quad));
+    Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
+    if (usgn)
+      return Builder.CreateFPToUI(Ops[0], Ty);
+    return Builder.CreateFPToSI(Ops[0], Ty);
+  }
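+  // vcvta/vcvtm/vcvtn/vcvtp pick the fcvt intrinsic for the requested
+  // rounding mode (ties-away, toward -inf, ties-to-even, toward +inf) and
+  // overload it on both the integer result type and the FP source type.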
+  case NEON::BI__builtin_neon_vcvta_s32_v:
+  case NEON::BI__builtin_neon_vcvtaq_s32_v:
+  case NEON::BI__builtin_neon_vcvta_u32_v:
+  case NEON::BI__builtin_neon_vcvtaq_u32_v:
+  case NEON::BI__builtin_neon_vcvta_s64_v:
+  case NEON::BI__builtin_neon_vcvtaq_s64_v:
+  case NEON::BI__builtin_neon_vcvta_u64_v:
+  case NEON::BI__builtin_neon_vcvtaq_u64_v: {
+    Int = usgn ? Intrinsic::arm64_neon_fcvtau : Intrinsic::arm64_neon_fcvtas;
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *InTy =
+      GetNeonType(this,
+                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                : NeonTypeFlags::Float32, false, quad));
+    llvm::Type *Tys[2] = { Ty, InTy };
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
+  }
+  case NEON::BI__builtin_neon_vcvtm_s32_v:
+  case NEON::BI__builtin_neon_vcvtmq_s32_v:
+  case NEON::BI__builtin_neon_vcvtm_u32_v:
+  case NEON::BI__builtin_neon_vcvtmq_u32_v:
+  case NEON::BI__builtin_neon_vcvtm_s64_v:
+  case NEON::BI__builtin_neon_vcvtmq_s64_v:
+  case NEON::BI__builtin_neon_vcvtm_u64_v:
+  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
+    Int = usgn ? Intrinsic::arm64_neon_fcvtmu : Intrinsic::arm64_neon_fcvtms;
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *InTy =
+      GetNeonType(this,
+                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                : NeonTypeFlags::Float32, false, quad));
+    llvm::Type *Tys[2] = { Ty, InTy };
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
+  }
+  case NEON::BI__builtin_neon_vcvtn_s32_v:
+  case NEON::BI__builtin_neon_vcvtnq_s32_v:
+  case NEON::BI__builtin_neon_vcvtn_u32_v:
+  case NEON::BI__builtin_neon_vcvtnq_u32_v:
+  case NEON::BI__builtin_neon_vcvtn_s64_v:
+  case NEON::BI__builtin_neon_vcvtnq_s64_v:
+  case NEON::BI__builtin_neon_vcvtn_u64_v:
+  case NEON::BI__builtin_neon_vcvtnq_u64_v: {
+    Int = usgn ? Intrinsic::arm64_neon_fcvtnu : Intrinsic::arm64_neon_fcvtns;
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *InTy =
+      GetNeonType(this,
+                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                : NeonTypeFlags::Float32, false, quad));
+    llvm::Type *Tys[2] = { Ty, InTy };
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
+  }
+  case NEON::BI__builtin_neon_vcvtp_s32_v:
+  case NEON::BI__builtin_neon_vcvtpq_s32_v:
+  case NEON::BI__builtin_neon_vcvtp_u32_v:
+  case NEON::BI__builtin_neon_vcvtpq_u32_v:
+  case NEON::BI__builtin_neon_vcvtp_s64_v:
+  case NEON::BI__builtin_neon_vcvtpq_s64_v:
+  case NEON::BI__builtin_neon_vcvtp_u64_v:
+  case NEON::BI__builtin_neon_vcvtpq_u64_v: {
+    Int = usgn ? Intrinsic::arm64_neon_fcvtpu : Intrinsic::arm64_neon_fcvtps;
+    bool Double =
+      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
+    llvm::Type *InTy =
+      GetNeonType(this,
+                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
+                                : NeonTypeFlags::Float32, false, quad));
+    llvm::Type *Tys[2] = { Ty, InTy };
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
+  }
+  case NEON::BI__builtin_neon_vmulx_v:
+  case NEON::BI__builtin_neon_vmulxq_v: {
+    Int = Intrinsic::arm64_neon_fmulx;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
+  }
+  case NEON::BI__builtin_neon_vmul_lane_v:
+  case NEON::BI__builtin_neon_vmul_laneq_v: {
+    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
+    bool Quad = BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v;
+    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+    llvm::Type *VTy = GetNeonType(this,
+      NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
+    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
+    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
+    return Builder.CreateBitCast(Result, Ty);
+  }
+  case NEON::BI__builtin_neon_vnegd_s64:
+    return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
+  case NEON::BI__builtin_neon_vpmaxnm_v:
+  case NEON::BI__builtin_neon_vpmaxnmq_v: {
+    Int = Intrinsic::arm64_neon_fmaxnmp;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
+  }
+  case NEON::BI__builtin_neon_vpminnm_v:
+  case NEON::BI__builtin_neon_vpminnmq_v: {
+    Int = Intrinsic::arm64_neon_fminnmp;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
+  }
+  case NEON::BI__builtin_neon_vsqrt_v:
+  case NEON::BI__builtin_neon_vsqrtq_v: {
+    Int = Intrinsic::sqrt;
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
+  }
+  case NEON::BI__builtin_neon_vrbit_v:
+  case NEON::BI__builtin_neon_vrbitq_v: {
+    Int = Intrinsic::arm64_neon_rbit;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
+  }
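+  // The across-vector reductions below (vaddv, vmaxv, vminv, vaddlv) follow
+  // the same pattern: call the i32-returning arm64 reduction intrinsic on
+  // the source vector, then truncate to the narrower result type where one
+  // is expected.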
+  case NEON::BI__builtin_neon_vaddv_u8:
+    // FIXME: These are handled by the AArch64 scalar code.
+    usgn = true;
+    // FALLTHROUGH
+  case NEON::BI__builtin_neon_vaddv_s8: {
+    Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vaddv_u16:
+    usgn = true;
+    // FALLTHROUGH
+  case NEON::BI__builtin_neon_vaddv_s16: {
+    Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vaddvq_u8:
+    usgn = true;
+    // FALLTHROUGH
+  case NEON::BI__builtin_neon_vaddvq_s8: {
+    Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vaddvq_u16:
+    usgn = true;
+    // FALLTHROUGH
+  case NEON::BI__builtin_neon_vaddvq_s16: {
+    Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vmaxv_u8: {
+    Int = Intrinsic::arm64_neon_umaxv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vmaxv_u16: {
+    Int = Intrinsic::arm64_neon_umaxv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vmaxvq_u8: {
+    Int = Intrinsic::arm64_neon_umaxv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vmaxvq_u16: {
+    Int = Intrinsic::arm64_neon_umaxv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vmaxv_s8: {
+    Int = Intrinsic::arm64_neon_smaxv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vmaxv_s16: {
+    Int = Intrinsic::arm64_neon_smaxv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vmaxvq_s8: {
+    Int = Intrinsic::arm64_neon_smaxv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vmaxvq_s16: {
+    Int = Intrinsic::arm64_neon_smaxv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vminv_u8: {
+    Int = Intrinsic::arm64_neon_uminv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vminv_u16: {
+    Int = Intrinsic::arm64_neon_uminv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vminvq_u8: {
+    Int = Intrinsic::arm64_neon_uminv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vminvq_u16: {
+    Int = Intrinsic::arm64_neon_uminv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vminv_s8: {
+    Int = Intrinsic::arm64_neon_sminv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vminv_s16: {
+    Int = Intrinsic::arm64_neon_sminv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vminvq_s8: {
+    Int = Intrinsic::arm64_neon_sminv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 8));
+  }
+  case NEON::BI__builtin_neon_vminvq_s16: {
+    Int = Intrinsic::arm64_neon_sminv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vmul_n_f64: {
+    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+    Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
+    return Builder.CreateFMul(Ops[0], RHS);
+  }
+  case NEON::BI__builtin_neon_vaddlv_u8: {
+    Int = Intrinsic::arm64_neon_uaddlv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vaddlv_u16: {
+    Int = Intrinsic::arm64_neon_uaddlv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+  }
+  case NEON::BI__builtin_neon_vaddlvq_u8: {
+    Int = Intrinsic::arm64_neon_uaddlv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vaddlvq_u16: {
+    Int = Intrinsic::arm64_neon_uaddlv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+  }
+  case NEON::BI__builtin_neon_vaddlv_s8: {
+    Int = Intrinsic::arm64_neon_saddlv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vaddlv_s16: {
+    Int = Intrinsic::arm64_neon_saddlv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+  }
+  case NEON::BI__builtin_neon_vaddlvq_s8: {
+    Int = Intrinsic::arm64_neon_saddlv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+    return Builder.CreateTrunc(Ops[0],
+             llvm::IntegerType::get(getLLVMContext(), 16));
+  }
+  case NEON::BI__builtin_neon_vaddlvq_s16: {
+    Int = Intrinsic::arm64_neon_saddlv;
+    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
+    VTy =
+      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+    llvm::Type *Tys[2] = { Ty, VTy };
+    Ops.push_back(EmitScalarExpr(E->getArg(0)));
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+  }
+  case NEON::BI__builtin_neon_vsri_n_v:
+  case NEON::BI__builtin_neon_vsriq_n_v: {
+    Int = Intrinsic::arm64_neon_vsri;
+    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
+    return EmitNeonCall(Intrin, Ops, "vsri_n");
+  }
+  case NEON::BI__builtin_neon_vsli_n_v:
+  case NEON::BI__builtin_neon_vsliq_n_v: {
+    Int = Intrinsic::arm64_neon_vsli;
+    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
+    return EmitNeonCall(Intrin, Ops, "vsli_n");
+  }
+  case NEON::BI__builtin_neon_vsra_n_v:
+  case NEON::BI__builtin_neon_vsraq_n_v:
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
+    return Builder.CreateAdd(Ops[0], Ops[1]);
+  case NEON::BI__builtin_neon_vrsra_n_v:
+  case NEON::BI__builtin_neon_vrsraq_n_v: {
+    Int = usgn ? Intrinsic::arm64_neon_urshl : Intrinsic::arm64_neon_srshl;
+    SmallVector<llvm::Value*,2> TmpOps;
+    TmpOps.push_back(Ops[1]);
+    TmpOps.push_back(Ops[2]);
+    Function* F = CGM.getIntrinsic(Int, Ty);
+    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
+    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
+    return Builder.CreateAdd(Ops[0], tmp);
+  }
+    // FIXME: Sharing loads & stores with 32-bit is complicated by the absence
+    // of an Align parameter here.
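+    // The ld1xN intrinsics return a struct of N vectors, which is stored
+    // through the destination pointer passed as the first builtin operand.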
+  case NEON::BI__builtin_neon_vld1_x2_v:
+  case NEON::BI__builtin_neon_vld1q_x2_v:
+  case NEON::BI__builtin_neon_vld1_x3_v:
+  case NEON::BI__builtin_neon_vld1q_x3_v:
+  case NEON::BI__builtin_neon_vld1_x4_v:
+  case NEON::BI__builtin_neon_vld1q_x4_v: {
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+    llvm::Type *Tys[2] = { VTy, PTy };
+    unsigned Int;
+    switch (BuiltinID) {
+    case NEON::BI__builtin_neon_vld1_x2_v:
+    case NEON::BI__builtin_neon_vld1q_x2_v:
+      Int = Intrinsic::arm64_neon_ld1x2;
+      break;
+    case NEON::BI__builtin_neon_vld1_x3_v:
+    case NEON::BI__builtin_neon_vld1q_x3_v:
+      Int = Intrinsic::arm64_neon_ld1x3;
+      break;
+    case NEON::BI__builtin_neon_vld1_x4_v:
+    case NEON::BI__builtin_neon_vld1q_x4_v:
+      Int = Intrinsic::arm64_neon_ld1x4;
+      break;
+    }
+    Function *F = CGM.getIntrinsic(Int, Tys);
+    Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
+    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
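+  // For the st1xN stores the data operands come first and the pointer last,
+  // so the builtin's pointer argument (Ops[0]) is rotated to the end before
+  // calling the intrinsic.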
+  case NEON::BI__builtin_neon_vst1_x2_v:
+  case NEON::BI__builtin_neon_vst1q_x2_v:
+  case NEON::BI__builtin_neon_vst1_x3_v:
+  case NEON::BI__builtin_neon_vst1q_x3_v:
+  case NEON::BI__builtin_neon_vst1_x4_v:
+  case NEON::BI__builtin_neon_vst1q_x4_v: {
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+    llvm::Type *Tys[2] = { VTy, PTy };
+    unsigned Int;
+    switch (BuiltinID) {
+    case NEON::BI__builtin_neon_vst1_x2_v:
+    case NEON::BI__builtin_neon_vst1q_x2_v:
+      Int = Intrinsic::arm64_neon_st1x2;
+      break;
+    case NEON::BI__builtin_neon_vst1_x3_v:
+    case NEON::BI__builtin_neon_vst1q_x3_v:
+      Int = Intrinsic::arm64_neon_st1x3;
+      break;
+    case NEON::BI__builtin_neon_vst1_x4_v:
+    case NEON::BI__builtin_neon_vst1q_x4_v:
+      Int = Intrinsic::arm64_neon_st1x4;
+      break;
+    }
+    SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end());
+    IntOps.push_back(Ops[0]);
+    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), IntOps, "");
+  }
+  case NEON::BI__builtin_neon_vld1_v:
+  case NEON::BI__builtin_neon_vld1q_v:
+    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
+    return Builder.CreateLoad(Ops[0]);
+  case NEON::BI__builtin_neon_vst1_v:
+  case NEON::BI__builtin_neon_vst1q_v:
+    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
+    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  case NEON::BI__builtin_neon_vld1_lane_v:
+  case NEON::BI__builtin_neon_vld1q_lane_v:
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[0] = Builder.CreateLoad(Ops[0]);
+    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+  case NEON::BI__builtin_neon_vld1_dup_v:
+  case NEON::BI__builtin_neon_vld1q_dup_v: {
+    Value *V = UndefValue::get(Ty);
+    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[0] = Builder.CreateLoad(Ops[0]);
+    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
+    return EmitNeonSplat(Ops[0], CI);
+  }
+  case NEON::BI__builtin_neon_vst1_lane_v:
+  case NEON::BI__builtin_neon_vst1q_lane_v:
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+    return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
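+  // vld2/vld3/vld4 return a struct of N vectors; the result is stored
+  // through the destination pointer that arrives as the first builtin
+  // operand.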
+  case NEON::BI__builtin_neon_vld2_v:
+  case NEON::BI__builtin_neon_vld2q_v: {
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
+    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+    llvm::Type *Tys[2] = { VTy, PTy };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2, Tys);
+    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+                llvm::PointerType::getUnqual(Ops[1]->getType()));
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vld3_v:
+  case NEON::BI__builtin_neon_vld3q_v: {
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
+    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+    llvm::Type *Tys[2] = { VTy, PTy };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3, Tys);
+    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+                llvm::PointerType::getUnqual(Ops[1]->getType()));
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vld4_v:
+  case NEON::BI__builtin_neon_vld4q_v: {
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
+    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+    llvm::Type *Tys[2] = { VTy, PTy };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4, Tys);
+    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+                llvm::PointerType::getUnqual(Ops[1]->getType()));
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
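+  // The dup variants load through the ldNr intrinsics, so the pointer
+  // operand is bitcast to the element type rather than to the full vector
+  // type.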
+  case NEON::BI__builtin_neon_vld2_dup_v:
+  case NEON::BI__builtin_neon_vld2q_dup_v: {
+    llvm::Type *PTy =
+      llvm::PointerType::getUnqual(VTy->getElementType());
+    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+    llvm::Type *Tys[2] = { VTy, PTy };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2r, Tys);
+    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+                llvm::PointerType::getUnqual(Ops[1]->getType()));
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vld3_dup_v:
+  case NEON::BI__builtin_neon_vld3q_dup_v: {
+    llvm::Type *PTy =
+      llvm::PointerType::getUnqual(VTy->getElementType());
+    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+    llvm::Type *Tys[2] = { VTy, PTy };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3r, Tys);
+    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+                llvm::PointerType::getUnqual(Ops[1]->getType()));
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vld4_dup_v:
+  case NEON::BI__builtin_neon_vld4q_dup_v: {
+    llvm::Type *PTy =
+      llvm::PointerType::getUnqual(VTy->getElementType());
+    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+    llvm::Type *Tys[2] = { VTy, PTy };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4r, Tys);
+    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
+    Ops[0] = Builder.CreateBitCast(Ops[0],
+                llvm::PointerType::getUnqual(Ops[1]->getType()));
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
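+  // For the lane loads the pointer operand is moved to the end of the
+  // argument list and the lane index is zero-extended to i64, matching the
+  // ldNlane intrinsic signatures.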
+  case NEON::BI__builtin_neon_vld2_lane_v:
+  case NEON::BI__builtin_neon_vld2q_lane_v: {
+    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2lane, Tys);
+    Ops.push_back(Ops[1]);
+    Ops.erase(Ops.begin()+1);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+    Ops[3] = Builder.CreateZExt(Ops[3],
+                llvm::IntegerType::get(getLLVMContext(), 64));
+    Ops[1] = Builder.CreateCall(F,
+                ArrayRef<Value*>(Ops).slice(1), "vld2_lane");
+    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vld3_lane_v:
+  case NEON::BI__builtin_neon_vld3q_lane_v: {
+    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3lane, Tys);
+    Ops.push_back(Ops[1]);
+    Ops.erase(Ops.begin()+1);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+    Ops[4] = Builder.CreateZExt(Ops[4],
+                llvm::IntegerType::get(getLLVMContext(), 64));
+    Ops[1] = Builder.CreateCall(F,
+                ArrayRef<Value*>(Ops).slice(1), "vld3_lane");
+    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case NEON::BI__builtin_neon_vld4_lane_v:
+  case NEON::BI__builtin_neon_vld4q_lane_v: {
+    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4lane, Tys);
+    Ops.push_back(Ops[1]);
+    Ops.erase(Ops.begin()+1);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+    Ops[5] = Builder.CreateZExt(Ops[5],
+                llvm::IntegerType::get(getLLVMContext(), 64));
+    Ops[1] = Builder.CreateCall(F,
+                ArrayRef<Value*>(Ops).slice(1), "vld4_lane");
+    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
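+  // The stN stores likewise rotate the pointer operand to the end, and the
+  // lane variants zero-extend the lane index to i64.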
+  case NEON::BI__builtin_neon_vst2_v:
+  case NEON::BI__builtin_neon_vst2q_v: {
+    Ops.push_back(Ops[0]);
+    Ops.erase(Ops.begin());
+    llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st2, Tys),
+                        Ops, "");
+  }
+  case NEON::BI__builtin_neon_vst2_lane_v:
+  case NEON::BI__builtin_neon_vst2q_lane_v: {
+    Ops.push_back(Ops[0]);
+    Ops.erase(Ops.begin());
+    Ops[2] = Builder.CreateZExt(Ops[2],
+                llvm::IntegerType::get(getLLVMContext(), 64));
+    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st2lane, Tys),
+                        Ops, "");
+  }
+  case NEON::BI__builtin_neon_vst3_v:
+  case NEON::BI__builtin_neon_vst3q_v: {
+    Ops.push_back(Ops[0]);
+    Ops.erase(Ops.begin());
+    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st3, Tys),
+                        Ops, "");
+  }
+  case NEON::BI__builtin_neon_vst3_lane_v:
+  case NEON::BI__builtin_neon_vst3q_lane_v: {
+    Ops.push_back(Ops[0]);
+    Ops.erase(Ops.begin());
+    Ops[3] = Builder.CreateZExt(Ops[3],
+                llvm::IntegerType::get(getLLVMContext(), 64));
+    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st3lane, Tys),
+                        Ops, "");
+  }
+  case NEON::BI__builtin_neon_vst4_v:
+  case NEON::BI__builtin_neon_vst4q_v: {
+    Ops.push_back(Ops[0]);
+    Ops.erase(Ops.begin());
+    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st4, Tys),
+                        Ops, "");
+  }
+  case NEON::BI__builtin_neon_vst4_lane_v:
+  case NEON::BI__builtin_neon_vst4q_lane_v: {
+    Ops.push_back(Ops[0]);
+    Ops.erase(Ops.begin());
+    Ops[4] = Builder.CreateZExt(Ops[4],
+                llvm::IntegerType::get(getLLVMContext(), 64));
+    llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st4lane, Tys),
+                        Ops, "");
+  }
+  case NEON::BI__builtin_neon_vtrn_v:
+  case NEON::BI__builtin_neon_vtrnq_v: {
     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
@@ -4865,8 +6714,8 @@
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<Constant*, 16> Indices;
       for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
-        Indices.push_back(Builder.getInt32(i+vi));
-        Indices.push_back(Builder.getInt32(i+e+vi));
+        Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
+        Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
       }
       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
       SV = llvm::ConstantVector::get(Indices);
@@ -4875,8 +6724,8 @@
     }
     return SV;
   }
-  case ARM::BI__builtin_neon_vuzp_v:
-  case ARM::BI__builtin_neon_vuzpq_v: {
+  case NEON::BI__builtin_neon_vuzp_v:
+  case NEON::BI__builtin_neon_vuzpq_v: {
     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
@@ -4894,8 +6743,8 @@
     }
     return SV;
   }
-  case ARM::BI__builtin_neon_vzip_v:
-  case ARM::BI__builtin_neon_vzipq_v: {
+  case NEON::BI__builtin_neon_vzip_v:
+  case NEON::BI__builtin_neon_vzipq_v: {
     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
@@ -4914,6 +6763,48 @@
     }
     return SV;
   }
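+  // The quad table-lookup builtins map directly onto the arm64 tbl/tbx
+  // intrinsics.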
+  case NEON::BI__builtin_neon_vqtbl1q_v: {
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl1, Ty),
+                        Ops, "vtbl1");
+  }
+  case NEON::BI__builtin_neon_vqtbl2q_v: {
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl2, Ty),
+                        Ops, "vtbl2");
+  }
+  case NEON::BI__builtin_neon_vqtbl3q_v: {
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl3, Ty),
+                        Ops, "vtbl3");
+  }
+  case NEON::BI__builtin_neon_vqtbl4q_v: {
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl4, Ty),
+                        Ops, "vtbl4");
+  }
+  case NEON::BI__builtin_neon_vqtbx1q_v: {
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx1, Ty),
+                        Ops, "vtbx1");
+  }
+  case NEON::BI__builtin_neon_vqtbx2q_v: {
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx2, Ty),
+                        Ops, "vtbx2");
+  }
+  case NEON::BI__builtin_neon_vqtbx3q_v: {
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx3, Ty),
+                        Ops, "vtbx3");
+  }
+  case NEON::BI__builtin_neon_vqtbx4q_v: {
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx4, Ty),
+                        Ops, "vtbx4");
+  }
+  case NEON::BI__builtin_neon_vsqadd_v:
+  case NEON::BI__builtin_neon_vsqaddq_v: {
+    Int = Intrinsic::arm64_neon_usqadd;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
+  }
+  case NEON::BI__builtin_neon_vuqadd_v:
+  case NEON::BI__builtin_neon_vuqaddq_v: {
+    Int = Intrinsic::arm64_neon_suqadd;
+    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
+  }
   }
 }
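Note on the vqtbl*/vqtbx* cases added above: they are forwarded directly to the corresponding arm64_neon_tbl*/tbx* intrinsics. For orientation, a minimal user-level sketch of the table-lookup form, assuming an AArch64 target with arm_neon.h (the helper name is illustrative, not part of this patch):

    #include <arm_neon.h>

    // vqtbl1q_u8 is the arm_neon.h spelling that reaches
    // __builtin_neon_vqtbl1q_v and hence arm64_neon_tbl1.
    uint8x16_t reverse_bytes(uint8x16_t v) {
      const uint8_t rev[16] = {15, 14, 13, 12, 11, 10, 9, 8,
                               7,  6,  5,  4,  3,  2,  1, 0};
      uint8x16_t idx = vld1q_u8(rev);
      // Each lane of 'idx' selects one byte of the table register 'v'.
      return vqtbl1q_u8(v, idx);
    }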
 
@@ -4970,6 +6861,14 @@
 
   switch (BuiltinID) {
   default: return 0;
+  case X86::BI_mm_prefetch: {
+    Value *Address = EmitScalarExpr(E->getArg(0));
+    Value *RW = ConstantInt::get(Int32Ty, 0);
+    Value *Locality = EmitScalarExpr(E->getArg(1));
+    Value *Data = ConstantInt::get(Int32Ty, 1);
+    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+    return Builder.CreateCall4(F, Address, RW, Locality, Data);
+  }
   case X86::BI__builtin_ia32_vec_init_v8qi:
   case X86::BI__builtin_ia32_vec_init_v4hi:
   case X86::BI__builtin_ia32_vec_init_v2si:
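The new X86::BI_mm_prefetch case lowers _mm_prefetch onto llvm.prefetch, hard-coding the read/write operand to 0 (read) and the cache-type operand to 1 (data), and passing the hint argument through as the locality. A minimal sketch of the user-level call, assuming an x86 target with xmmintrin.h:

    #include <xmmintrin.h>

    void warm_up(const float *p) {
      // The hint (_MM_HINT_T0 here) becomes the locality operand of
      // llvm.prefetch; the builtin always emits a read prefetch.
      _mm_prefetch(reinterpret_cast<const char *>(p), _MM_HINT_T0);
    }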
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index 0ebf1aa..fb11751 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -17,9 +17,9 @@
 #include "CodeGenModule.h"
 #include "clang/AST/Decl.h"
 #include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DerivedTypes.h"
-#include "llvm/Support/CallSite.h"
 #include <vector>
 
 using namespace clang;
@@ -39,7 +39,7 @@
 public:
   CGNVCUDARuntime(CodeGenModule &CGM);
 
-  void EmitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
+  void EmitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args) override;
 };
 
 }
diff --git a/lib/CodeGen/CGCUDARuntime.cpp b/lib/CodeGen/CGCUDARuntime.cpp
index eaf31bb..54a28f5 100644
--- a/lib/CodeGen/CGCUDARuntime.cpp
+++ b/lib/CodeGen/CGCUDARuntime.cpp
@@ -31,7 +31,8 @@
   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("kcall.end");
 
   CodeGenFunction::ConditionalEvaluation eval(CGF);
-  CGF.EmitBranchOnBoolExpr(E->getConfig(), ContBlock, ConfigOKBlock);
+  CGF.EmitBranchOnBoolExpr(E->getConfig(), ContBlock, ConfigOKBlock,
+                           /*TrueCount=*/0);
 
   eval.begin(CGF);
   CGF.EmitBlock(ConfigOKBlock);
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index cfb2d62..ef29af7 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -35,7 +35,7 @@
     return true;
 
   // Producing an alias to a base class ctor/dtor can degrade debug quality
-  // as the debugger cannot tell them appart.
+  // as the debugger cannot tell them apart.
   if (getCodeGenOpts().OptimizationLevel == 0)
     return true;
 
@@ -56,22 +56,20 @@
 
   // If any field has a non-trivial destructor, we have to emit the
   // destructor separately.
-  for (CXXRecordDecl::field_iterator I = Class->field_begin(),
-         E = Class->field_end(); I != E; ++I)
+  for (const auto *I : Class->fields())
     if (I->getType().isDestructedType())
       return true;
 
   // Try to find a unique base class with a non-trivial destructor.
   const CXXRecordDecl *UniqueBase = 0;
-  for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
-         E = Class->bases_end(); I != E; ++I) {
+  for (const auto &I : Class->bases()) {
 
     // We're in the base destructor, so skip virtual bases.
-    if (I->isVirtual()) continue;
+    if (I.isVirtual()) continue;
 
     // Skip base classes with trivial destructors.
     const CXXRecordDecl *Base
-      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
     if (Base->hasTrivialDestructor()) continue;
 
     // If we've already found a base class with a non-trivial
@@ -92,7 +90,13 @@
   if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
     return true;
 
+  // Give up if the calling conventions don't match. We could update the call,
+  // but it is probably not worth it.
   const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
+  if (BaseD->getType()->getAs<FunctionType>()->getCallConv() !=
+      D->getType()->getAs<FunctionType>()->getCallConv())
+    return true;
+
   return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
                                   GlobalDecl(BaseD, Dtor_Base),
                                   false);
@@ -107,7 +111,7 @@
   if (!getCodeGenOpts().CXXCtorDtorAliases)
     return true;
 
-  // The alias will use the linkage of the referrent.  If we can't
+  // The alias will use the linkage of the referent.  If we can't
   // support aliases with that linkage, fail.
   llvm::GlobalValue::LinkageTypes Linkage = getFunctionLinkage(AliasDecl);
 
@@ -130,7 +134,7 @@
   llvm::PointerType *AliasType
     = getTypes().GetFunctionType(AliasDecl)->getPointerTo();
 
-  // Find the referrent.  Some aliases might require a bitcast, in
+  // Find the referent.  Some aliases might require a bitcast, in
   // which case the caller is responsible for ensuring the soundness
   // of these semantics.
   llvm::GlobalValue *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
@@ -143,7 +147,7 @@
   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) &&
      (TargetLinkage != llvm::GlobalValue::AvailableExternallyLinkage ||
       !TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
-    // FIXME: An extern template instanciation will create functions with
+    // FIXME: An extern template instantiation will create functions with
     // linkage "AvailableExternally". In libc++, some classes also define
     // members with attribute "AlwaysInline" and expect no reference to
     // be generated. It is desirable to reenable this optimisation after
@@ -190,11 +194,13 @@
 
 void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
                                        CXXCtorType ctorType) {
-  // The complete constructor is equivalent to the base constructor
-  // for classes with no virtual bases.  Try to emit it as an alias.
-  if (getTarget().getCXXABI().hasConstructorVariants() &&
-      !ctor->getParent()->getNumVBases() &&
-      (ctorType == Ctor_Complete || ctorType == Ctor_Base)) {
+  if (!getTarget().getCXXABI().hasConstructorVariants()) {
+    // If there are no constructor variants, always emit the complete constructor.
+    ctorType = Ctor_Complete;
+  } else if (!ctor->getParent()->getNumVBases() &&
+             (ctorType == Ctor_Complete || ctorType == Ctor_Base)) {
+    // The complete constructor is equivalent to the base constructor
+    // for classes with no virtual bases.  Try to emit it as an alias.
     bool ProducedAlias =
         !TryEmitDefinitionAsAlias(GlobalDecl(ctor, Ctor_Complete),
                                   GlobalDecl(ctor, Ctor_Base), true);
@@ -205,8 +211,8 @@
   const CGFunctionInfo &fnInfo =
     getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
 
-  llvm::Function *fn =
-    cast<llvm::Function>(GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo));
+  llvm::Function *fn = cast<llvm::Function>(
+      GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo, true));
   setFunctionLinkage(GlobalDecl(ctor, ctorType), fn);
 
   CodeGenFunction(*this).GenerateCode(GlobalDecl(ctor, ctorType), fn, fnInfo);
@@ -218,7 +224,8 @@
 llvm::GlobalValue *
 CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
                                        CXXCtorType ctorType,
-                                       const CGFunctionInfo *fnInfo) {
+                                       const CGFunctionInfo *fnInfo,
+                                       bool DontDefer) {
   GlobalDecl GD(ctor, ctorType);
   
   StringRef name = getMangledName(GD);
@@ -230,7 +237,8 @@
 
   llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
   return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
-                                                      /*ForVTable=*/false));
+                                                      /*ForVTable=*/false,
+                                                      DontDefer));
 }
 
 void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
@@ -260,8 +268,8 @@
   const CGFunctionInfo &fnInfo =
     getTypes().arrangeCXXDestructor(dtor, dtorType);
 
-  llvm::Function *fn =
-    cast<llvm::Function>(GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo));
+  llvm::Function *fn = cast<llvm::Function>(
+      GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo, 0, true));
   setFunctionLinkage(GlobalDecl(dtor, dtorType), fn);
 
   CodeGenFunction(*this).GenerateCode(GlobalDecl(dtor, dtorType), fn, fnInfo);
@@ -274,7 +282,8 @@
 CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
                                       CXXDtorType dtorType,
                                       const CGFunctionInfo *fnInfo,
-                                      llvm::FunctionType *fnType) {
+                                      llvm::FunctionType *fnType,
+                                      bool DontDefer) {
   GlobalDecl GD(dtor, dtorType);
 
   StringRef name = getMangledName(GD);
@@ -286,7 +295,8 @@
     fnType = getTypes().GetFunctionType(*fnInfo);
   }
   return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
-                                                      /*ForVTable=*/false));
+                                                      /*ForVTable=*/false,
+                                                      DontDefer));
 }
 
 static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
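For context on the ctor/dtor aliasing this file implements: the complete (C1) and base (C2) variants only diverge when virtual bases are involved, which is why the code above restricts the alias to classes without them. A small illustrative sketch (class names are made up):

    // No virtual bases: B's complete and base constructors do identical
    // work, so the CXXCtorDtorAliases path can emit one as an alias of
    // the other.
    struct A { A(); };
    A::A() {}
    struct B : A { B(); };
    B::B() {}

    // A virtual base changes that: C's complete constructor must also
    // construct V, so the two variants are genuinely different functions.
    struct V { V(); };
    V::V() {}
    struct C : virtual V { C(); };
    C::C() {}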
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 412b278..2bb3907 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -37,10 +37,9 @@
   return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
 }
 
-llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
-                                                       llvm::Value *&This,
-                                                       llvm::Value *MemPtr,
-                                                 const MemberPointerType *MPT) {
+llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
+    CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
+    llvm::Value *MemPtr, const MemberPointerType *MPT) {
   ErrorUnsupportedABI(CGF, "calls through member pointers");
 
   const FunctionProtoType *FPT = 
@@ -52,10 +51,10 @@
   return llvm::Constant::getNullValue(FTy->getPointerTo());
 }
 
-llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
-                                                    llvm::Value *Base,
-                                                    llvm::Value *MemPtr,
-                                              const MemberPointerType *MPT) {
+llvm::Value *
+CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
+                                       llvm::Value *Base, llvm::Value *MemPtr,
+                                       const MemberPointerType *MPT) {
   ErrorUnsupportedABI(CGF, "loads of member pointers");
   llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
   return llvm::Constant::getNullValue(Ty);
@@ -116,7 +115,7 @@
   return true;
 }
 
-void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
+void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
 
   // FIXME: I'm not entirely sure I like using a fake decl just for code
@@ -281,8 +280,9 @@
     llvm::Function *InitFunc) {
 }
 
-LValue CGCXXABI::EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
-                                          const DeclRefExpr *DRE) {
+LValue CGCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
+                                              const VarDecl *VD,
+                                              QualType LValType) {
   ErrorUnsupportedABI(CGF, "odr-use of thread_local global");
   return LValue();
 }
@@ -290,3 +290,31 @@
 bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
   return false;
 }
+
+/// What sort of uniqueness rules should we use for the RTTI for the
+/// given type?
+CGCXXABI::RTTIUniquenessKind
+CGCXXABI::classifyRTTIUniqueness(QualType CanTy,
+                                 llvm::GlobalValue::LinkageTypes Linkage) {
+  if (shouldRTTIBeUnique())
+    return RUK_Unique;
+
+  // It's only necessary for linkonce_odr or weak_odr linkage.
+  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
+      Linkage != llvm::GlobalValue::WeakODRLinkage)
+    return RUK_Unique;
+
+  // It's only necessary with default visibility.
+  if (CanTy->getVisibility() != DefaultVisibility)
+    return RUK_Unique;
+
+  // If we're not required to publish this symbol, hide it.
+  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
+    return RUK_NonUniqueHidden;
+
+  // If we're required to publish this symbol, as we might be under an
+  // explicit instantiation, leave it with default visibility but
+  // enable string-comparisons.
+  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
+  return RUK_NonUniqueVisible;
+}
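classifyRTTIUniqueness only demotes type_info objects that end up with linkonce_odr or weak_odr linkage and default visibility, typically implicit template instantiations that several images may each emit. A purely illustrative sketch (not the libc++abi implementation) of the runtime consequence, i.e. why string comparison has to back up pointer equality once uniqueness is no longer guaranteed:

    #include <cstring>
    #include <typeinfo>

    // With RUK_Unique RTTI, comparing type_info addresses is sufficient.
    // For the non-unique kinds the runtime must fall back to comparing
    // the mangled names carried by the type_info objects.
    bool same_type(const std::type_info &a, const std::type_info &b) {
      return &a == &b || std::strcmp(a.name(), b.name()) == 0;
    }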
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 9e9a2a7..beaec2c 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -41,7 +41,7 @@
 class CGCXXABI {
 protected:
   CodeGenModule &CGM;
-  OwningPtr<MangleContext> MangleCtx;
+  std::unique_ptr<MangleContext> MangleCtx;
 
   CGCXXABI(CodeGenModule &CGM)
     : CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
@@ -60,15 +60,6 @@
   /// Get a null value for unsupported member pointers.
   llvm::Constant *GetBogusMemberPointer(QualType T);
 
-  // FIXME: Every place that calls getVTT{Decl,Value} is something
-  // that needs to be abstracted properly.
-  ImplicitParamDecl *&getVTTDecl(CodeGenFunction &CGF) {
-    return CGF.CXXStructorImplicitParamDecl;
-  }
-  llvm::Value *&getVTTValue(CodeGenFunction &CGF) {
-    return CGF.CXXStructorImplicitParamValue;
-  }
-
   ImplicitParamDecl *&getStructorImplicitParamDecl(CodeGenFunction &CGF) {
     return CGF.CXXStructorImplicitParamDecl;
   }
@@ -76,11 +67,8 @@
     return CGF.CXXStructorImplicitParamValue;
   }
 
-  /// Build a parameter variable suitable for 'this'.
-  void BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params);
-
   /// Perform prolog initialization of the parameter variable suitable
-  /// for 'this' emitted by BuildThisParam.
+  /// for 'this' emitted by buildThisParam.
   void EmitThisParam(CodeGenFunction &CGF);
 
   ASTContext &getContext() const { return CGM.getContext(); }
@@ -134,17 +122,15 @@
   /// Load a member function from an object and a member function
   /// pointer.  Apply the this-adjustment and set 'This' to the
   /// adjusted value.
-  virtual llvm::Value *
-  EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
-                                  llvm::Value *&This,
-                                  llvm::Value *MemPtr,
-                                  const MemberPointerType *MPT);
+  virtual llvm::Value *EmitLoadOfMemberFunctionPointer(
+      CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
+      llvm::Value *MemPtr, const MemberPointerType *MPT);
 
   /// Calculate an l-value from an object and a data member pointer.
-  virtual llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
-                                                    llvm::Value *Base,
-                                                    llvm::Value *MemPtr,
-                                            const MemberPointerType *MPT);
+  virtual llvm::Value *
+  EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
+                               llvm::Value *Base, llvm::Value *MemPtr,
+                               const MemberPointerType *MPT);
 
   /// Perform a derived-to-base, base-to-derived, or bitcast member
   /// pointer conversion.
@@ -272,23 +258,27 @@
   }
 
   /// Perform ABI-specific "this" argument adjustment required prior to
-  /// a virtual function call.
-  virtual llvm::Value *adjustThisArgumentForVirtualCall(CodeGenFunction &CGF,
-                                                        GlobalDecl GD,
-                                                        llvm::Value *This) {
+  /// a call of a virtual function.
+  /// The "VirtualCall" argument is true iff the call itself is virtual.
+  virtual llvm::Value *
+  adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
+                                           llvm::Value *This,
+                                           bool VirtualCall) {
     return This;
   }
 
-  /// Build the ABI-specific portion of the parameter list for a
-  /// function.  This generally involves a 'this' parameter and
-  /// possibly some extra data for constructors and destructors.
+  /// Build a parameter variable suitable for 'this'.
+  void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params);
+
+  /// Insert any ABI-specific implicit parameters into the parameter list for a
+  /// function.  This generally involves extra data for constructors and
+  /// destructors.
   ///
   /// ABIs may also choose to override the return type, which has been
   /// initialized with the type of 'this' if HasThisReturn(CGF.CurGD) is true or
   /// the formal return type of the function otherwise.
-  virtual void BuildInstanceFunctionParams(CodeGenFunction &CGF,
-                                           QualType &ResTy,
-                                           FunctionArgList &Params) = 0;
+  virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
+                                         FunctionArgList &Params) = 0;
 
   /// Perform ABI-specific "this" parameter adjustment in a virtual function
   /// prologue.
@@ -300,14 +290,20 @@
   /// Emit the ABI-specific prolog for the function.
   virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
 
-  /// Emit the constructor call. Return the function that is called.
-  virtual void EmitConstructorCall(CodeGenFunction &CGF,
-                                   const CXXConstructorDecl *D,
-                                   CXXCtorType Type,
-                                   bool ForVirtualBase, bool Delegating,
-                                   llvm::Value *This,
-                                   CallExpr::const_arg_iterator ArgBeg,
-                                   CallExpr::const_arg_iterator ArgEnd) = 0;
+  /// Add any ABI-specific implicit arguments needed to call a constructor.
+  ///
+  /// \return The number of args added to the call, which is typically zero or
+  /// one.
+  virtual unsigned
+  addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
+                             CXXCtorType Type, bool ForVirtualBase,
+                             bool Delegating, CallArgList &Args) = 0;
+
+  /// Emit the destructor call.
+  virtual void EmitDestructorCall(CodeGenFunction &CGF,
+                                  const CXXDestructorDecl *DD, CXXDtorType Type,
+                                  bool ForVirtualBase, bool Delegating,
+                                  llvm::Value *This) = 0;
 
   /// Emits the VTable definitions required for the given record type.
   virtual void emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -485,8 +481,39 @@
   /// Emit a reference to a non-local thread_local variable (including
   /// triggering the initialization of all thread_local variables in its
   /// translation unit).
-  virtual LValue EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
-                                            const DeclRefExpr *DRE);
+  virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
+                                              const VarDecl *VD,
+                                              QualType LValType);
+
+  /**************************** RTTI Uniqueness ******************************/
+
+protected:
+  /// Returns true if the ABI requires RTTI type_info objects to be unique
+  /// across a program.
+  virtual bool shouldRTTIBeUnique() { return true; }
+
+public:
+  /// What sort of unique-RTTI behavior should we use?
+  enum RTTIUniquenessKind {
+    /// We are guaranteeing, or need to guarantee, that the RTTI string
+    /// is unique.
+    RUK_Unique,
+
+    /// We are not guaranteeing uniqueness for the RTTI string, so we
+    /// can demote to hidden visibility but must use string comparisons.
+    RUK_NonUniqueHidden,
+
+    /// We are not guaranteeing uniqueness for the RTTI string, so we
+    /// have to use string comparisons, but we also have to emit it with
+    /// non-hidden visibility.
+    RUK_NonUniqueVisible
+  };
+
+  /// Return the required visibility status for the given type and linkage in
+  /// the current ABI.
+  RTTIUniquenessKind
+  classifyRTTIUniqueness(QualType CanTy,
+                         llvm::GlobalValue::LinkageTypes Linkage);
 };
 
 // Create an instance of a C++ ABI class:
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 22f2467..e26d6b2d 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -26,10 +26,10 @@
 #include "clang/Frontend/CodeGenOptions.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/InlineAsm.h"
-#include "llvm/MC/SubtargetFeature.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/IR/Intrinsics.h"
 #include "llvm/Transforms/Utils/Local.h"
 using namespace clang;
 using namespace CodeGen;
@@ -79,23 +79,26 @@
 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
   // When translating an unprototyped function type, always use a
   // variadic type.
-  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
-                                 None, FTNP->getExtInfo(), RequiredArgs(0));
+  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
+                                 false, None, FTNP->getExtInfo(),
+                                 RequiredArgs(0));
 }
 
 /// Arrange the LLVM function layout for a value of the given function
 /// type, on top of any implicit parameters already stored.  Use the
 /// given ExtInfo instead of the ExtInfo from the function type.
 static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
+                                                     bool IsInstanceMethod,
                                        SmallVectorImpl<CanQualType> &prefix,
                                              CanQual<FunctionProtoType> FTP,
                                               FunctionType::ExtInfo extInfo) {
   RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
   // FIXME: Kill copy.
-  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
-    prefix.push_back(FTP->getArgType(i));
-  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
-  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
+  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
+    prefix.push_back(FTP->getParamType(i));
+  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
+  return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
+                                     extInfo, required);
 }
 
 /// Arrange the argument and result information for a free function (i.e.
@@ -103,7 +106,7 @@
 static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP) {
-  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
+  return arrangeLLVMFunctionInfo(CGT, false, prefix, FTP, FTP->getExtInfo());
 }
 
 /// Arrange the argument and result information for a free function (i.e.
@@ -112,7 +115,7 @@
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP) {
   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
-  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
+  return arrangeLLVMFunctionInfo(CGT, true, prefix, FTP, extInfo);
 }
 
 /// Arrange the argument and result information for a value of the
@@ -123,7 +126,7 @@
   return ::arrangeFreeFunctionType(*this, argTypes, FTP);
 }
 
-static CallingConv getCallingConventionForDecl(const Decl *D) {
+static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
   // Set the appropriate calling convention for the Function.
   if (D->hasAttr<StdCallAttr>())
     return CC_X86StdCall;
@@ -146,6 +149,12 @@
   if (D->hasAttr<IntelOclBiccAttr>())
     return CC_IntelOclBicc;
 
+  if (D->hasAttr<MSABIAttr>())
+    return IsWindows ? CC_C : CC_X86_64Win64;
+
+  if (D->hasAttr<SysVABIAttr>())
+    return IsWindows ? CC_X86_64SysV : CC_C;
+
   return CC_C;
 }
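The two new checks let the ms_abi and sysv_abi attributes request the non-default x86-64 convention; on a matching target they simply map back to the plain C convention. A minimal sketch of the source-level spelling, assuming an x86-64 target:

    // On a non-Windows x86-64 target this declaration gets the Win64
    // convention (CC_X86_64Win64); on Windows it is just the default.
    void __attribute__((ms_abi)) win64_callee(int a, int b, int c, int d);

    // Conversely, sysv_abi forces the SysV convention when targeting
    // Windows and is a no-op elsewhere.
    void __attribute__((sysv_abi)) sysv_callee(int a, int b, int c, int d);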
 
@@ -202,18 +211,41 @@
   CanQualType resultType =
     TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
 
-  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
-
   CanQual<FunctionProtoType> FTP = GetFormalType(D);
 
-  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
-
   // Add the formal parameters.
-  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
-    argTypes.push_back(FTP->getArgType(i));
+  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
+    argTypes.push_back(FTP->getParamType(i));
+
+  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
+
+  RequiredArgs required =
+      (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
 
   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
-  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
+  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
+}
+
+/// Arrange a call to a C++ method, passing the given arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
+                                        const CXXConstructorDecl *D,
+                                        CXXCtorType CtorKind,
+                                        unsigned ExtraArgs) {
+  // FIXME: Kill copy.
+  SmallVector<CanQualType, 16> ArgTypes;
+  for (CallArgList::const_iterator i = args.begin(), e = args.end(); i != e;
+       ++i)
+    ArgTypes.push_back(Context.getCanonicalParamType(i->Ty));
+
+  CanQual<FunctionProtoType> FPT = GetFormalType(D);
+  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
+  GlobalDecl GD(D, CtorKind);
+  CanQualType ResultType =
+      TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy;
+
+  FunctionType::ExtInfo Info = FPT->getExtInfo();
+  return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
 }
 
 /// Arrange the argument and result information for a declaration,
@@ -232,11 +264,11 @@
   TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
 
   CanQual<FunctionProtoType> FTP = GetFormalType(D);
-  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
+  assert(FTP->getNumParams() == 0 && "dtor with formal parameters");
   assert(FTP->isVariadic() == 0 && "dtor with formal parameters");
 
   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
-  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
+  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo,
                                  RequiredArgs::All);
 }
 
@@ -256,7 +288,7 @@
   // non-variadic type.
   if (isa<FunctionNoProtoType>(FTy)) {
     CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
-    return arrangeLLVMFunctionInfo(noProto->getResultType(), None,
+    return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
                                    noProto->getExtInfo(), RequiredArgs::All);
   }
 
@@ -286,13 +318,13 @@
   argTys.push_back(Context.getCanonicalParamType(receiverType));
   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
   // FIXME: Kill copy?
-  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
-         e = MD->param_end(); i != e; ++i) {
-    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+  for (const auto *I : MD->params()) {
+    argTys.push_back(Context.getCanonicalParamType(I->getType()));
   }
 
   FunctionType::ExtInfo einfo;
-  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
+  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
+  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
 
   if (getContext().getLangOpts().ObjCAutoRefCount &&
       MD->hasAttr<NSReturnsRetainedAttr>())
@@ -301,8 +333,8 @@
   RequiredArgs required =
     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
 
-  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
-                                 einfo, required);
+  return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
+                                 argTys, einfo, required);
 }
 
 const CGFunctionInfo &
@@ -336,7 +368,7 @@
   // extra prefix plus the arguments in the prototype.
   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
     if (proto->isVariadic())
-      required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);
+      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
 
   // If we don't have a prototype at all, but we're supposed to
   // explicitly use the variadic convention for unprototyped calls,
@@ -348,7 +380,7 @@
     required = RequiredArgs(args.size());
   }
 
-  return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
+  return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
                                      fnType->getExtInfo(), required);
 }
 
@@ -380,8 +412,8 @@
   for (CallArgList::const_iterator i = args.begin(), e = args.end();
        i != e; ++i)
     argTypes.push_back(Context.getCanonicalParamType(i->Ty));
-  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
-                                 required);
+  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
+                                 info, required);
 }
 
 /// Arrange a call to a C++ method, passing the given arguments.
@@ -396,15 +428,13 @@
     argTypes.push_back(Context.getCanonicalParamType(i->Ty));
 
   FunctionType::ExtInfo info = FPT->getExtInfo();
-  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
+  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
                                  argTypes, info, required);
 }
 
-const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
-                                         const FunctionArgList &args,
-                                         const FunctionType::ExtInfo &info,
-                                         bool isVariadic) {
+const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
+    QualType resultType, const FunctionArgList &args,
+    const FunctionType::ExtInfo &info, bool isVariadic) {
   // FIXME: Kill copy.
   SmallVector<CanQualType, 16> argTypes;
   for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
@@ -413,12 +443,12 @@
 
   RequiredArgs required =
     (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
-  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
+  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes, info,
                                  required);
 }
 
 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
-  return arrangeLLVMFunctionInfo(getContext().VoidTy, None,
+  return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
                                  FunctionType::ExtInfo(), RequiredArgs::All);
 }
 
@@ -427,6 +457,7 @@
 /// above functions ultimately defer to.
 const CGFunctionInfo &
 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
+                                      bool IsInstanceMethod,
                                       ArrayRef<CanQualType> argTypes,
                                       FunctionType::ExtInfo info,
                                       RequiredArgs required) {
@@ -440,7 +471,8 @@
 
   // Lookup or create unique function info.
   llvm::FoldingSetNodeID ID;
-  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
+  CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
+                          argTypes);
 
   void *insertPos = 0;
   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
@@ -448,7 +480,8 @@
     return *FI;
 
   // Construct the function info.  We co-allocate the ArgInfos.
-  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
+  FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
+                              required);
   FunctionInfos.InsertNode(FI, insertPos);
 
   bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
@@ -464,10 +497,9 @@
   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
 
-  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
-       I != E; ++I)
-    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
-      I->info.setCoerceToType(ConvertType(I->type));
+  for (auto &I : FI->arguments())
+    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == 0)
+      I.info.setCoerceToType(ConvertType(I.type));
 
   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
   assert(erased && "Not in set?");
@@ -476,6 +508,7 @@
 }
 
 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
+                                       bool IsInstanceMethod,
                                        const FunctionType::ExtInfo &info,
                                        CanQualType resultType,
                                        ArrayRef<CanQualType> argTypes,
@@ -486,11 +519,13 @@
   FI->CallingConvention = llvmCC;
   FI->EffectiveCallingConvention = llvmCC;
   FI->ASTCallingConvention = info.getCC();
+  FI->InstanceMethod = IsInstanceMethod;
   FI->NoReturn = info.getNoReturn();
   FI->ReturnsRetained = info.getProducesResult();
   FI->Required = required;
   FI->HasRegParm = info.getHasRegParm();
   FI->RegParm = info.getRegParm();
+  FI->ArgStruct = 0;
   FI->NumArgs = argTypes.size();
   FI->getArgsBuffer()[0].type = resultType;
   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
@@ -516,9 +551,7 @@
       const FieldDecl *LargestFD = 0;
       CharUnits UnionSize = CharUnits::Zero();
 
-      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-           i != e; ++i) {
-        const FieldDecl *FD = *i;
+      for (const auto *FD : RD->fields()) {
         assert(!FD->isBitField() &&
                "Cannot expand structure with bit-field members.");
         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
@@ -530,11 +563,10 @@
       if (LargestFD)
         GetExpandedTypes(LargestFD->getType(), expandedTypes);
     } else {
-      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-           i != e; ++i) {
-        assert(!i->isBitField() &&
+      for (const auto *I : RD->fields()) {
+        assert(!I->isBitField() &&
                "Cannot expand structure with bit-field members.");
-        GetExpandedTypes(i->getType(), expandedTypes);
+        GetExpandedTypes(I->getType(), expandedTypes);
       }
     }
   } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
@@ -567,9 +599,7 @@
       const FieldDecl *LargestFD = 0;
       CharUnits UnionSize = CharUnits::Zero();
 
-      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-           i != e; ++i) {
-        const FieldDecl *FD = *i;
+      for (const auto *FD : RD->fields()) {
         assert(!FD->isBitField() &&
                "Cannot expand structure with bit-field members.");
         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
@@ -584,9 +614,7 @@
         AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
       }
     } else {
-      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-           i != e; ++i) {
-        FieldDecl *FD = *i;
+      for (const auto *FD : RD->fields()) {
         QualType FT = FD->getType();
 
         // FIXME: What are the right qualifiers here?
@@ -850,6 +878,11 @@
   return FI.getReturnInfo().isIndirect();
 }
 
+bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
+  return ReturnTypeUsesSRet(FI) &&
+         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
+}
+
 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
     switch (BT->getKind()) {
@@ -902,6 +935,18 @@
     resultType = retAI.getCoerceToType();
     break;
 
+  case ABIArgInfo::InAlloca:
+    if (retAI.getInAllocaSRet()) {
+      // sret things on win32 aren't void; they return the sret pointer.
+      QualType ret = FI.getReturnType();
+      llvm::Type *ty = ConvertType(ret);
+      unsigned addressSpace = Context.getTargetAddressSpace(ret);
+      resultType = llvm::PointerType::get(ty, addressSpace);
+    } else {
+      resultType = llvm::Type::getVoidTy(getLLVMContext());
+    }
+    break;
+
   case ABIArgInfo::Indirect: {
     assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
     resultType = llvm::Type::getVoidTy(getLLVMContext());
@@ -934,6 +979,7 @@
 
     switch (argAI.getKind()) {
     case ABIArgInfo::Ignore:
+    case ABIArgInfo::InAlloca:
       break;
 
     case ABIArgInfo::Indirect: {
@@ -964,6 +1010,10 @@
     }
   }
 
+  // Add the inalloca struct as the last parameter type.
+  if (llvm::StructType *ArgStruct = FI.getArgStruct())
+    argTypes.push_back(ArgStruct->getPointerTo());
+
   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
   assert(Erased && "Not in set?");
   
@@ -1006,6 +1056,8 @@
       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
     if (TargetDecl->hasAttr<NoReturnAttr>())
       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+    if (TargetDecl->hasAttr<NoDuplicateAttr>())
+      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
 
     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
       const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
@@ -1089,6 +1141,13 @@
   case ABIArgInfo::Ignore:
     break;
 
+  case ABIArgInfo::InAlloca: {
+    // inalloca disables readnone and readonly
+    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
+      .removeAttribute(llvm::Attribute::ReadNone);
+    break;
+  }
+
   case ABIArgInfo::Indirect: {
     llvm::AttrBuilder SRETAttrs;
     SRETAttrs.addAttribute(llvm::Attribute::StructRet);
@@ -1114,10 +1173,9 @@
                                     llvm::AttributeSet::ReturnIndex,
                                     RetAttrs));
 
-  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
-         ie = FI.arg_end(); it != ie; ++it) {
-    QualType ParamType = it->type;
-    const ABIArgInfo &AI = it->info;
+  for (const auto &I : FI.arguments()) {
+    QualType ParamType = I.type;
+    const ABIArgInfo &AI = I.info;
     llvm::AttrBuilder Attrs;
 
     if (AI.getPaddingType()) {
@@ -1173,6 +1231,13 @@
       // Skip increment, no matching LLVM parameter.
       continue;
 
+    case ABIArgInfo::InAlloca:
+      // inalloca disables readnone and readonly.
+      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
+          .removeAttribute(llvm::Attribute::ReadNone);
+      // Skip increment, no matching LLVM parameter.
+      continue;
+
     case ABIArgInfo::Expand: {
       SmallVector<llvm::Type*, 8> types;
       // FIXME: This is rather inefficient. Do we ever actually need to do
@@ -1188,6 +1253,14 @@
       PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
     ++Index;
   }
+
+  // Add the inalloca attribute to the trailing inalloca parameter if present.
+  if (FI.usesInAlloca()) {
+    llvm::AttrBuilder Attrs;
+    Attrs.addAttribute(llvm::Attribute::InAlloca);
+    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
+  }
+
   if (FuncAttrs.hasAttributes())
     PAL.push_back(llvm::
                   AttributeSet::get(getLLVMContext(),
@@ -1224,7 +1297,7 @@
   // return statements.
   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
     if (FD->hasImplicitReturnZero()) {
-      QualType RetTy = FD->getResultType().getUnqualifiedType();
+      QualType RetTy = FD->getReturnType().getUnqualifiedType();
       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
       Builder.CreateStore(Zero, ReturnValue);
@@ -1237,6 +1310,16 @@
   // Emit allocs for param decls.  Give the LLVM Argument nodes names.
   llvm::Function::arg_iterator AI = Fn->arg_begin();
 
+  // If we're using inalloca, all the memory arguments are GEPs off of the last
+  // parameter, which is a pointer to the complete memory area.
+  llvm::Value *ArgStruct = 0;
+  if (FI.usesInAlloca()) {
+    llvm::Function::arg_iterator EI = Fn->arg_end();
+    --EI;
+    ArgStruct = EI;
+    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
+  }
+
   // Name the struct return argument.
   if (CGM.ReturnTypeUsesSRet(FI)) {
     AI->setName("agg.result");
@@ -1246,6 +1329,18 @@
     ++AI;
   }
 
+  // Track if we received the parameter as a pointer (indirect, byval, or
+  // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
+  // copy it into a local alloca for us.
+  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
+  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
+  SmallVector<ValueAndIsPtr, 16> ArgVals;
+  ArgVals.reserve(Args.size());
+
+  // Create a pointer value for every parameter declaration.  This usually
+  // entails copying one or more LLVM IR arguments into an alloca.  Don't push
+  // any cleanups or do anything that might unwind.  We do that separately, so
+  // we can push the cleanups in the correct order for the ABI.
   assert(FI.arg_size() == Args.size() &&
          "Mismatch between function signature & arguments.");
   unsigned ArgNo = 1;
@@ -1264,6 +1359,13 @@
       ++AI;
 
     switch (ArgI.getKind()) {
+    case ABIArgInfo::InAlloca: {
+      llvm::Value *V = Builder.CreateStructGEP(
+          ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
+      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
+      continue;  // Don't increment AI!
+    }
+
     case ABIArgInfo::Indirect: {
       llvm::Value *V = AI;
 
@@ -1290,6 +1392,7 @@
                                false);
           V = AlignedTemp;
         }
+        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
       } else {
         // Load scalar value from indirect argument.
         CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
@@ -1298,8 +1401,8 @@
 
         if (isPromoted)
           V = emitArgumentDemotion(*this, Arg, V);
+        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
       }
-      EmitParmDecl(*Arg, V, ArgNo);
       break;
     }
 
@@ -1340,7 +1443,7 @@
         if (V->getType() != LTy)
           V = Builder.CreateBitCast(V, LTy);
 
-        EmitParmDecl(*Arg, V, ArgNo);
+        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
         break;
       }
 
@@ -1412,8 +1515,10 @@
         V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
         if (isPromoted)
           V = emitArgumentDemotion(*this, Arg, V);
+        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
+      } else {
+        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
       }
-      EmitParmDecl(*Arg, V, ArgNo);
       continue;  // Skip ++AI increment, already done.
     }
 
@@ -1426,7 +1531,7 @@
       Alloca->setAlignment(Align.getQuantity());
       LValue LV = MakeAddrLValue(Alloca, Ty, Align);
       llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
-      EmitParmDecl(*Arg, Alloca, ArgNo);
+      ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
 
       // Name the arguments used in expansion and increment AI.
       unsigned Index = 0;
@@ -1437,11 +1542,12 @@
 
     case ABIArgInfo::Ignore:
       // Initialize the local variable appropriately.
-      if (!hasScalarEvaluationKind(Ty))
-        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
-      else
-        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
-                     ArgNo);
+      if (!hasScalarEvaluationKind(Ty)) {
+        ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
+      } else {
+        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
+        ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
+      }
 
       // Skip increment, no matching LLVM parameter.
       continue;
@@ -1449,7 +1555,20 @@
 
     ++AI;
   }
+
+  if (FI.usesInAlloca())
+    ++AI;
   assert(AI == Fn->arg_end() && "Argument mismatch!");
+
+  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
+    for (int I = Args.size() - 1; I >= 0; --I)
+      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
+                   I + 1);
+  } else {
+    for (unsigned I = 0, E = Args.size(); I != E; ++I)
+      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
+                   I + 1);
+  }
 }
 
 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
@@ -1626,7 +1745,7 @@
   }
 
   llvm::StoreInst *store =
-    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
+    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
   if (!store) return 0;
 
   // These aren't actually possible for non-coerced returns, and we
@@ -1662,6 +1781,20 @@
   const ABIArgInfo &RetAI = FI.getReturnInfo();
 
   switch (RetAI.getKind()) {
+  case ABIArgInfo::InAlloca:
+    // Aggregates get evaluated directly into the destination.  Sometimes we
+    // need to return the sret value in a register, though.
+    assert(hasAggregateEvaluationKind(RetTy));
+    if (RetAI.getInAllocaSRet()) {
+      llvm::Function::arg_iterator EI = CurFn->arg_end();
+      --EI;
+      llvm::Value *ArgStruct = EI;
+      llvm::Value *SRet =
+          Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex());
+      RV = Builder.CreateLoad(SRet, "sret");
+    }
+    break;
+
   case ABIArgInfo::Indirect: {
     switch (getEvaluationKind(RetTy)) {
     case TEK_Complex: {
@@ -1750,6 +1883,25 @@
     Ret->setDebugLoc(RetDbgLoc);
 }
 
+static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
+  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
+  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
+}
+
+static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
+  // FIXME: Generate IR in one pass, rather than going back and fixing up these
+  // placeholders.
+  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
+  llvm::Value *Placeholder =
+      llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
+  Placeholder = CGF.Builder.CreateLoad(Placeholder);
+  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
+                               Ty.getQualifiers(),
+                               AggValueSlot::IsNotDestructed,
+                               AggValueSlot::DoesNotNeedGCBarriers,
+                               AggValueSlot::IsNotAliased);
+}
+
 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                           const VarDecl *param,
                                           SourceLocation loc) {
@@ -1773,6 +1925,20 @@
     return args.add(RValue::get(Builder.CreateLoad(local)), type);
   }
 
+  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
+    AggValueSlot Slot = createPlaceholderSlot(*this, type);
+    Slot.setExternallyDestructed();
+
+    // FIXME: Either emit a copy constructor call, or figure out how to do
+    // guaranteed tail calls with perfect forwarding in LLVM.
+    CGM.ErrorUnsupported(param, "non-trivial argument copy for thunk");
+    EmitNullInitialization(Slot.getAddr(), type);
+
+    RValue RV = Slot.asRValue();
+    args.add(RV, type);
+    return;
+  }
+
   args.add(convertTempToRValue(local, type, loc), type);
 }
 
@@ -1852,14 +2018,13 @@
 
 static void emitWritebacks(CodeGenFunction &CGF,
                            const CallArgList &args) {
-  for (CallArgList::writeback_iterator
-         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
-    emitWriteback(CGF, *i);
+  for (const auto &I : args.writebacks())
+    emitWriteback(CGF, I);
 }
 
 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                             const CallArgList &CallArgs) {
-  assert(CGF.getTarget().getCXXABI().isArgumentDestroyedByCallee());
+  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
     CallArgs.getCleanupsToDeactivate();
   // Iterate in reverse to increase the likelihood of popping the cleanup.
@@ -2004,6 +2169,99 @@
   args.add(RValue::get(finalArgument), CRE->getType());
 }
 
+void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
+  assert(!StackBase && !StackCleanup.isValid());
+
+  // Save the stack.
+  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+  StackBase = CGF.Builder.CreateCall(F, "inalloca.save");
+
+  // Control gets really tied up in landing pads, so we have to spill the
+  // stacksave to an alloca to avoid violating SSA form.
+  // TODO: This is dead if we never emit the cleanup.  We should create the
+  // alloca and store lazily on the first cleanup emission.
+  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
+  CGF.Builder.CreateStore(StackBase, StackBaseMem);
+  CGF.pushStackRestore(EHCleanup, StackBaseMem);
+  StackCleanup = CGF.EHStack.getInnermostEHScope();
+  assert(StackCleanup.isValid());
+}
+
+void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
+  if (StackBase) {
+    CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
+    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+    // We could load StackBase from StackBaseMem, but in the non-exceptional
+    // case we can skip it.
+    CGF.Builder.CreateCall(F, StackBase);
+  }
+}
+
+void CodeGenFunction::EmitCallArgs(CallArgList &Args,
+                                   ArrayRef<QualType> ArgTypes,
+                                   CallExpr::const_arg_iterator ArgBeg,
+                                   CallExpr::const_arg_iterator ArgEnd,
+                                   bool ForceColumnInfo) {
+  CGDebugInfo *DI = getDebugInfo();
+  SourceLocation CallLoc;
+  if (DI) CallLoc = DI->getLocation();
+
+  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
+  // because arguments are destroyed left to right in the callee.
+  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
+    // Insert a stack save if we're going to need any inalloca args.
+    bool HasInAllocaArgs = false;
+    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
+         I != E && !HasInAllocaArgs; ++I)
+      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
+    if (HasInAllocaArgs) {
+      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
+      Args.allocateArgumentMemory(*this);
+    }
+
+    // Evaluate each argument.
+    size_t CallArgsStart = Args.size();
+    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
+      CallExpr::const_arg_iterator Arg = ArgBeg + I;
+      EmitCallArg(Args, *Arg, ArgTypes[I]);
+      // Restore the debug location.
+      if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
+    }
+
+    // Un-reverse the arguments we just evaluated so they match up with the LLVM
+    // IR function.
+    std::reverse(Args.begin() + CallArgsStart, Args.end());
+    return;
+  }
+
+  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
+    CallExpr::const_arg_iterator Arg = ArgBeg + I;
+    assert(Arg != ArgEnd);
+    EmitCallArg(Args, *Arg, ArgTypes[I]);
+    // Restore the debug location.
+    if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
+  }
+}
+
+namespace {
+
+struct DestroyUnpassedArg : EHScopeStack::Cleanup {
+  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
+      : Addr(Addr), Ty(Ty) {}
+
+  llvm::Value *Addr;
+  QualType Ty;
+
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
+    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
+    assert(!Dtor->isTrivial());
+    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
+                              /*Delegating=*/false, Addr);
+  }
+};
+
+}
+
 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                   QualType type) {
   if (const ObjCIndirectCopyRestoreExpr *CRE
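All of the inalloca plumbing above targets the 32-bit Microsoft C++ ABI, where a by-value aggregate with a non-trivial destructor or copy constructor is constructed directly into the outgoing argument block and destroyed by the callee; that, in turn, is why EmitCallArgs has to evaluate arguments right to left. A short sketch of the kind of call that takes this path, assuming an i686-windows-msvc target (names are illustrative):

    struct Big {
      Big(const Big &other);
      ~Big();            // non-trivial: the callee destroys its copies
      int data[8];
    };

    void callee(Big a, Big b);

    void caller(const Big &x) {
      // 'b' is evaluated before 'a', both copies are constructed in place
      // inside the single inalloca argument allocation, and callee() runs
      // their destructors.
      callee(x, x);
    }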
@@ -2026,23 +2284,25 @@
   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
   // However, we still have to push an EH-only cleanup in case we unwind before
   // we make it to the call.
-  if (HasAggregateEvalKind &&
-      CGM.getTarget().getCXXABI().isArgumentDestroyedByCallee()) {
-    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
-    if (RD && RD->hasNonTrivialDestructor()) {
-      AggValueSlot Slot = CreateAggTemp(type, "agg.arg.tmp");
-      Slot.setExternallyDestructed();
-      EmitAggExpr(E, Slot);
-      RValue RV = Slot.asRValue();
-      args.add(RV, type);
+  if (HasAggregateEvalKind && args.isUsingInAlloca()) {
+    assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
+    AggValueSlot Slot = createPlaceholderSlot(*this, type);
+    Slot.setExternallyDestructed();
+    EmitAggExpr(E, Slot);
+    RValue RV = Slot.asRValue();
+    args.add(RV, type);
 
-      pushDestroy(EHCleanup, RV.getAggregateAddr(), type, destroyCXXObject,
-                  /*useEHCleanupForArray*/ true);
+    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
+    if (RD->hasNonTrivialDestructor()) {
+      // Create a no-op GEP between the placeholder and the cleanup so we can
+      // RAUW it successfully.  It also serves as a marker of the first
+      // instruction where the cleanup is active.
+      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
       // This unreachable is a temporary marker which will be removed later.
       llvm::Instruction *IsActive = Builder.CreateUnreachable();
       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
-      return;
     }
+    return;
   }
 
   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
@@ -2128,6 +2388,7 @@
     call->setCallingConv(getRuntimeCC());
     Builder.CreateUnreachable();
   }
+  PGO.setCurrentRegionUnreachable();
 }
 
 /// Emits a call or invoke instruction to the given nullary runtime
@@ -2209,9 +2470,7 @@
       const FieldDecl *LargestFD = 0;
       CharUnits UnionSize = CharUnits::Zero();
 
-      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-           i != e; ++i) {
-        const FieldDecl *FD = *i;
+      for (const auto *FD : RD->fields()) {
         assert(!FD->isBitField() &&
                "Cannot expand structure with bit-field members.");
         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
@@ -2225,10 +2484,7 @@
         ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
       }
     } else {
-      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-           i != e; ++i) {
-        FieldDecl *FD = *i;
-
+      for (const auto *FD : RD->fields()) {
         RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
         ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
       }
@@ -2251,6 +2507,20 @@
   }
 }
 
+/// \brief Store a non-aggregate value to an address to initialize it.  For
+/// initialization, a non-atomic store will be used.
+static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
+                                        LValue Dst) {
+  if (Src.isScalar())
+    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
+  else
+    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
+}
+
+void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
+                                                  llvm::Value *New) {
+  DeferredReplacements.push_back(std::make_pair(Old, New));
+}
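The pairs recorded by deferPlaceholderReplacement are consumed once the real inalloca slots exist; the consuming loop is not shown in this hunk. A minimal sketch of that patch-up step, assuming DeferredReplacements is the vector of (placeholder, real address) pairs filled in above; the helper name is illustrative.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include <utility>

static void applyDeferredReplacements(
    llvm::SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >
        &DeferredReplacements) {
  for (unsigned i = 0, e = DeferredReplacements.size(); i != e; ++i) {
    llvm::Instruction *Placeholder = DeferredReplacements[i].first;
    llvm::Value *RealAddr = DeferredReplacements[i].second;
    // Every use emitted while the argument was being evaluated (stores, GEPs,
    // the no-op marker GEP) is rewritten to point at the real argument slot.
    Placeholder->replaceAllUsesWith(RealAddr);
    Placeholder->eraseFromParent();
  }
  DeferredReplacements.clear();
}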
 
 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                  llvm::Value *Callee,
@@ -2272,14 +2542,32 @@
     cast<llvm::FunctionType>(
                   cast<llvm::PointerType>(Callee->getType())->getElementType());
 
+  // If we're using inalloca, insert the allocation after the stack save.
+  // FIXME: Do this earlier rather than hacking it in here!
+  llvm::Value *ArgMemory = 0;
+  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
+    llvm::AllocaInst *AI = new llvm::AllocaInst(
+        ArgStruct, "argmem", CallArgs.getStackBase()->getNextNode());
+    AI->setUsedWithInAlloca(true);
+    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
+    ArgMemory = AI;
+  }
+
   // If the call returns a temporary with struct return, create a temporary
   // alloca to hold the result, unless one is given to us.
-  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
-    llvm::Value *Value = ReturnValue.getValue();
-    if (!Value)
-      Value = CreateMemTemp(RetTy);
-    Args.push_back(Value);
-    checkArgMatches(Value, IRArgNo, IRFuncTy);
+  llvm::Value *SRetPtr = 0;
+  if (CGM.ReturnTypeUsesSRet(CallInfo) || RetAI.isInAlloca()) {
+    SRetPtr = ReturnValue.getValue();
+    if (!SRetPtr)
+      SRetPtr = CreateMemTemp(RetTy);
+    if (CGM.ReturnTypeUsesSRet(CallInfo)) {
+      Args.push_back(SRetPtr);
+      checkArgMatches(SRetPtr, IRArgNo, IRFuncTy);
+    } else {
+      llvm::Value *Addr =
+          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
+      Builder.CreateStore(SRetPtr, Addr);
+    }
   }
 
   assert(CallInfo.arg_size() == CallArgs.size() &&
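Roughly the kind of C++ source (assuming an i686-pc-win32, MSVC-style target where inalloca applies) that exercises the paths above: the by-value argument needs the inalloca buffer, and the non-trivial return value can take the RetAI.isInAlloca() branch so its sret pointer is stored into the same buffer. The example is illustrative only.

struct NonTrivial {
  NonTrivial() : x(0) {}
  NonTrivial(const NonTrivial &O) : x(O.x) {}  // non-trivial copy: passed indirectly
  ~NonTrivial() {}                             // callee-destroyed in the MS C++ ABI
  int x;
};
NonTrivial callee(NonTrivial byVal) { return byVal; }
NonTrivial caller(NonTrivial arg) { return callee(arg); }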
@@ -2299,6 +2587,35 @@
     }
 
     switch (ArgInfo.getKind()) {
+    case ABIArgInfo::InAlloca: {
+      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
+      if (RV.isAggregate()) {
+        // Replace the placeholder with the appropriate argument slot GEP.
+        llvm::Instruction *Placeholder =
+            cast<llvm::Instruction>(RV.getAggregateAddr());
+        CGBuilderTy::InsertPoint IP = Builder.saveIP();
+        Builder.SetInsertPoint(Placeholder);
+        llvm::Value *Addr = Builder.CreateStructGEP(
+            ArgMemory, ArgInfo.getInAllocaFieldIndex());
+        Builder.restoreIP(IP);
+        deferPlaceholderReplacement(Placeholder, Addr);
+      } else {
+        // Store the RValue into the argument struct.
+        llvm::Value *Addr =
+            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
+        unsigned AS = Addr->getType()->getPointerAddressSpace();
+        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
+        // There are some cases where a trivial bitcast is not avoidable.  The
+        // definition of a type later in a translation unit may change its type
+        // from {}* to (%struct.foo*)*.
+        if (Addr->getType() != MemType)
+          Addr = Builder.CreateBitCast(Addr, MemType);
+        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
+        EmitInitStoreOfNonAggregate(*this, RV, argLV);
+      }
+      break; // Don't increment IRArgNo!
+    }
+
     case ABIArgInfo::Indirect: {
       if (RV.isScalar() || RV.isComplex()) {
         // Make a temporary alloca to pass the argument.
@@ -2307,13 +2624,8 @@
           AI->setAlignment(ArgInfo.getIndirectAlign());
         Args.push_back(AI);
 
-        LValue argLV =
-          MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
-        
-        if (RV.isScalar())
-          EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true);
-        else
-          EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true);
+        LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
+        EmitInitStoreOfNonAggregate(*this, RV, argLV);
         
         // Validate argument match.
         checkArgMatches(AI, IRArgNo, IRFuncTy);
@@ -2386,11 +2698,7 @@
       if (RV.isScalar() || RV.isComplex()) {
         SrcPtr = CreateMemTemp(I->Ty, "coerce");
         LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
-        if (RV.isScalar()) {
-          EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
-        } else {
-          EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
-        }
+        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
       } else
         SrcPtr = RV.getAggregateAddr();
 
@@ -2456,6 +2764,34 @@
     }
   }
 
+  if (ArgMemory) {
+    llvm::Value *Arg = ArgMemory;
+    llvm::Type *LastParamTy =
+        IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
+    if (Arg->getType() != LastParamTy) {
+#ifndef NDEBUG
+      // Assert that these structs have equivalent element types.
+      llvm::StructType *FullTy = CallInfo.getArgStruct();
+      llvm::StructType *Prefix = cast<llvm::StructType>(
+          cast<llvm::PointerType>(LastParamTy)->getElementType());
+
+      // For variadic functions, the caller might supply a larger struct than
+      // the callee expects, and that's OK.
+      assert(Prefix->getNumElements() == FullTy->getNumElements() ||
+             (CallInfo.isVariadic() &&
+              Prefix->getNumElements() <= FullTy->getNumElements()));
+
+      for (llvm::StructType::element_iterator PI = Prefix->element_begin(),
+                                              PE = Prefix->element_end(),
+                                              FI = FullTy->element_begin();
+           PI != PE; ++PI, ++FI)
+        assert(*PI == *FI);
+#endif
+      Arg = Builder.CreateBitCast(Arg, LastParamTy);
+    }
+    Args.push_back(Arg);
+  }
+
   if (!CallArgs.getCleanupsToDeactivate().empty())
     deactivateArgCleanupsBeforeCall(*this, CallArgs);
 
@@ -2545,9 +2881,14 @@
   if (CallArgs.hasWritebacks())
     emitWritebacks(*this, CallArgs);
 
+  // The stack cleanup for inalloca arguments has to run out of the normal
+  // lexical order, so deactivate it and run it manually here.
+  CallArgs.freeArgumentMemory(*this);
+
   switch (RetAI.getKind()) {
+  case ABIArgInfo::InAlloca:
   case ABIArgInfo::Indirect:
-    return convertTempToRValue(Args[0], RetTy, SourceLocation());
+    return convertTempToRValue(SRetPtr, RetTy, SourceLocation());
 
   case ABIArgInfo::Ignore:
     // If we are ignoring an argument that had a result, make sure to
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 532cb59..2e43d1d 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -56,6 +56,8 @@
   class CallArgList :
     public SmallVector<CallArg, 16> {
   public:
+    CallArgList() : StackBase(0), StackBaseMem(0) {}
+
     struct Writeback {
       /// The original argument.  Note that the argument l-value
       /// is potentially null.
@@ -97,9 +99,12 @@
 
     bool hasWritebacks() const { return !Writebacks.empty(); }
 
-    typedef SmallVectorImpl<Writeback>::const_iterator writeback_iterator;
-    writeback_iterator writeback_begin() const { return Writebacks.begin(); }
-    writeback_iterator writeback_end() const { return Writebacks.end(); }
+    typedef llvm::iterator_range<SmallVectorImpl<Writeback>::const_iterator>
+      writeback_const_range;
+
+    writeback_const_range writebacks() const {
+      return writeback_const_range(Writebacks.begin(), Writebacks.end());
+    }
 
     void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup,
                                    llvm::Instruction *IsActiveIP) {
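With the accessor returning an llvm::iterator_range, callers can use a range-based for loop instead of the removed writeback_begin()/writeback_end() pair. A hypothetical consumer is sketched below; the real consumer is emitWritebacks in CGCall.cpp, which is not part of this hunk.

static unsigned countWritebacks(const clang::CodeGen::CallArgList &Args) {
  unsigned N = 0;
  for (const auto &WB : Args.writebacks()) {
    (void)WB; // each Writeback describes one argument to copy back after the call
    ++N;
  }
  return N;
}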
@@ -113,6 +118,14 @@
       return CleanupsToDeactivate;
     }
 
+    void allocateArgumentMemory(CodeGenFunction &CGF);
+    llvm::Instruction *getStackBase() const { return StackBase; }
+    void freeArgumentMemory(CodeGenFunction &CGF) const;
+
+    /// \brief Returns whether we're using an inalloca struct to pass arguments in
+    /// memory.
+    bool isUsingInAlloca() const { return StackBase; }
+
   private:
     SmallVector<Writeback, 1> Writebacks;
 
@@ -120,6 +133,17 @@
     /// is used to cleanup objects that are owned by the callee once the call
     /// occurs.
     SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;
+
+    /// The stacksave call.  It dominates all of the argument evaluation.
+    llvm::CallInst *StackBase;
+
+    /// The alloca holding the stackbase.  We need it to maintain SSA form.
+    llvm::AllocaInst *StackBaseMem;
+
+    /// The iterator pointing to the stack restore cleanup.  We manually run and
+    /// deactivate this cleanup after the call in the unexceptional case because
+    /// it doesn't run in the normal order.
+    EHScopeStack::stable_iterator StackCleanup;
   };
 
   /// FunctionArgList - Type for representing both the decl and type
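A hedged sketch of how the new CallArgList members fit together, based on the comments above (StackBase is the stack save call, StackCleanup the matching stack restore); the helper below is illustrative and not a verbatim copy of EmitCall.

static void emitCallWithArgMemory(clang::CodeGen::CodeGenFunction &CGF,
                                  clang::CodeGen::CallArgList &Args) {
  Args.allocateArgumentMemory(CGF); // stack save + stack-restore cleanup
  // ... evaluate each argument directly into the inalloca struct,
  // ... emit the call itself, then
  Args.freeArgumentMemory(CGF);     // run the stack restore out of lexical order
}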
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 4848d75..071f8b3 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -12,10 +12,10 @@
 //===----------------------------------------------------------------------===//
 
 #include "CGBlocks.h"
+#include "CGCXXABI.h"
 #include "CGDebugInfo.h"
 #include "CGRecordLayout.h"
 #include "CodeGenFunction.h"
-#include "CGCXXABI.h"
 #include "clang/AST/CXXInheritance.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/AST/EvaluatedExprVisitor.h"
@@ -342,7 +342,7 @@
     CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
       : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       const CXXRecordDecl *DerivedClass =
         cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
 
@@ -549,10 +549,8 @@
     // If we are initializing an anonymous union field, drill down to
     // the field.
     IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
-    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
-      IEnd = IndirectField->chain_end();
-    for ( ; I != IEnd; ++I)
-      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
+    for (const auto *I : IndirectField->chain())
+      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I));
     FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
   } else {
     LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
@@ -699,6 +697,10 @@
   const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
   CXXCtorType CtorType = CurGD.getCtorType();
 
+  assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
+          CtorType == Ctor_Complete) &&
+         "can only generate complete ctor for this ABI");
+
   // Before we go any further, try the complete->base constructor
   // delegation optimization.
   if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
@@ -717,6 +719,9 @@
   if (IsTryBody)
     EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
 
+  RegionCounter Cnt = getPGORegionCounter(Body);
+  Cnt.beginRegion(Builder);
+
   RunCleanupsScope RunCleanups(*this);
 
   // TODO: in restricted cases, we can emit the vbase initializers of
@@ -1190,23 +1195,17 @@
     return false;
 
   // Check fields.
-  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
-       E = BaseClassDecl->field_end(); I != E; ++I) {
-    const FieldDecl *Field = *I;
-    
+  for (const auto *Field : BaseClassDecl->fields())
     if (!FieldHasTrivialDestructorBody(Context, Field))
       return false;
-  }
 
   // Check non-virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = 
-       BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
-       I != E; ++I) {
-    if (I->isVirtual())
+  for (const auto &I : BaseClassDecl->bases()) {
+    if (I.isVirtual())
       continue;
 
     const CXXRecordDecl *NonVirtualBase =
-      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
     if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                   MostDerivedClassDecl))
       return false;
@@ -1214,11 +1213,9 @@
 
   if (BaseClassDecl == MostDerivedClassDecl) {
     // Check virtual bases.
-    for (CXXRecordDecl::base_class_const_iterator I = 
-         BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
-         I != E; ++I) {
+    for (const auto &I : BaseClassDecl->vbases()) {
       const CXXRecordDecl *VirtualBase =
-        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
       if (!HasTrivialDestructorBody(Context, VirtualBase,
                                     MostDerivedClassDecl))
         return false;      
@@ -1251,13 +1248,9 @@
 
   // Check the fields.
   const CXXRecordDecl *ClassDecl = Dtor->getParent();
-  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
-       E = ClassDecl->field_end(); I != E; ++I) {
-    const FieldDecl *Field = *I;
-
+  for (const auto *Field : ClassDecl->fields())
     if (!FieldHasTrivialDestructorBody(Context, Field))
       return false;
-  }
 
   return true;
 }
@@ -1315,6 +1308,9 @@
   case Dtor_Base:
     assert(Body);
 
+    RegionCounter Cnt = getPGORegionCounter(Body);
+    Cnt.beginRegion(Builder);
+
     // Enter the cleanup scopes for fields and non-virtual bases.
     EnterDtorCleanups(Dtor, Dtor_Base);
 
@@ -1355,11 +1351,8 @@
   LexicalScope Scope(*this, RootCS->getSourceRange());
 
   AssignmentMemcpyizer AM(*this, AssignOp, Args);
-  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
-                                         E = RootCS->body_end();
-       I != E; ++I) {
-    AM.emitAssignment(*I);  
-  }
+  for (auto *I : RootCS->body())
+    AM.emitAssignment(I);  
   AM.finish();
 }
 
@@ -1368,7 +1361,7 @@
   struct CallDtorDelete : EHScopeStack::Cleanup {
     CallDtorDelete() {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
       const CXXRecordDecl *ClassDecl = Dtor->getParent();
       CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
@@ -1384,7 +1377,7 @@
       assert(ShouldDeleteCondition != NULL);
     }
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
       llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
       llvm::Value *ShouldCallDelete
@@ -1413,7 +1406,7 @@
       : field(field), destroyer(destroyer),
         useEHCleanupForArray(useEHCleanupForArray) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       // Find the address of the field.
       llvm::Value *thisValue = CGF.LoadCXXThis();
       QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
@@ -1427,7 +1420,7 @@
   };
 }
 
-/// EmitDtorEpilogue - Emit all code that comes at the end of class's
+/// \brief Emit all code that comes at the end of the class's
 /// destructor. This is to call destructors on members and base classes
 /// in reverse order of their construction.
 void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
@@ -1439,7 +1432,7 @@
   // operator delete that Sema picked up.
   if (DtorType == Dtor_Deleting) {
     assert(DD->getOperatorDelete() && 
-           "operator delete missing - EmitDtorEpilogue");
+           "operator delete missing - EnterDtorCleanups");
     if (CXXStructorImplicitParamValue) {
       // If there is an implicit param to the deleting dtor, it's a boolean
       // telling whether we should call delete at the end of the dtor.
@@ -1462,10 +1455,7 @@
 
     // We push them in the forward order so that they'll be popped in
     // the reverse order.
-    for (CXXRecordDecl::base_class_const_iterator I = 
-           ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
-              I != E; ++I) {
-      const CXXBaseSpecifier &Base = *I;
+    for (const auto &Base : ClassDecl->vbases()) {
       CXXRecordDecl *BaseClassDecl
         = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
     
@@ -1484,10 +1474,7 @@
   assert(DtorType == Dtor_Base);
   
   // Destroy non-virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = 
-        ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
-    const CXXBaseSpecifier &Base = *I;
-    
+  for (const auto &Base : ClassDecl->bases()) {
     // Ignore virtual bases.
     if (Base.isVirtual())
       continue;
@@ -1504,11 +1491,8 @@
   }
 
   // Destroy direct fields.
-  SmallVector<const FieldDecl *, 16> FieldDecls;
-  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
-       E = ClassDecl->field_end(); I != E; ++I) {
-    const FieldDecl *field = *I;
-    QualType type = field->getType();
+  for (const auto *Field : ClassDecl->fields()) {
+    QualType type = Field->getType();
     QualType::DestructionKind dtorKind = type.isDestructedType();
     if (!dtorKind) continue;
 
@@ -1517,7 +1501,7 @@
     if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
 
     CleanupKind cleanupKind = getCleanupKind(dtorKind);
-    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
+    EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
                                       getDestroyer(dtorKind),
                                       cleanupKind & EHCleanup);
   }
@@ -1683,9 +1667,31 @@
     return;
   }
 
-  // Non-trivial constructors are handled in an ABI-specific manner.
-  CGM.getCXXABI().EmitConstructorCall(*this, D, Type, ForVirtualBase,
-                                      Delegating, This, ArgBeg, ArgEnd);
+  // C++11 [class.mfct.non-static]p2:
+  //   If a non-static member function of a class X is called for an object that
+  //   is not of type X, or of a type derived from X, the behavior is undefined.
+  // FIXME: Provide a source location here.
+  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
+                getContext().getRecordType(D->getParent()));
+
+  CallArgList Args;
+
+  // Push the this ptr.
+  Args.add(RValue::get(This), D->getThisType(getContext()));
+
+  // Add the rest of the user-supplied arguments.
+  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
+  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+
+  // Insert any ABI-specific implicit constructor arguments.
+  unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
+      *this, D, Type, ForVirtualBase, Delegating, Args);
+
+  // Emit the call.
+  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+  const CGFunctionInfo &Info =
+      CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
+  EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
 }
 
 void
@@ -1704,38 +1710,23 @@
   assert(D->isInstance() &&
          "Trying to emit a member call expr on a static method!");
   
-  const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
+  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
   
   CallArgList Args;
   
   // Push the this ptr.
   Args.add(RValue::get(This), D->getThisType(getContext()));
   
-  
   // Push the src ptr.
-  QualType QT = *(FPT->arg_type_begin());
+  QualType QT = *(FPT->param_type_begin());
   llvm::Type *t = CGM.getTypes().ConvertType(QT);
   Src = Builder.CreateBitCast(Src, t);
   Args.add(RValue::get(Src), QT);
-  
+
   // Skip over first argument (Src).
-  ++ArgBeg;
-  CallExpr::const_arg_iterator Arg = ArgBeg;
-  for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
-       E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
-    assert(Arg != ArgEnd && "Running over edge of argument list!");
-    EmitCallArg(Args, *Arg, *I);
-  }
-  // Either we've emitted all the call args, or we have a call to a
-  // variadic function.
-  assert((Arg == ArgEnd || FPT->isVariadic()) &&
-         "Extra arguments in non-variadic function!");
-  // If we still have any arguments, emit them using the type of the argument.
-  for (; Arg != ArgEnd; ++Arg) {
-    QualType ArgType = Arg->getType();
-    EmitCallArg(Args, *Arg, ArgType);
-  }
-  
+  EmitCallArgs(Args, FPT->isVariadic(), FPT->param_type_begin() + 1,
+               FPT->param_type_end(), ArgBeg + 1, ArgEnd);
+
   EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
            Callee, ReturnValueSlot(), Args, D);
 }
@@ -1790,7 +1781,7 @@
                            CXXDtorType Type)
       : Dtor(D), Addr(Addr), Type(Type) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                 /*Delegating=*/true, Addr);
     }
@@ -1830,23 +1821,8 @@
                                             bool ForVirtualBase,
                                             bool Delegating,
                                             llvm::Value *This) {
-  GlobalDecl GD(DD, Type);
-  llvm::Value *VTT = GetVTTParameter(GD, ForVirtualBase, Delegating);
-  llvm::Value *Callee = 0;
-  if (getLangOpts().AppleKext)
-    Callee = BuildAppleKextVirtualDestructorCall(DD, Type, 
-                                                 DD->getParent());
-    
-  if (!Callee)
-    Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
-
-  if (DD->isVirtual())
-    This = CGM.getCXXABI().adjustThisArgumentForVirtualCall(*this, GD, This);
-
-  // FIXME: Provide a source location here.
-  EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
-                    VTT, getContext().getPointerType(getContext().VoidPtrTy),
-                    0, 0);
+  CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
+                                     Delegating, This);
 }
 
 namespace {
@@ -1857,7 +1833,7 @@
     CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
       : Dtor(D), Addr(Addr) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                 /*ForVirtualBase=*/false,
                                 /*Delegating=*/false, Addr);
@@ -1944,10 +1920,9 @@
   const CXXRecordDecl *RD = Base.getBase();
 
   // Traverse bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), 
-       E = RD->bases_end(); I != E; ++I) {
+  for (const auto &I : RD->bases()) {
     CXXRecordDecl *BaseDecl
-      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
     // Ignore classes without a vtable.
     if (!BaseDecl->isDynamicClass())
@@ -1957,7 +1932,7 @@
     CharUnits BaseOffsetFromNearestVBase;
     bool BaseDeclIsNonVirtualPrimaryBase;
 
-    if (I->isVirtual()) {
+    if (I.isVirtual()) {
       // Check if we've visited this virtual base before.
       if (!VBases.insert(BaseDecl))
         continue;
@@ -1978,7 +1953,7 @@
     }
     
     InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset), 
-                             I->isVirtual() ? BaseDecl : NearestVBase,
+                             I.isVirtual() ? BaseDecl : NearestVBase,
                              BaseOffsetFromNearestVBase,
                              BaseDeclIsNonVirtualPrimaryBase, 
                              VTableClass, VBases);
@@ -2127,7 +2102,7 @@
   // Prepare the return slot.
   const FunctionProtoType *FPT =
     callOperator->getType()->castAs<FunctionProtoType>();
-  QualType resultType = FPT->getResultType();
+  QualType resultType = FPT->getReturnType();
   ReturnValueSlot returnSlot;
   if (!resultType->isVoidType() &&
       calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
@@ -2162,11 +2137,9 @@
   CallArgs.add(RValue::get(ThisPtr), ThisType);
 
   // Add the rest of the parameters.
-  for (BlockDecl::param_const_iterator I = BD->param_begin(),
-       E = BD->param_end(); I != E; ++I) {
-    ParmVarDecl *param = *I;
+  for (auto param : BD->params())
     EmitDelegateCallArg(CallArgs, param, param->getLocStart());
-  }
+
   assert(!Lambda->isGenericLambda() && 
             "generic lambda interconversion to block not implemented");
   EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
@@ -2194,11 +2167,9 @@
   CallArgs.add(RValue::get(ThisPtr), ThisType);
 
   // Add the rest of the parameters.
-  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
-       E = MD->param_end(); I != E; ++I) {
-    ParmVarDecl *param = *I;
-    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
-  }
+  for (auto Param : MD->params())
+    EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());
+
   const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
   // For a generic lambda, find the corresponding call operator specialization
   // to which the call to the static-invoker shall be forwarded.
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 65de4d4..8748224 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -528,7 +528,7 @@
   llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
   for (llvm::BasicBlock::use_iterator
          i = entry->use_begin(), e = entry->use_end(); i != e; ) {
-    llvm::Use &use = i.getUse();
+    llvm::Use &use = *i;
     ++i;
 
     use.set(unreachableBB);
@@ -860,7 +860,9 @@
 
   // Emit the EH cleanup if required.
   if (RequiresEHCleanup) {
-    if (CGDebugInfo *DI = getDebugInfo())
+    CGDebugInfo *DI = getDebugInfo();
+    SaveAndRestoreLocation AutoRestoreLocation(*this, Builder);
+    if (DI)
       DI->EmitLocation(Builder, CurEHLocation);
 
     CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 1bd6bba..fed75bc 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -194,6 +194,15 @@
     return getHandlers()[I];
   }
 
+  // Clear all handler blocks.
+  // FIXME: it's better to always call clearHandlerBlocks in DTOR and have a
+  // 'takeHandler' or some such function which removes ownership from the
+  // EHCatchScope object if the handlers should live longer than EHCatchScope.
+  void clearHandlerBlocks() {
+    for (unsigned I = 0, N = getNumHandlers(); I != N; ++I)
+      delete getHandler(I).Block;
+  }
+
   typedef const Handler *iterator;
   iterator begin() const { return getHandlers(); }
   iterator end() const { return getHandlers() + getNumHandlers(); }
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index fcb26f0..0e94b51 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -37,7 +37,7 @@
 #include "llvm/IR/Module.h"
 #include "llvm/Support/Dwarf.h"
 #include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Path.h"

+#include "llvm/Support/Path.h"
 using namespace clang;
 using namespace clang::CodeGen;
 
@@ -52,30 +52,35 @@
          "Region stack mismatch, stack not empty!");
 }
 
-
-NoLocation::NoLocation(CodeGenFunction &CGF, CGBuilderTy &B)
-  : DI(CGF.getDebugInfo()), Builder(B) {
+SaveAndRestoreLocation::SaveAndRestoreLocation(CodeGenFunction &CGF,
+                                               CGBuilderTy &B)
+    : DI(CGF.getDebugInfo()), Builder(B) {
   if (DI) {
     SavedLoc = DI->getLocation();
     DI->CurLoc = SourceLocation();
-    Builder.SetCurrentDebugLocation(llvm::DebugLoc());
   }
 }
 
+SaveAndRestoreLocation::~SaveAndRestoreLocation() {
+  if (DI)
+    DI->EmitLocation(Builder, SavedLoc);
+}
+
+NoLocation::NoLocation(CodeGenFunction &CGF, CGBuilderTy &B)
+  : SaveAndRestoreLocation(CGF, B) {
+  if (DI)
+    Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+}
+
 NoLocation::~NoLocation() {
-  if (DI) {
+  if (DI)
     assert(Builder.getCurrentDebugLocation().isUnknown());
-    DI->CurLoc = SavedLoc;
-  }
 }
 
 ArtificialLocation::ArtificialLocation(CodeGenFunction &CGF, CGBuilderTy &B)
-  : DI(CGF.getDebugInfo()), Builder(B) {
-  if (DI) {
-    SavedLoc = DI->getLocation();
-    DI->CurLoc = SourceLocation();
+  : SaveAndRestoreLocation(CGF, B) {
+  if (DI)
     Builder.SetCurrentDebugLocation(llvm::DebugLoc());
-  }
 }
 
 void ArtificialLocation::Emit() {
@@ -91,10 +96,8 @@
 }
 
 ArtificialLocation::~ArtificialLocation() {
-  if (DI) {
+  if (DI)
     assert(Builder.getCurrentDebugLocation().getLine() == 0);
-    DI->CurLoc = SavedLoc;
-  }
 }
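A minimal usage sketch of the refactored RAII helpers (illustrative caller, not actual Clang code, and it assumes the internal lib/CodeGen headers): SaveAndRestoreLocation restores the previous location when it is destroyed, and NoLocation additionally clears the builder's debug location while it is alive.

static void emitPrologueLikeCode(clang::CodeGen::CodeGenFunction &CGF,
                                 clang::CodeGen::CGBuilderTy &Builder) {
  clang::CodeGen::NoLocation NL(CGF, Builder);
  // Instructions emitted here carry no source location; the previous
  // location comes back automatically when NL goes out of scope.
}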
 
 void CGDebugInfo::setLocation(SourceLocation Loc) {
@@ -225,34 +228,20 @@
 /// getClassName - Get class name including template argument list.
 StringRef
 CGDebugInfo::getClassName(const RecordDecl *RD) {
-  const ClassTemplateSpecializationDecl *Spec
-    = dyn_cast<ClassTemplateSpecializationDecl>(RD);
-  if (!Spec)
+  // Quick optimization to avoid having to intern strings that are already
+  // stored reliably elsewhere.
+  if (!isa<ClassTemplateSpecializationDecl>(RD))
     return RD->getName();
 
-  const TemplateArgument *Args;
-  unsigned NumArgs;
-  if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
-    const TemplateSpecializationType *TST =
-      cast<TemplateSpecializationType>(TAW->getType());
-    Args = TST->getArgs();
-    NumArgs = TST->getNumArgs();
-  } else {
-    const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
-    Args = TemplateArgs.data();
-    NumArgs = TemplateArgs.size();
-  }
-  StringRef Name = RD->getIdentifier()->getName();
-  PrintingPolicy Policy(CGM.getLangOpts());
-  SmallString<128> TemplateArgList;
+  SmallString<128> Name;
   {
-    llvm::raw_svector_ostream OS(TemplateArgList);
-    TemplateSpecializationType::PrintTemplateArgumentList(OS, Args, NumArgs,
-                                                          Policy);
+    llvm::raw_svector_ostream OS(Name);
+    RD->getNameForDiagnostic(OS, CGM.getContext().getPrintingPolicy(),
+                             /*Qualified*/ false);
   }
 
   // Copy this name on the side and use its reference.
-  return internString(Name, TemplateArgList);
+  return internString(Name);
 }
 
 /// getOrCreateFile - Get the file debug info descriptor for the input location.
@@ -342,9 +331,9 @@
   if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
     MainFileDir = MainFile->getDir()->getName();
     if (MainFileDir != ".") {
-      llvm::SmallString<1024> MainFileDirSS(MainFileDir);
-      llvm::sys::path::append(MainFileDirSS, MainFileName);
-      MainFileName = MainFileDirSS.str();
+      llvm::SmallString<1024> MainFileDirSS(MainFileDir);
+      llvm::sys::path::append(MainFileDirSS, MainFileName);
+      MainFileName = MainFileDirSS.str();
     }
   }
 
@@ -379,10 +368,12 @@
 
   // Create new compile unit.
   // FIXME - Eliminate TheCU.
-  TheCU = DBuilder.createCompileUnit(LangTag, Filename, getCurrentDirname(),
-                                     Producer, LO.Optimize,
-                                     CGM.getCodeGenOpts().DwarfDebugFlags,
-                                     RuntimeVers, SplitDwarfFilename);
+  TheCU = DBuilder.createCompileUnit(
+      LangTag, Filename, getCurrentDirname(), Producer, LO.Optimize,
+      CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers, SplitDwarfFilename,
+      DebugKind == CodeGenOptions::DebugLineTablesOnly
+          ? llvm::DIBuilder::LineTablesOnly
+          : llvm::DIBuilder::FullDebug);
 }
 
 /// CreateType - Get the Basic type from the cache or create a new
@@ -736,14 +727,16 @@
     return llvm::DIType();
   // We don't set size information, but do specify where the typedef was
   // declared.
-  unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
+  SourceLocation Loc = Ty->getDecl()->getLocation();
+  llvm::DIFile File = getOrCreateFile(Loc);
+  unsigned Line = getLineNumber(Loc);
   const TypedefNameDecl *TyDecl = Ty->getDecl();
 
   llvm::DIDescriptor TypedefContext =
     getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
 
   return
-    DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TypedefContext);
+    DBuilder.createTypedef(Src, TyDecl->getName(), File, Line, TypedefContext);
 }
 
 llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
@@ -751,15 +744,17 @@
   SmallVector<llvm::Value *, 16> EltTys;
 
   // Add the result type at least.
-  EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
+  EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit));
 
   // Set up remainder of arguments if there is a prototype.
-  // FIXME: IF NOT, HOW IS THIS REPRESENTED?  llvm-gcc doesn't represent '...'!
+  // Otherwise, emit it as a variadic function.
   if (isa<FunctionNoProtoType>(Ty))
     EltTys.push_back(DBuilder.createUnspecifiedParameter());
   else if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(Ty)) {
-    for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
-      EltTys.push_back(getOrCreateType(FPT->getArgType(i), Unit));
+    for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i)
+      EltTys.push_back(getOrCreateType(FPT->getParamType(i), Unit));
+    if (FPT->isVariadic())
+      EltTys.push_back(DBuilder.createUnspecifiedParameter());
   }
 
   llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
@@ -784,7 +779,7 @@
   uint64_t sizeInBits = 0;
   unsigned alignInBits = 0;
   if (!type->isIncompleteArrayType()) {
-    llvm::tie(sizeInBits, alignInBits) = CGM.getContext().getTypeInfo(type);
+    std::tie(sizeInBits, alignInBits) = CGM.getContext().getTypeInfo(type);
 
     if (sizeInBitsOverride)
       sizeInBits = sizeInBitsOverride;
@@ -926,9 +921,8 @@
 
     // Static and non-static members should appear in the same order as
     // the corresponding declarations in the source program.
-    for (RecordDecl::decl_iterator I = record->decls_begin(),
-           E = record->decls_end(); I != E; ++I)
-      if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
+    for (const auto *I : record->decls())
+      if (const auto *V = dyn_cast<VarDecl>(I)) {
         // Reuse the existing static member declaration if one exists
         llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator MI =
             StaticDataMemberCache.find(V->getCanonicalDecl());
@@ -939,7 +933,7 @@
               llvm::DIDerivedType(cast<llvm::MDNode>(MI->second)));
         } else
           elements.push_back(CreateRecordStaticField(V, RecordTy));
-      } else if (FieldDecl *field = dyn_cast<FieldDecl>(*I)) {
+      } else if (const auto *field = dyn_cast<FieldDecl>(I)) {
         CollectRecordNormalField(field, layout.getFieldOffset(fieldNo),
                                  tunit, elements, RecordTy);
 
@@ -1005,7 +999,13 @@
 
   llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
 
-  return DBuilder.createSubroutineType(Unit, EltTypeArray);
+  unsigned Flags = 0;
+  if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
+    Flags |= llvm::DIDescriptor::FlagLValueReference;
+  if (Func->getExtProtoInfo().RefQualifier == RQ_RValue)
+    Flags |= llvm::DIDescriptor::FlagRValueReference;
+
+  return DBuilder.createSubroutineType(Unit, EltTypeArray, Flags);
 }
 
 /// isFunctionLocalClass - Return true if CXXRecordDecl is defined
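The two new flags record C++ ref-qualifiers on member function types. An illustrative source fragment showing the declarations they describe:

struct Widget {
  void draw() &;      // lvalue-ref-qualified  -> FlagLValueReference
  void consume() &&;  // rvalue-ref-qualified  -> FlagRValueReference
};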
@@ -1084,6 +1084,10 @@
   }
   if (Method->hasPrototype())
     Flags |= llvm::DIDescriptor::FlagPrototyped;
+  if (Method->getRefQualifier() == RQ_LValue)
+    Flags |= llvm::DIDescriptor::FlagLValueReference;
+  if (Method->getRefQualifier() == RQ_RValue)
+    Flags |= llvm::DIDescriptor::FlagRValueReference;
 
   llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
   llvm::DISubprogram SP =
@@ -1111,9 +1115,8 @@
   // Since we want more than just the individual member decls if we
   // have templated functions iterate over every declaration to gather
   // the functions.
-  for(DeclContext::decl_iterator I = RD->decls_begin(),
-        E = RD->decls_end(); I != E; ++I) {
-    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*I)) {
+  for(const auto *I : RD->decls()) {
+    if (const auto *Method = dyn_cast<CXXMethodDecl>(I)) {
       // Reuse the existing member function declaration if it exists.
       // It may be associated with the declaration of the type & should be
       // reused as we're building the definition.
@@ -1130,16 +1133,13 @@
           EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
       } else
         EltTys.push_back(MI->second);
-    } else if (const FunctionTemplateDecl *FTD =
-                   dyn_cast<FunctionTemplateDecl>(*I)) {
+    } else if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(I)) {
       // Add any template specializations that have already been seen. Like
       // implicit member functions, these may have been added to a declaration
       // in the case of vtable-based debug info reduction.
-      for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
-                                               SE = FTD->spec_end();
-           SI != SE; ++SI) {
+      for (const auto *SI : FTD->specializations()) {
         llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator MI =
-            SPCache.find(cast<CXXMethodDecl>(*SI)->getCanonicalDecl());
+            SPCache.find(cast<CXXMethodDecl>(SI)->getCanonicalDecl());
         if (MI != SPCache.end())
           EltTys.push_back(MI->second);
       }
@@ -1156,15 +1156,14 @@
                 llvm::DIType RecordTy) {
 
   const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
-  for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
-         BE = RD->bases_end(); BI != BE; ++BI) {
+  for (const auto &BI : RD->bases()) {
     unsigned BFlags = 0;
     uint64_t BaseOffset;
 
     const CXXRecordDecl *Base =
-      cast<CXXRecordDecl>(BI->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl());
 
-    if (BI->isVirtual()) {
+    if (BI.isVirtual()) {
       // virtual base offset offset is -ve. The code generator emits dwarf
       // expression where it expects +ve number.
       BaseOffset =
@@ -1176,7 +1175,7 @@
     // FIXME: Inconsistent units for BaseOffset. It is in bytes when
     // BI->isVirtual() and bits when not.
 
-    AccessSpecifier Access = BI->getAccessSpecifier();
+    AccessSpecifier Access = BI.getAccessSpecifier();
     if (Access == clang::AS_private)
       BFlags |= llvm::DIDescriptor::FlagPrivate;
     else if (Access == clang::AS_protected)
@@ -1184,7 +1183,7 @@
 
     llvm::DIType DTy =
       DBuilder.createInheritance(RecordTy,
-                                 getOrCreateType(BI->getType(), Unit),
+                                 getOrCreateType(BI.getType(), Unit),
                                  BaseOffset, BFlags);
     EltTys.push_back(DTy);
   }
@@ -1239,7 +1238,7 @@
         V = CGM.GetAddrOfFunction(FD);
       // Member data pointers have special handling too to compute the fixed
       // offset within the object.
-      if (isa<FieldDecl>(D)) {
+      if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) {
         // These five lines (& possibly the above member function pointer
         // handling) might be able to be refactored to use similar code in
         // CodeGenModule::getMemberPointerConstant
@@ -1416,6 +1415,9 @@
 }
 
 void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
+  if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+    return;
+
   if (const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
     if (CXXDecl->isDynamicClass())
       return;
@@ -1439,32 +1441,57 @@
   TypeCache[TyPtr] = Res;
 }
 
+static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
+                                        CXXRecordDecl::method_iterator End) {
+  for (; I != End; ++I)
+    if (FunctionDecl *Tmpl = I->getInstantiatedFromMemberFunction())
+      if (!Tmpl->isImplicit() && Tmpl->isThisDeclarationADefinition() &&
+          !I->getMemberSpecializationInfo()->isExplicitSpecialization())
+        return true;
+  return false;
+}
+
+static bool shouldOmitDefinition(CodeGenOptions::DebugInfoKind DebugKind,
+                                 const RecordDecl *RD,
+                                 const LangOptions &LangOpts) {
+  if (DebugKind > CodeGenOptions::LimitedDebugInfo)
+    return false;
+
+  if (!LangOpts.CPlusPlus)
+    return false;
+
+  if (!RD->isCompleteDefinitionRequired())
+    return true;
+
+  const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+
+  if (!CXXDecl)
+    return false;
+
+  if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass())
+    return true;
+
+  TemplateSpecializationKind Spec = TSK_Undeclared;
+  if (const ClassTemplateSpecializationDecl *SD =
+          dyn_cast<ClassTemplateSpecializationDecl>(RD))
+    Spec = SD->getSpecializationKind();
+
+  if (Spec == TSK_ExplicitInstantiationDeclaration &&
+      hasExplicitMemberDefinition(CXXDecl->method_begin(),
+                                  CXXDecl->method_end()))
+    return true;
+
+  return false;
+}
+
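Roughly the situation the new shouldOmitDefinition() check targets (illustrative source, built with limited debug info): an explicit instantiation declaration whose member definitions, and therefore full debug info for the class, can be expected from the translation unit that provides the explicit instantiation definition.

// Illustrative only: shouldOmitDefinition() can return true for Vec<int> here.
template <typename T> struct Vec {
  void push(T v) { (void)v; }           // the pattern is a definition
};
extern template struct Vec<int>;        // TSK_ExplicitInstantiationDeclaration
void use(Vec<int> *v) { v->push(1); }   // this TU can make do with a declaration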
 /// CreateType - get structure or union type.
 llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
   RecordDecl *RD = Ty->getDecl();
-  const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
-  // Always emit declarations for types that aren't required to be complete when
-  // in limit-debug-info mode. If the type is later found to be required to be
-  // complete this declaration will be upgraded to a definition by
-  // `completeRequiredType`.
-  // If the type is dynamic, only emit the definition in TUs that require class
-  // data. This is handled by `completeClassData`.
   llvm::DICompositeType T(getTypeOrNull(QualType(Ty, 0)));
-  // If we've already emitted the type, just use that, even if it's only a
-  // declaration. The completeType, completeRequiredType, and completeClassData
-  // callbacks will handle promoting the declaration to a definition.
-  if (T ||
-      (DebugKind <= CodeGenOptions::LimitedDebugInfo &&
-       // Under -flimit-debug-info, emit only a declaration unless the type is
-       // required to be complete.
-       !RD->isCompleteDefinitionRequired() && CGM.getLangOpts().CPlusPlus) ||
-      // If the class is dynamic, only emit a declaration. A definition will be
-      // emitted whenever the vtable is emitted.
-      (CXXDecl && CXXDecl->hasDefinition() && CXXDecl->isDynamicClass()) || T) {
-    llvm::DIDescriptor FDContext =
-      getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+  if (T || shouldOmitDefinition(DebugKind, RD, CGM.getLangOpts())) {
     if (!T)
-      T = getOrCreateRecordFwdDecl(Ty, FDContext);
+      T = getOrCreateRecordFwdDecl(
+          Ty, getContextDescriptor(cast<Decl>(RD->getDeclContext())));
     return T;
   }
 
@@ -1625,9 +1652,7 @@
   }
 
   // Create entries for all of the properties.
-  for (ObjCContainerDecl::prop_iterator I = ID->prop_begin(),
-         E = ID->prop_end(); I != E; ++I) {
-    const ObjCPropertyDecl *PD = *I;
+  for (const auto *PD : ID->properties()) {
     SourceLocation Loc = PD->getLocation();
     llvm::DIFile PUnit = getOrCreateFile(Loc);
     unsigned PLine = getLineNumber(Loc);
@@ -1829,11 +1854,13 @@
   if (!Ty->getPointeeType()->isFunctionType())
     return DBuilder.createMemberPointerType(
         getOrCreateType(Ty->getPointeeType(), U), ClassType);
+
+  const FunctionProtoType *FPT =
+    Ty->getPointeeType()->getAs<FunctionProtoType>();
   return DBuilder.createMemberPointerType(getOrCreateInstanceMethodType(
-      CGM.getContext().getPointerType(
-          QualType(Ty->getClass(), Ty->getPointeeType().getCVRQualifiers())),
-      Ty->getPointeeType()->getAs<FunctionProtoType>(), U),
-                                          ClassType);
+      CGM.getContext().getPointerType(QualType(Ty->getClass(),
+                                               FPT->getTypeQuals())),
+      FPT, U), ClassType);
 }
 
 llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
@@ -1871,9 +1898,7 @@
   // Create DIEnumerator elements for each enumerator.
   SmallVector<llvm::Value *, 16> Enumerators;
   ED = ED->getDefinition();
-  for (EnumDecl::enumerator_iterator
-         Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
-       Enum != EnumEnd; ++Enum) {
+  for (const auto *Enum : ED->enumerators()) {
     Enumerators.push_back(
       DBuilder.createEnumerator(Enum->getName(),
                                 Enum->getInitVal().getSExtValue()));
@@ -1994,6 +2019,17 @@
   return llvm::DIType(cast_or_null<llvm::MDNode>(V));
 }
 
+void CGDebugInfo::completeTemplateDefinition(
+    const ClassTemplateSpecializationDecl &SD) {
+  if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+    return;
+
+  completeClassData(&SD);
+  // In case this type has no member function definitions being emitted, ensure
+  // it is retained.
+  RetainedTypes.push_back(CGM.getContext().getRecordType(&SD).getAsOpaquePtr());
+}
+
 /// getCachedInterfaceTypeOrNull - Get the type from the interface
 /// cache, unless it needs to regenerated. Otherwise return null.
 llvm::Value *CGDebugInfo::getCachedInterfaceTypeOrNull(QualType Ty) {
@@ -2123,10 +2159,11 @@
     return CreateType(cast<ComplexType>(Ty));
   case Type::Pointer:
     return CreateType(cast<PointerType>(Ty), Unit);
+  case Type::Adjusted:
   case Type::Decayed:
-    // Decayed types are just pointers in LLVM and DWARF.
+    // Decayed and adjusted types use the adjusted type in LLVM and DWARF.
     return CreateType(
-        cast<PointerType>(cast<DecayedType>(Ty)->getDecayedType()), Unit);
+        cast<PointerType>(cast<AdjustedType>(Ty)->getAdjustedType()), Unit);
   case Type::BlockPointer:
     return CreateType(cast<BlockPointerType>(Ty), Unit);
   case Type::Typedef:
@@ -2233,9 +2270,10 @@
   if (T && (!T.isForwardDecl() || !RD->getDefinition()))
       return T;
 
-  // If this is just a forward declaration, construct an appropriately
-  // marked node and just return it.
-  if (!RD->getDefinition())
+  // If this is just a forward or incomplete declaration, construct an
+  // appropriately marked node and just return it.
+  const RecordDecl *D = RD->getDefinition();
+  if (!D || !D->isCompleteDefinition())
     return getOrCreateRecordFwdDecl(Ty, RDContext);
 
   uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -2277,7 +2315,7 @@
   llvm::DICompositeType ContainingType;
   const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
   if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
-    // Seek non virtual primary base root.
+    // Seek non-virtual primary base root.
     while (1) {
       const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
       const CXXRecordDecl *PBT = BRL.getPrimaryBase();
@@ -2309,7 +2347,7 @@
   return Ty;
 }
 
-llvm::DIDescriptor CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
+llvm::DIScope CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
   // We only need a declaration (not a definition) of the type - so use whatever
   // we would otherwise do to get a type for a pointee. (forward declarations in
   // limited debug info, full definitions (if the type definition is available)
@@ -2327,9 +2365,9 @@
   llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator I =
       DeclCache.find(D->getCanonicalDecl());
   if (I == DeclCache.end())
-    return llvm::DIDescriptor();
+    return llvm::DIScope();
   llvm::Value *V = I->second;
-  return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(V));
+  return llvm::DIScope(dyn_cast_or_null<llvm::MDNode>(V));
 }
 
 /// getFunctionDeclaration - Return debug info descriptor to describe method
@@ -2352,7 +2390,6 @@
       llvm::DICompositeType T(S);
       llvm::DISubprogram SP =
           CreateCXXMemberFunction(MD, getOrCreateFile(MD->getLocation()), T);
-      T.addMember(SP);
       return SP;
     }
   }
@@ -2363,9 +2400,7 @@
       return SP;
   }
 
-  for (FunctionDecl::redecl_iterator I = FD->redecls_begin(),
-         E = FD->redecls_end(); I != E; ++I) {
-    const FunctionDecl *NextFD = *I;
+  for (auto NextFD : FD->redecls()) {
     llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
       MI = SPCache.find(NextFD->getCanonicalDecl());
     if (MI != SPCache.end()) {
@@ -2397,7 +2432,7 @@
     SmallVector<llvm::Value *, 16> Elts;
 
     // First element is always return type. For 'void' functions it is NULL.
-    QualType ResultTy = OMethod->getResultType();
+    QualType ResultTy = OMethod->getReturnType();
 
     // Replace the instancetype keyword with the actual type.
     if (ResultTy == CGM.getContext().getObjCInstanceType())
@@ -2413,13 +2448,27 @@
     llvm::DIType CmdTy = getOrCreateType(OMethod->getCmdDecl()->getType(), F);
     Elts.push_back(DBuilder.createArtificialType(CmdTy));
     // Get rest of the arguments.
-    for (ObjCMethodDecl::param_const_iterator PI = OMethod->param_begin(),
-           PE = OMethod->param_end(); PI != PE; ++PI)
-      Elts.push_back(getOrCreateType((*PI)->getType(), F));
+    for (const auto *PI : OMethod->params())
+      Elts.push_back(getOrCreateType(PI->getType(), F));
 
     llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
     return DBuilder.createSubroutineType(F, EltTypeArray);
   }
+
+  // Handle variadic function types; they need an additional
+  // unspecified parameter.
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+    if (FD->isVariadic()) {
+      SmallVector<llvm::Value *, 16> EltTys;
+      EltTys.push_back(getOrCreateType(FD->getReturnType(), F));
+      if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FnType))
+        for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i)
+          EltTys.push_back(getOrCreateType(FPT->getParamType(i), F));
+      EltTys.push_back(DBuilder.createUnspecifiedParameter());
+      llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
+      return DBuilder.createSubroutineType(F, EltTypeArray);
+    }
+
   return llvm::DICompositeType(getOrCreateType(FnType, F));
 }
 
@@ -2434,14 +2483,25 @@
   FnBeginRegionCount.push_back(LexicalBlockStack.size());
 
   const Decl *D = GD.getDecl();
-  // Function may lack declaration in source code if it is created by Clang
-  // CodeGen (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
+
+  // Use the location of the start of the function to determine where
+  // the function definition is located. By default use the location
+  // of the declaration as the location for the subprogram. A function
+  // may lack a declaration in the source code if it is created by code
+  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
   bool HasDecl = (D != 0);
-  // Use the location of the declaration.
   SourceLocation Loc;
-  if (HasDecl)
+  if (HasDecl) {
     Loc = D->getLocation();
 
+    // If this is a function specialization then use the pattern body
+    // as the location for the function.
+    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+      if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
+        if (SpecDecl->hasBody(SpecDecl))
+          Loc = SpecDecl->getLocation();
+  }
+
   unsigned Flags = 0;
   llvm::DIFile Unit = getOrCreateFile(Loc);
   llvm::DIDescriptor FDContext(Unit);
@@ -2504,6 +2564,11 @@
   if (!HasDecl || D->isImplicit())
     Flags |= llvm::DIDescriptor::FlagArtificial;
 
+  // FIXME: The function declaration we're constructing here is mostly reusing
+  // declarations from CXXMethodDecl and not constructing new ones for arbitrary
+  // FunctionDecls. When/if we fix this we can have FDContext be TheCU/null for
+  // all subprograms instead of the actual context since subprogram definitions
+  // are emitted as CU level entities by the backend.
   llvm::DISubprogram SP =
       DBuilder.createFunction(FDContext, Name, LinkageName, Unit, LineNo,
                               getOrCreateFunctionType(D, FnType, Unit),
@@ -2514,9 +2579,10 @@
   if (HasDecl)
     DeclCache.insert(std::make_pair(D->getCanonicalDecl(), llvm::WeakVH(SP)));
 
-  // Push function on region stack.
+  // Push the function onto the lexical block stack.
   llvm::MDNode *SPN = SP;
   LexicalBlockStack.push_back(SPN);
+
   if (HasDecl)
     RegionMap[D] = llvm::WeakVH(SP);
 }
@@ -2560,7 +2626,8 @@
                                 llvm::DIDescriptor(LexicalBlockStack.back()),
                                 getOrCreateFile(CurLoc),
                                 getLineNumber(CurLoc),
-                                getColumnNumber(CurLoc));
+                                getColumnNumber(CurLoc),
+                                0);
   llvm::MDNode *DN = D;
   LexicalBlockStack.push_back(DN);
 }
@@ -2768,10 +2835,7 @@
     // all union fields.
     const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
     if (RD->isUnion() && RD->isAnonymousStructOrUnion()) {
-      for (RecordDecl::field_iterator I = RD->field_begin(),
-             E = RD->field_end();
-           I != E; ++I) {
-        FieldDecl *Field = *I;
+      for (const auto *Field : RD->fields()) {
         llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
         StringRef FieldName = Field->getName();
 
@@ -2970,10 +3034,7 @@
   }
 
   // Variable captures.
-  for (BlockDecl::capture_const_iterator
-         i = blockDecl->capture_begin(), e = blockDecl->capture_end();
-       i != e; ++i) {
-    const BlockDecl::Capture &capture = *i;
+  for (const auto &capture : blockDecl->captures()) {
     const VarDecl *variable = capture.getVariable();
     const CGBlockInfo::Capture &captureInfo = block.getCapture(variable);
 
@@ -3085,7 +3146,6 @@
   llvm::DICompositeType Ctxt(
       getContextDescriptor(cast<Decl>(D->getDeclContext())));
   llvm::DIDerivedType T = CreateRecordStaticField(D, Ctxt);
-  Ctxt.addMember(T);
   return T;
 }
 
@@ -3196,7 +3256,7 @@
   // Emitting one decl is sufficient - debuggers can detect that this is an
   // overloaded name & provide lookup for all the overloads.
   const UsingShadowDecl &USD = **UD.shadow_begin();
-  if (llvm::DIDescriptor Target =
+  if (llvm::DIScope Target =
           getDeclarationOrDefinition(USD.getUnderlyingDecl()))
     DBuilder.createImportedDeclaration(
         getCurrentContextDescriptor(cast<Decl>(USD.getDeclContext())), Target,
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 0ca274f..52784da 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -20,10 +20,10 @@
 #include "clang/Basic/SourceLocation.h"
 #include "clang/Frontend/CodeGenOptions.h"
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Allocator.h"
-#include "llvm/Support/ValueHandle.h"
 
 namespace llvm {
   class MDNode;
@@ -47,8 +47,8 @@
 /// and is responsible for emitting to llvm globals or pass directly to
 /// the backend.
 class CGDebugInfo {
-  friend class NoLocation;
   friend class ArtificialLocation;
+  friend class SaveAndRestoreLocation;
   CodeGenModule &CGM;
   const CodeGenOptions::DebugInfoKind DebugKind;
   llvm::DIBuilder DBuilder;
@@ -288,6 +288,8 @@
   void completeRequiredType(const RecordDecl *RD);
   void completeClassData(const RecordDecl *RD);
 
+  void completeTemplateDefinition(const ClassTemplateSpecializationDecl &SD);
+
 private:
   /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
   void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
@@ -342,9 +344,9 @@
   llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
                                 StringRef Name, uint64_t *Offset);
 
-  /// \brief Retrieve the DIDescriptor, if any, for the canonical form of this
+  /// \brief Retrieve the DIScope, if any, for the canonical form of this
   /// declaration.
-  llvm::DIDescriptor getDeclarationOrDefinition(const Decl *D);
+  llvm::DIScope getDeclarationOrDefinition(const Decl *D);
 
   /// getFunctionDeclaration - Return debug info descriptor to describe method
   /// declaration for the given method definition.
@@ -394,16 +396,26 @@
   }
 };
 
-/// NoLocation - An RAII object that temporarily disables debug
-/// locations. This is useful for emitting instructions that should be
-/// counted towards the function prologue.
-class NoLocation {
+/// SaveAndRestoreLocation - An RAII object that saves the current location
+/// and automatically restores it to the original value.
+class SaveAndRestoreLocation {
+protected:
   SourceLocation SavedLoc;
   CGDebugInfo *DI;
   CGBuilderTy &Builder;
 public:
+  SaveAndRestoreLocation(CodeGenFunction &CGF, CGBuilderTy &B);
+  /// Autorestore everything back to normal.
+  ~SaveAndRestoreLocation();
+};
+
+/// NoLocation - An RAII object that temporarily disables debug
+/// locations. This is useful for emitting instructions that should be
+/// counted towards the function prologue.
+class NoLocation : public SaveAndRestoreLocation {
+public:
   NoLocation(CodeGenFunction &CGF, CGBuilderTy &B);
-  /// ~NoLocation - Autorestore everything back to normal.
+  /// Autorestore everything back to normal.
   ~NoLocation();
 };
 
@@ -418,10 +430,7 @@
 /// This is necessary because passing an empty SourceLocation to
 /// CGDebugInfo::setLocation() will result in the last valid location
 /// being reused.
-class ArtificialLocation {
-  SourceLocation SavedLoc;
-  CGDebugInfo *DI;
-  CGBuilderTy &Builder;
+class ArtificialLocation : public SaveAndRestoreLocation {
 public:
   ArtificialLocation(CodeGenFunction &CGF, CGBuilderTy &B);
 
@@ -429,7 +438,7 @@
   /// (= the top of the LexicalBlockStack).
   void Emit();
 
-  /// ~ArtificialLocation - Autorestore everything back to normal.
+  /// Autorestore everything back to normal.
   ~ArtificialLocation();
 };
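
[Aside — not part of the patch] A minimal standalone sketch of the RAII shape this hunk factors out: a base guard that records the current location in its constructor and restores it in its destructor, so a NoLocation-style subclass only has to change what is in effect while the guard is alive. The types below are illustrative stand-ins, not the real CGDebugInfo/CGBuilderTy classes.

```cpp
#include <cassert>
#include <iostream>

// Hypothetical stand-in for the debug-info state that the real guards touch.
struct DebugState { int CurLoc = 42; };

// Base RAII guard: remembers the location on entry, restores it on exit.
class SaveAndRestoreLocation {
protected:
  DebugState &DI;
  int SavedLoc;
public:
  explicit SaveAndRestoreLocation(DebugState &D) : DI(D), SavedLoc(D.CurLoc) {}
  ~SaveAndRestoreLocation() { DI.CurLoc = SavedLoc; }
};

// Derived guard: additionally clears the location while it lives, in the
// spirit of NoLocation suppressing debug locations for prologue code.
class NoLocation : public SaveAndRestoreLocation {
public:
  explicit NoLocation(DebugState &D) : SaveAndRestoreLocation(D) { DI.CurLoc = 0; }
};

int main() {
  DebugState DI;
  {
    NoLocation Guard(DI);
    assert(DI.CurLoc == 0 && "location suppressed inside the scope");
  }
  assert(DI.CurLoc == 42 && "destructor restored the original location");
  std::cout << "restored location: " << DI.CurLoc << "\n";
}
```
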
 
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 66d6b33..9db3c91 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -183,7 +183,7 @@
   return ContextName + Separator + D.getNameAsString();
 }
 
-llvm::GlobalVariable *
+llvm::Constant *
 CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
                                      const char *Separator,
                                      llvm::GlobalValue::LinkageTypes Linkage) {
@@ -212,6 +212,13 @@
   if (D.getTLSKind())
     CGM.setTLSMode(GV, D);
 
+  // Make sure the result is of the correct type.
+  unsigned ExpectedAddrSpace = CGM.getContext().getTargetAddressSpace(Ty);
+  if (AddrSpace != ExpectedAddrSpace) {
+    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
+    return llvm::ConstantExpr::getAddrSpaceCast(GV, PTy);
+  }
+
   return GV;
 }
 
@@ -298,12 +305,8 @@
   llvm::Constant *addr =
     CGM.getStaticLocalDeclAddress(&D);
 
-  llvm::GlobalVariable *var;
-  if (addr) {
-    var = cast<llvm::GlobalVariable>(addr->stripPointerCasts());
-  } else {
-    addr = var = CreateStaticVarDecl(D, ".", Linkage);
-  }
+  if (!addr)
+    addr = CreateStaticVarDecl(D, ".", Linkage);
 
   // Store into LocalDeclMap before generating initializer to handle
   // circular references.
@@ -319,6 +322,8 @@
   // Save the type in case adding the initializer forces a type change.
   llvm::Type *expectedType = addr->getType();
 
+  llvm::GlobalVariable *var =
+    cast<llvm::GlobalVariable>(addr->stripPointerCasts());
   // If this value has an initializer, emit it.
   if (D.getInit())
     var = AddInitializerToStaticVarDecl(D, var);
@@ -332,14 +337,15 @@
     var->setSection(SA->getName());
 
   if (D.hasAttr<UsedAttr>())
-    CGM.AddUsedGlobal(var);
+    CGM.addUsedGlobal(var);
 
   // We may have to cast the constant because of the initializer
   // mismatch above.
   //
   // FIXME: It is really dangerous to store this in the map; if anyone
   // RAUW's the GV, uses of this constant will be invalid.
-  llvm::Constant *castedAddr = llvm::ConstantExpr::getBitCast(var, expectedType);
+  llvm::Constant *castedAddr =
+    llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
   DMEntry = castedAddr;
   CGM.setStaticLocalDeclAddress(&D, castedAddr);
 
@@ -365,7 +371,7 @@
     CodeGenFunction::Destroyer *destroyer;
     bool useEHCleanupForArray;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       // Don't use an EH cleanup recursively from an EH cleanup.
       bool useEHCleanupForArray =
         flags.isForNormalCleanup() && this->useEHCleanupForArray;
@@ -384,7 +390,7 @@
     llvm::Value *NRVOFlag;
     llvm::Value *Loc;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       // Along the exceptions path we always execute the dtor.
       bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
 
@@ -410,7 +416,7 @@
   struct CallStackRestore : EHScopeStack::Cleanup {
     llvm::Value *Stack;
     CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       llvm::Value *V = CGF.Builder.CreateLoad(Stack);
       llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
       CGF.Builder.CreateCall(F, V);
@@ -421,7 +427,7 @@
     const VarDecl &Var;
     ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       // Compute the address of the local variable, in case it's a
       // byref or something.
       DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
@@ -441,7 +447,7 @@
                         const VarDecl *Var)
       : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
                       Var.getType(), VK_LValue, SourceLocation());
       // Compute the address of the local variable, in case it's a byref
@@ -473,7 +479,7 @@
     CallLifetimeEnd(llvm::Value *addr, llvm::Value *size)
       : Addr(addr), Size(size) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       llvm::Value *castAddr = CGF.Builder.CreateBitCast(Addr, CGF.Int8PtrTy);
       CGF.Builder.CreateCall2(CGF.CGM.getLLVMLifetimeEndFn(),
                               Size, castAddr)
@@ -530,9 +536,8 @@
       return (ref->getDecl() == &var);
     if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
       const BlockDecl *block = be->getBlockDecl();
-      for (BlockDecl::capture_const_iterator i = block->capture_begin(),
-           e = block->capture_end(); i != e; ++i) {
-        if (i->getVariable() == &var)
+      for (const auto &I : block->captures()) {
+        if (I.getVariable() == &var)
           return true;
       }
     }
@@ -571,7 +576,10 @@
     EmitStoreThroughLValue(RValue::get(value), lvalue, true);
     return;
   }
-
+  
+  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
+    init = DIE->getExpr();
+    
   // If we're emitting a value with lifetime, we have to do the
   // initialization *before* we leave the cleanup scopes.
   if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
@@ -823,7 +831,7 @@
 }
 
 /// EmitAutoVarAlloca - Emit the alloca and debug information for a
-/// local variable.  Does not emit initalization or destruction.
+/// local variable.  Does not emit initialization or destruction.
 CodeGenFunction::AutoVarEmission
 CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
   QualType Ty = D.getType();
@@ -944,12 +952,12 @@
 
       // Push a cleanup block and restore the stack there.
       // FIXME: in general circumstances, this should be an EH cleanup.
-      EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
+      pushStackRestore(NormalCleanup, Stack);
     }
 
     llvm::Value *elementCount;
     QualType elementType;
-    llvm::tie(elementCount, elementType) = getVLASize(Ty);
+    std::tie(elementCount, elementType) = getVLASize(Ty);
 
     llvm::Type *llvmTy = ConvertTypeForMem(elementType);
 
@@ -990,9 +998,8 @@
 
   if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
     const BlockDecl *block = be->getBlockDecl();
-    for (BlockDecl::capture_const_iterator i = block->capture_begin(),
-           e = block->capture_end(); i != e; ++i) {
-      if (i->getVariable() == &var)
+    for (const auto &I : block->captures()) {
+      if (I.getVariable() == &var)
         return true;
     }
 
@@ -1002,18 +1009,16 @@
 
   if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
     const CompoundStmt *CS = SE->getSubStmt();
-    for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
-	   BE = CS->body_end(); BI != BE; ++BI)
-      if (Expr *E = dyn_cast<Expr>((*BI))) {
+    for (const auto *BI : CS->body())
+      if (const auto *E = dyn_cast<Expr>(BI)) {
         if (isCapturedBy(var, E))
             return true;
       }
-      else if (DeclStmt *DS = dyn_cast<DeclStmt>((*BI))) {
+      else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
           // special case declarations
-          for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
-               I != E; ++I) {
-              if (VarDecl *VD = dyn_cast<VarDecl>((*I))) {
-                Expr *Init = VD->getInit();
+          for (const auto *I : DS->decls()) {
+              if (const auto *VD = dyn_cast<VarDecl>((I))) {
+                const Expr *Init = VD->getInit();
                 if (Init && isCapturedBy(var, Init))
                   return true;
               }
@@ -1344,6 +1349,10 @@
                                      destroyer, useEHCleanupForArray);
 }
 
+void CodeGenFunction::pushStackRestore(CleanupKind Kind, llvm::Value *SPMem) {
+  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
+}
+
 void CodeGenFunction::pushLifetimeExtendedDestroy(
     CleanupKind cleanupKind, llvm::Value *addr, QualType type,
     Destroyer *destroyer, bool useEHCleanupForArray) {
@@ -1505,7 +1514,7 @@
       : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
         ElementType(elementType), Destroyer(destroyer) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                               ElementType, Destroyer);
     }
@@ -1527,7 +1536,7 @@
       : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
         ElementType(elementType), Destroyer(destroyer) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
       emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                               ElementType, Destroyer);
@@ -1594,7 +1603,7 @@
     llvm::Value *Param;
     ARCPreciseLifetime_t Precise;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.EmitARCRelease(Param, Precise);
     }
   };
@@ -1603,7 +1612,7 @@
 /// Emit an alloca (or GlobalValue depending on target)
 /// for the specified parameter and set up LocalDeclMap.
 void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
-                                   unsigned ArgNo) {
+                                   bool ArgIsPointer, unsigned ArgNo) {
   // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
   assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
          "Invalid argument to EmitParmDecl");
@@ -1641,30 +1650,35 @@
   }
 
   llvm::Value *DeclPtr;
-  bool HasNonScalarEvalKind = !CodeGenFunction::hasScalarEvaluationKind(Ty);
-  // If this is an aggregate or variable sized value, reuse the input pointer.
-  if (HasNonScalarEvalKind || !Ty->isConstantSizeType()) {
-    DeclPtr = Arg;
+  bool DoStore = false;
+  bool IsScalar = hasScalarEvaluationKind(Ty);
+  CharUnits Align = getContext().getDeclAlign(&D);
+  // If we already have a pointer to the argument, reuse the input pointer.
+  if (ArgIsPointer) {
+    // If we have a prettier pointer type at this point, bitcast to that.
+    unsigned AS = cast<llvm::PointerType>(Arg->getType())->getAddressSpace();
+    llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
+    DeclPtr = Arg->getType() == IRTy ? Arg : Builder.CreateBitCast(Arg, IRTy,
+                                                                   D.getName());
     // Push a destructor cleanup for this parameter if the ABI requires it.
-    if (HasNonScalarEvalKind &&
-        getTarget().getCXXABI().isArgumentDestroyedByCallee()) {
-      if (const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl()) {
-        if (RD->hasNonTrivialDestructor())
-          pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty);
-      }
+    if (!IsScalar &&
+        getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
+      const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+      if (RD && RD->hasNonTrivialDestructor())
+        pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty);
     }
   } else {
     // Otherwise, create a temporary to hold the value.
     llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
                                                D.getName() + ".addr");
-    CharUnits Align = getContext().getDeclAlign(&D);
     Alloc->setAlignment(Align.getQuantity());
     DeclPtr = Alloc;
+    DoStore = true;
+  }
 
-    bool doStore = true;
-
+  LValue lv = MakeAddrLValue(DeclPtr, Ty, Align);
+  if (IsScalar) {
     Qualifiers qs = Ty.getQualifiers();
-    LValue lv = MakeAddrLValue(DeclPtr, Ty, Align);
     if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
       // We honor __attribute__((ns_consumed)) for types with lifetime.
       // For __strong, it's handled by just skipping the initial retain;
@@ -1693,7 +1707,7 @@
             llvm::Value *Null = CGM.EmitNullConstant(D.getType());
             EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
             EmitARCStoreStrongCall(lv.getAddress(), Arg, true);
-            doStore = false;
+            DoStore = false;
           }
           else
           // Don't use objc_retainBlock for block pointers, because we
@@ -1712,19 +1726,19 @@
 
         if (lt == Qualifiers::OCL_Weak) {
           EmitARCInitWeak(DeclPtr, Arg);
-          doStore = false; // The weak init is a store, no need to do two.
+          DoStore = false; // The weak init is a store, no need to do two.
         }
       }
 
       // Enter the cleanup scope.
       EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
     }
-
-    // Store the initial value into the alloca.
-    if (doStore)
-      EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
   }
 
+  // Store the initial value into the alloca.
+  if (DoStore)
+    EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
+
   llvm::Value *&DMEntry = LocalDeclMap[&D];
   assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
   DMEntry = DeclPtr;
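
[Aside — not part of the patch] This file, like several later ones, swaps llvm::tie for C++11's std::tie when unpacking the (element count, element type) pair returned by getVLASize(). A self-contained sketch of that unpacking, with a made-up analogue of getVLASize():

```cpp
#include <cstdint>
#include <iostream>
#include <tuple>
#include <utility>

// Hypothetical analogue of getVLASize(): returns (element count, element size).
static std::pair<std::uint64_t, std::uint64_t> getVLASize() {
  return {16, 4};
}

int main() {
  std::uint64_t numElements = 0, elementSize = 0;
  // C++11 std::tie unpacks the pair into existing variables,
  // replacing the old llvm::tie helper used before this patch.
  std::tie(numElements, elementSize) = getVLASize();
  std::cout << numElements * elementSize << " bytes\n"; // prints "64 bytes"
}
```
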
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 7bdb9eb..bc8620d 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -281,9 +281,8 @@
     // FIXME: We only need to register one __cxa_thread_atexit function for the
     // entire TU.
     CXXThreadLocalInits.push_back(Fn);
-  } else if (D->hasAttr<InitPriorityAttr>()) {
-    unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
-    OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
+  } else if (const InitPriorityAttr *IPA = D->getAttr<InitPriorityAttr>()) {
+    OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
     PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
     DelayedCXXInitPosition.erase(D);
   } else if (D->getTemplateSpecializationKind() != TSK_ExplicitSpecialization &&
@@ -503,11 +502,9 @@
   FunctionArgList args;
   ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
   args.push_back(&dst);
-  
-  const CGFunctionInfo &FI = 
-    CGM.getTypes().arrangeFunctionDeclaration(getContext().VoidTy, args,
-                                              FunctionType::ExtInfo(),
-                                              /*variadic*/ false);
+
+  const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+      getContext().VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
   llvm::Function *fn = 
     CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
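
[Aside — not part of the patch] The init_priority change above folds a hasAttr()/getAttr() pair into a single getAttr() whose result is bound and tested inside the if condition. A toy sketch of that shape, using a hypothetical lookup helper rather than Clang's Decl API:

```cpp
#include <iostream>
#include <map>
#include <string>

struct InitPriorityAttr { unsigned Priority; };

// Hypothetical attribute lookup: returns nullptr when the attribute is absent.
static const InitPriorityAttr *
getAttr(const std::map<std::string, InitPriorityAttr> &Attrs,
        const std::string &Name) {
  auto It = Attrs.find(Name);
  return It == Attrs.end() ? nullptr : &It->second;
}

int main() {
  std::map<std::string, InitPriorityAttr> Attrs{{"init_priority", {101}}};
  // Single lookup: bind the result and test it in one condition,
  // instead of calling a hasAttr()-style check first and looking up again.
  if (const InitPriorityAttr *IPA = getAttr(Attrs, "init_priority"))
    std::cout << "priority " << IPA->Priority << "\n";
}
```
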
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 39a992a..ca31717 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -17,8 +17,8 @@
 #include "TargetInfo.h"
 #include "clang/AST/StmtCXX.h"
 #include "clang/AST/StmtObjC.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/CallSite.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -263,12 +263,9 @@
 /// Check whether a personality function could reasonably be swapped
 /// for a C++ personality function.
 static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
-  for (llvm::Constant::use_iterator
-         I = Fn->use_begin(), E = Fn->use_end(); I != E; ++I) {
-    llvm::User *User = *I;
-
+  for (llvm::User *U : Fn->users()) {
     // Conditionally white-list bitcasts.
-    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(User)) {
+    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(U)) {
       if (CE->getOpcode() != llvm::Instruction::BitCast) return false;
       if (!PersonalityHasOnlyCXXUses(CE))
         return false;
@@ -276,7 +273,7 @@
     }
 
     // Otherwise, it has to be a landingpad instruction.
-    llvm::LandingPadInst *LPI = dyn_cast<llvm::LandingPadInst>(User);
+    llvm::LandingPadInst *LPI = dyn_cast<llvm::LandingPadInst>(U);
     if (!LPI) return false;
 
     for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
@@ -363,7 +360,7 @@
   struct FreeException : EHScopeStack::Cleanup {
     llvm::Value *exn;
     FreeException(llvm::Value *exn) : exn(exn) {}
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.EmitNounwindRuntimeCall(getFreeExceptionFn(CGF.CGM), exn);
     }
   };
@@ -766,11 +763,9 @@
 
   // Save the current IR generation state.
   CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
-  SourceLocation SavedLocation;
-  if (CGDebugInfo *DI = getDebugInfo()) {
-    SavedLocation = DI->getLocation();
+  SaveAndRestoreLocation AutoRestoreLocation(*this, Builder);
+  if (CGDebugInfo *DI = getDebugInfo())
     DI->EmitLocation(Builder, CurEHLocation);
-  }
 
   const EHPersonality &personality = EHPersonality::get(getLangOpts());
 
@@ -892,8 +887,6 @@
 
   // Restore the old IR generation state.
   Builder.restoreIP(savedIP);
-  if (CGDebugInfo *DI = getDebugInfo())
-    DI->EmitLocation(Builder, SavedLocation);
 
   return lpad;
 }
@@ -915,7 +908,7 @@
     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
     bool MightThrow;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       if (!MightThrow) {
         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
         return;
@@ -1244,6 +1237,7 @@
 
   // If the catch was not required, bail out now.
   if (!CatchScope.hasEHBranches()) {
+    CatchScope.clearHandlerBlocks();
     EHStack.popCatch();
     return;
   }
@@ -1294,6 +1288,10 @@
     // Initialize the catch variable and set up the cleanups.
     BeginCatch(*this, C);
 
+    // Emit the PGO counter increment.
+    RegionCounter CatchCnt = getPGORegionCounter(C);
+    CatchCnt.beginRegion(Builder);
+
     // Perform the body of the catch.
     EmitStmt(C->getHandlerBlock());
 
@@ -1320,7 +1318,9 @@
       Builder.CreateBr(ContBB);
   }
 
+  RegionCounter ContCnt = getPGORegionCounter(&S);
   EmitBlock(ContBB);
+  ContCnt.beginRegion(Builder);
 }
 
 namespace {
@@ -1330,7 +1330,7 @@
     CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
       : ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
       llvm::BasicBlock *CleanupContBB =
         CGF.createBasicBlock("finally.cleanup.cont");
@@ -1357,7 +1357,7 @@
       : Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
         RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       // Enter a cleanup to call the end-catch function if one was provided.
       if (EndCatchFn)
         CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
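
[Aside — not part of the patch] Throughout this patch the Emit() implementations of EHScopeStack::Cleanup subclasses gain the C++11 override keyword. A standalone sketch (with hypothetical class names) of what that buys: a signature mismatch becomes a compile-time error instead of a silently hidden virtual.

```cpp
#include <iostream>

struct Cleanup {
  virtual ~Cleanup() = default;
  virtual void Emit(int Flags) = 0;
};

struct CallStackRestore : Cleanup {
  // 'override' asks the compiler to verify this really overrides
  // Cleanup::Emit; a signature drift (e.g. 'unsigned Flags') would now be
  // a hard error instead of silently declaring an unrelated function.
  void Emit(int Flags) override { std::cout << "restore, flags=" << Flags << "\n"; }
};

int main() {
  CallStackRestore C;
  Cleanup &Ref = C;
  Ref.Emit(0); // dispatches to CallStackRestore::Emit
}
```
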
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index cb990b2..1bdd094 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -85,6 +85,7 @@
 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
 /// expression and compare the result against zero, returning an Int1Ty value.
 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+  PGO.setCurrentStmt(E);
   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
     llvm::Value *MemPtr = EmitScalarExpr(E);
     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
@@ -389,7 +390,7 @@
     case SubobjectAdjustment::MemberPointerAdjustment: {
       llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
       Object = CGM.getCXXABI().EmitMemberDataPointerAddress(
-                    *this, Object, Ptr, Adjustment.Ptr.MPT);
+          *this, E, Object, Ptr, Adjustment.Ptr.MPT);
       break;
     }
     }
@@ -1690,11 +1691,16 @@
 
 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                       const Expr *E, const VarDecl *VD) {
+  QualType T = E->getType();
+
+  // If it's thread_local, emit a call to its wrapper function instead.
+  if (VD->getTLSKind() == VarDecl::TLS_Dynamic)
+    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
+
   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
   V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
-  QualType T = E->getType();
   LValue LV;
   if (VD->getType()->isReferenceType()) {
     llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
@@ -1702,7 +1708,7 @@
     V = LI;
     LV = CGF.MakeNaturalAlignAddrLValue(V, T);
   } else {
-    LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+    LV = CGF.MakeAddrLValue(V, T, Alignment);
   }
   setObjCGCLValueClass(CGF.getContext(), E, LV);
   return LV;
@@ -1718,7 +1724,7 @@
       // isn't the same as the type of a use.  Correct for this with a
       // bitcast.
       QualType NoProtoType =
-          CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
+          CGF.getContext().getFunctionNoProtoType(Proto->getReturnType());
       NoProtoType = CGF.getContext().getPointerType(NoProtoType);
       V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
     }
@@ -1769,12 +1775,8 @@
 
   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
     // Check if this is a global variable.
-    if (VD->hasLinkage() || VD->isStaticDataMember()) {
-      // If it's thread_local, emit a call to its wrapper function instead.
-      if (VD->getTLSKind() == VarDecl::TLS_Dynamic)
-        return CGM.getCXXABI().EmitThreadLocalDeclRefExpr(*this, E);
+    if (VD->hasLinkage() || VD->isStaticDataMember())
       return EmitGlobalVarDeclLValue(*this, E, VD);
-    }
 
     bool isBlockVariable = VD->hasAttr<BlocksAttr>();
 
@@ -2651,6 +2653,7 @@
   }
 
   OpaqueValueMapping binding(*this, expr);
+  RegionCounter Cnt = getPGORegionCounter(expr);
 
   const Expr *condExpr = expr->getCond();
   bool CondExprBool;
@@ -2658,8 +2661,12 @@
     const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
     if (!CondExprBool) std::swap(live, dead);
 
-    if (!ContainsLabel(dead))
+    if (!ContainsLabel(dead)) {
+      // If the true case is live, we need to track its region.
+      if (CondExprBool)
+        Cnt.beginRegion(Builder);
       return EmitLValue(live);
+    }
   }
 
   llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
@@ -2667,10 +2674,11 @@
   llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
 
   ConditionalEvaluation eval(*this);
-  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
+  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, Cnt.getCount());
 
   // Any temporaries created here are conditional.
   EmitBlock(lhsBlock);
+  Cnt.beginRegion(Builder);
   eval.begin(*this);
   LValue lhs = EmitLValue(expr->getTrueExpr());
   eval.end(*this);
@@ -2744,6 +2752,7 @@
   case CK_ARCReclaimReturnedObject:
   case CK_ARCExtendBlockObject:
   case CK_CopyAndAutoreleaseBlockObject:
+  case CK_AddressSpaceConversion:
     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
 
   case CK_Dependent:
@@ -3061,7 +3070,7 @@
   if (!RV.isScalar())
     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
 
-  assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
+  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
          "Can't have a scalar return unless the return type is a "
          "reference type!");
 
@@ -3231,8 +3240,8 @@
   const MemberPointerType *MPT
     = E->getRHS()->getType()->getAs<MemberPointerType>();
 
-  llvm::Value *AddV =
-    CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
+  llvm::Value *AddV = CGM.getCXXABI().EmitMemberDataPointerAddress(
+      *this, E, BaseV, OffsetV, MPT);
 
   return MakeAddrLValue(AddV, MPT->getPointeeType());
 }
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 9d0f3a9..6c50521 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -713,6 +713,7 @@
   case CK_CopyAndAutoreleaseBlockObject:
   case CK_BuiltinFnToFnPtr:
   case CK_ZeroToOCLEvent:
+  case CK_AddressSpaceConversion:
     llvm_unreachable("cast kind invalid for aggregate types");
   }
 }
@@ -891,14 +892,16 @@
   // Bind the common expression if necessary.
   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
 
+  RegionCounter Cnt = CGF.getPGORegionCounter(E);
   CodeGenFunction::ConditionalEvaluation eval(CGF);
-  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, Cnt.getCount());
 
   // Save whether the destination's lifetime is externally managed.
   bool isExternallyDestructed = Dest.isExternallyDestructed();
 
   eval.begin(CGF);
   CGF.EmitBlock(LHSBlock);
+  Cnt.beginRegion(Builder);
   Visit(E->getTrueExpr());
   eval.end(CGF);
 
@@ -928,7 +931,11 @@
   llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
 
   if (!ArgPtr) {
-    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
+    // If EmitVAArg fails, we fall back to the LLVM instruction.
+    llvm::Value *Val =
+        Builder.CreateVAArg(ArgValue, CGF.ConvertType(VE->getType()));
+    if (!Dest.isIgnored())
+      Builder.CreateStore(Val, Dest.getAddr());
     return;
   }
 
@@ -1134,9 +1141,7 @@
 #ifndef NDEBUG
       // Make sure that it's really an empty and not a failure of
       // semantic analysis.
-      for (RecordDecl::field_iterator Field = record->field_begin(),
-                                   FieldEnd = record->field_end();
-           Field != FieldEnd; ++Field)
+      for (const auto *Field : record->fields())
         assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
 #endif
       return;
@@ -1165,9 +1170,7 @@
   // Here we iterate over the fields; this makes it simpler to both
   // default-initialize fields and skip over unnamed fields.
   unsigned curInitIndex = 0;
-  for (RecordDecl::field_iterator field = record->field_begin(),
-                               fieldEnd = record->field_end();
-       field != fieldEnd; ++field) {
+  for (const auto *field : record->fields()) {
     // We're done once we hit the flexible array member.
     if (field->getType()->isIncompleteArrayType())
       break;
@@ -1184,7 +1187,7 @@
       break;
     
 
-    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, *field);
+    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
     // We never generate write-barriers for initialized fields.
     LV.setNonGC(true);
     
@@ -1261,8 +1264,7 @@
       CharUnits NumNonZeroBytes = CharUnits::Zero();
       
       unsigned ILEElement = 0;
-      for (RecordDecl::field_iterator Field = SD->field_begin(),
-           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
+      for (const auto *Field : SD->fields()) {
         // We're done once we hit the flexible array member or run out of
         // InitListExpr elements.
         if (Field->getType()->isIncompleteArrayType() ||
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index cc7b24d..f71a3de 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -18,8 +18,8 @@
 #include "CGObjCRuntime.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
 #include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/CallSite.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -119,8 +119,8 @@
     // type of MD and has a prefix.
     // For now we just avoid devirtualizing these covariant cases.
     if (DevirtualizedMethod &&
-        DevirtualizedMethod->getResultType().getCanonicalType() !=
-        MD->getResultType().getCanonicalType())
+        DevirtualizedMethod->getReturnType().getCanonicalType() !=
+            MD->getReturnType().getCanonicalType())
       DevirtualizedMethod = NULL;
   }
 
@@ -220,8 +220,10 @@
     }
   }
 
-  if (MD->isVirtual())
-    This = CGM.getCXXABI().adjustThisArgumentForVirtualCall(*this, MD, This);
+  if (MD->isVirtual()) {
+    This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
+        *this, MD, This, UseVirtualCall);
+  }
 
   return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                            /*ImplicitParam=*/0, QualType(),
@@ -260,7 +262,7 @@
 
   // Ask the ABI to load the callee.  Note that This is modified.
   llvm::Value *Callee =
-    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
+    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This, MemFnPtr, MPT);
   
   CallArgList Args;
 
@@ -316,7 +318,7 @@
 
   const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
   CharUnits Size = Layout.getNonVirtualSize();
-  CharUnits Align = Layout.getNonVirtualAlign();
+  CharUnits Align = Layout.getNonVirtualAlignment();
 
   llvm::Value *SizeVal = CGF.CGM.getSize(Size);
 
@@ -864,9 +866,29 @@
     cleanupDominator->eraseFromParent();
   }
 
-  // Advance to the next element.
-  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
+  // FIXME: The code below intends to initialize the individual array base
+  // elements, one at a time - but when dealing with multi-dimensional arrays -
+  // the pointer arithmetic can get confused - so the fix below entails casting
+  // to the allocated type to ensure that we get the pointer arithmetic right.
+  // It seems like the right approach here is to really initialize the
+  // individual array base elements one at a time since it'll generate less
+  // code. I think the problem is that the wrong type is being passed into
+  // StoreAnyExprIntoOneUnit, but directly fixing that doesn't really work,
+  // because the Init expression has the wrong type at this point.
+  // So... this is ok for a quick fix, but we can and should do a lot better
+  // here long-term.
 
+  // Advance to the next element by adjusting the pointer type as necessary.
+  // For new int[10][20][30], alloc type is int[20][30], base type is 'int'.
+  QualType AllocType = E->getAllocatedType();
+  llvm::Type *AllocPtrTy = ConvertTypeForMem(AllocType)->getPointerTo(
+      curPtr->getType()->getPointerAddressSpace());
+  llvm::Value *curPtrAllocTy = Builder.CreateBitCast(curPtr, AllocPtrTy);
+  llvm::Value *nextPtrAllocTy =
+      Builder.CreateConstGEP1_32(curPtrAllocTy, 1, "array.next");
+  // Cast it back to the base type so that we can compare it to the endPtr.
+  llvm::Value *nextPtr =
+      Builder.CreateBitCast(nextPtrAllocTy, endPtr->getType());
   // Check whether we've gotten to the end of the array and, if so,
   // exit the loop.
   llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
@@ -991,20 +1013,20 @@
       getPlacementArgs()[I] = Arg;
     }
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       const FunctionProtoType *FPT
         = OperatorDelete->getType()->getAs<FunctionProtoType>();
-      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
-             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+      assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
+             (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
 
       CallArgList DeleteArgs;
 
       // The first argument is always a void*.
-      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+      FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
       DeleteArgs.add(RValue::get(Ptr), *AI++);
 
       // A member 'operator delete' can take an extra 'size_t' argument.
-      if (FPT->getNumArgs() == NumPlacementArgs + 2)
+      if (FPT->getNumParams() == NumPlacementArgs + 2)
         DeleteArgs.add(RValue::get(AllocSize), *AI++);
 
       // Pass the rest of the arguments, which must match exactly.
@@ -1046,20 +1068,20 @@
       getPlacementArgs()[I] = Arg;
     }
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       const FunctionProtoType *FPT
         = OperatorDelete->getType()->getAs<FunctionProtoType>();
-      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
-             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+      assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
+             (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
 
       CallArgList DeleteArgs;
 
       // The first argument is always a void*.
-      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+      FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
       DeleteArgs.add(Ptr.restore(CGF), *AI++);
 
       // A member 'operator delete' can take an extra 'size_t' argument.
-      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
+      if (FPT->getNumParams() == NumPlacementArgs + 2) {
         RValue RV = AllocSize.restore(CGF);
         DeleteArgs.add(RV, *AI++);
       }
@@ -1145,35 +1167,12 @@
   
   allocatorArgs.add(RValue::get(allocSize), sizeType);
 
-  // Emit the rest of the arguments.
-  // FIXME: Ideally, this should just use EmitCallArgs.
-  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
-
-  // First, use the types from the function type.
   // We start at 1 here because the first argument (the allocation size)
   // has already been emitted.
-  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
-       ++i, ++placementArg) {
-    QualType argType = allocatorType->getArgType(i);
-
-    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
-                                               placementArg->getType()) &&
-           "type mismatch in call argument!");
-
-    EmitCallArg(allocatorArgs, *placementArg, argType);
-  }
-
-  // Either we've emitted all the call args, or we have a call to a
-  // variadic function.
-  assert((placementArg == E->placement_arg_end() ||
-          allocatorType->isVariadic()) &&
-         "Extra arguments to non-variadic function!");
-
-  // If we still have any arguments, emit them using the type of the argument.
-  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
-       placementArg != placementArgsEnd; ++placementArg) {
-    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
-  }
+  EmitCallArgs(allocatorArgs, allocatorType->isVariadic(),
+               allocatorType->param_type_begin() + 1,
+               allocatorType->param_type_end(), E->placement_arg_begin(),
+               E->placement_arg_end());
 
   // Emit the allocation call.  If the allocator is a global placement
   // operator, just "inline" it directly.
@@ -1289,14 +1288,14 @@
   // Check if we need to pass the size to the delete operator.
   llvm::Value *Size = 0;
   QualType SizeTy;
-  if (DeleteFTy->getNumArgs() == 2) {
-    SizeTy = DeleteFTy->getArgType(1);
+  if (DeleteFTy->getNumParams() == 2) {
+    SizeTy = DeleteFTy->getParamType(1);
     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
     Size = llvm::ConstantInt::get(ConvertType(SizeTy), 
                                   DeleteTypeSize.getQuantity());
   }
-  
-  QualType ArgTy = DeleteFTy->getArgType(0);
+
+  QualType ArgTy = DeleteFTy->getParamType(0);
   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
 
@@ -1319,7 +1318,7 @@
                      QualType ElementType)
       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
     }
   };
@@ -1422,22 +1421,22 @@
       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
         ElementType(ElementType), CookieSize(CookieSize) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       const FunctionProtoType *DeleteFTy =
         OperatorDelete->getType()->getAs<FunctionProtoType>();
-      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
+      assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
 
       CallArgList Args;
       
       // Pass the pointer as the first argument.
-      QualType VoidPtrTy = DeleteFTy->getArgType(0);
+      QualType VoidPtrTy = DeleteFTy->getParamType(0);
       llvm::Value *DeletePtr
         = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
       Args.add(RValue::get(DeletePtr), VoidPtrTy);
 
       // Pass the original requested size as the second argument.
-      if (DeleteFTy->getNumArgs() == 2) {
-        QualType size_t = DeleteFTy->getArgType(1);
+      if (DeleteFTy->getNumParams() == 2) {
+        QualType size_t = DeleteFTy->getParamType(1);
         llvm::IntegerType *SizeTy
           = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
         
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 73d5bcb..1f84c86 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -93,7 +93,7 @@
   ComplexPairTy Visit(Expr *E) {
     return StmtVisitor<ComplexExprEmitter, ComplexPairTy>::Visit(E);
   }
-    
+
   ComplexPairTy VisitStmt(Stmt *S) {
     S->dump(CGF.getContext().getSourceManager());
     llvm_unreachable("Stmt can't have complex result type!");
@@ -410,7 +410,7 @@
   return ComplexPairTy(Val, llvm::Constant::getNullValue(Val->getType()));
 }
 
-ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op, 
+ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
                                            QualType DestTy) {
   switch (CK) {
   case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
@@ -427,7 +427,7 @@
   case CK_LValueBitCast: {
     LValue origLV = CGF.EmitLValue(Op);
     llvm::Value *V = origLV.getAddress();
-    V = Builder.CreateBitCast(V, 
+    V = Builder.CreateBitCast(V,
                     CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
     return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy,
                                                origLV.getAlignment()),
@@ -475,6 +475,7 @@
   case CK_CopyAndAutoreleaseBlockObject:
   case CK_BuiltinFnToFnPtr:
   case CK_ZeroToOCLEvent:
+  case CK_AddressSpaceConversion:
     llvm_unreachable("invalid cast kind for complex value");
 
   case CK_FloatingRealToComplex:
@@ -652,7 +653,7 @@
   assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
                                                  E->getRHS()->getType()));
   OpInfo.RHS = Visit(E->getRHS());
-  
+
   LValue LHS = CGF.EmitLValue(E->getLHS());
 
   // Load from the l-value and convert it.
@@ -702,7 +703,7 @@
 
 LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
                                                ComplexPairTy &Val) {
-  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), 
+  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                  E->getRHS()->getType()) &&
          "Invalid assignment");
   TestAndClearIgnoreReal();
@@ -751,11 +752,13 @@
   // Bind the common expression if necessary.
   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
 
+  RegionCounter Cnt = CGF.getPGORegionCounter(E);
   CodeGenFunction::ConditionalEvaluation eval(CGF);
-  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, Cnt.getCount());
 
   eval.begin(CGF);
   CGF.EmitBlock(LHSBlock);
+  Cnt.beginRegion(Builder);
   ComplexPairTy LHS = Visit(E->getTrueExpr());
   LHSBlock = Builder.GetInsertBlock();
   CGF.EmitBranch(ContBlock);
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index f4d6861..82382dd 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -633,6 +633,9 @@
       return llvm::ConstantStruct::get(STy, Elts);
     }
 
+    case CK_AddressSpaceConversion:
+      return llvm::ConstantExpr::getAddrSpaceCast(C, destType);
+
     case CK_LValueToRValue:
     case CK_AtomicToNonAtomic:
     case CK_NonAtomicToAtomic:
@@ -917,7 +920,7 @@
     }
     case Expr::CallExprClass: {
       CallExpr* CE = cast<CallExpr>(E);
-      unsigned builtin = CE->isBuiltinCall();
+      unsigned builtin = CE->getBuiltinCallee();
       if (builtin !=
             Builtin::BI__builtin___CFStringMakeConstantString &&
           builtin !=
@@ -1062,13 +1065,13 @@
       if (!Offset->isNullValue()) {
         llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Int8PtrTy);
         Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
-        C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
+        C = llvm::ConstantExpr::getPointerCast(Casted, C->getType());
       }
 
       // Convert to the appropriate type; this could be an lvalue for
       // an integer.
       if (isa<llvm::PointerType>(DestTy))
-        return llvm::ConstantExpr::getBitCast(C, DestTy);
+        return llvm::ConstantExpr::getPointerCast(C, DestTy);
 
       return llvm::ConstantExpr::getPtrToInt(C, DestTy);
     } else {
@@ -1265,15 +1268,14 @@
     const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
 
     // Go through all bases and fill in any null pointer to data members.
-    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-         E = RD->bases_end(); I != E; ++I) {
-      if (I->isVirtual()) {
+    for (const auto &I : RD->bases()) {
+      if (I.isVirtual()) {
         // Ignore virtual bases.
         continue;
       }
       
       const CXXRecordDecl *BaseDecl = 
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
       
       // Ignore empty bases.
       if (BaseDecl->isEmpty())
@@ -1285,7 +1287,7 @@
 
       uint64_t BaseOffset =
         CGM.getContext().toBits(Layout.getBaseClassOffset(BaseDecl));
-      FillInNullDataMemberPointers(CGM, I->getType(),
+      FillInNullDataMemberPointers(CGM, I.getType(),
                                    Elements, StartOffset + BaseOffset);
     }
     
@@ -1335,16 +1337,15 @@
   std::vector<llvm::Constant *> elements(numElements);
 
   // Fill in all the bases.
-  for (CXXRecordDecl::base_class_const_iterator
-         I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
-    if (I->isVirtual()) {
+  for (const auto &I : record->bases()) {
+    if (I.isVirtual()) {
       // Ignore virtual bases; if we're laying out for a complete
       // object, we'll lay these out later.
       continue;
     }
 
     const CXXRecordDecl *base = 
-      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
 
     // Ignore empty bases.
     if (base->isEmpty())
@@ -1356,28 +1357,24 @@
   }
 
   // Fill in all the fields.
-  for (RecordDecl::field_iterator I = record->field_begin(),
-         E = record->field_end(); I != E; ++I) {
-    const FieldDecl *field = *I;
-
+  for (const auto *Field : record->fields()) {
     // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
     // will fill in later.)
-    if (!field->isBitField()) {
-      unsigned fieldIndex = layout.getLLVMFieldNo(field);
-      elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+    if (!Field->isBitField()) {
+      unsigned fieldIndex = layout.getLLVMFieldNo(Field);
+      elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
     }
 
     // For unions, stop after the first named field.
-    if (record->isUnion() && field->getDeclName())
+    if (record->isUnion() && Field->getDeclName())
       break;
   }
 
   // Fill in the virtual bases, if we're working with the complete object.
   if (asCompleteObject) {
-    for (CXXRecordDecl::base_class_const_iterator
-           I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
+    for (const auto &I : record->vbases()) {
       const CXXRecordDecl *base = 
-        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
 
       // Ignore empty bases.
       if (base->isEmpty())
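
[Aside — not part of the patch] The explicit iterator loops over bases(), vbases(), fields() and captures() in these hunks all become range-based for loops over the new range accessors. A self-contained before/after sketch with a toy record type standing in for RecordDecl:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Toy stand-ins for RecordDecl and its field iterators / field range.
struct Field { std::string Name; bool IsBitField; };
struct Record {
  std::vector<Field> Fields;
  const std::vector<Field> &fields() const { return Fields; }
};

int main() {
  Record R{{{"a", false}, {"pad", true}, {"b", false}}};

  // Old style: explicit iterator pair, as before this patch.
  for (std::vector<Field>::const_iterator I = R.fields().begin(),
                                          E = R.fields().end(); I != E; ++I)
    if (!I->IsBitField) std::cout << I->Name << "\n";

  // New style after the patch: range-based for over the range accessor.
  for (const auto &F : R.fields())
    if (!F.IsBitField) std::cout << F.Name << "\n";
}
```
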
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index f3a5387..5f932b0 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -22,13 +22,13 @@
 #include "clang/AST/StmtVisitor.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/IR/CFG.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Module.h"
-#include "llvm/Support/CFG.h"
 #include <cstdarg>
 
 using namespace clang;
@@ -246,7 +246,7 @@
   }
   Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
     if (E->getMethodDecl() &&
-        E->getMethodDecl()->getResultType()->isReferenceType())
+        E->getMethodDecl()->getReturnType()->isReferenceType())
       return EmitLoadOfLValue(E);
     return CGF.EmitObjCMessageExpr(E).getScalarVal();
   }
@@ -367,11 +367,8 @@
     CGF.EmitCXXDeleteExpr(E);
     return 0;
   }
-  Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
-    return Builder.getInt1(E->getValue());
-  }
 
-  Value *VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
+  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
     return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
   }
 
@@ -1299,7 +1296,18 @@
   case CK_AnyPointerToBlockPointerCast:
   case CK_BitCast: {
     Value *Src = Visit(const_cast<Expr*>(E));
-    return Builder.CreateBitCast(Src, ConvertType(DestTy));
+    llvm::Type *SrcTy = Src->getType();
+    llvm::Type *DstTy = ConvertType(DestTy);
+    if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
+        SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
+      llvm::Type *MidTy = CGF.CGM.getDataLayout().getIntPtrType(SrcTy);
+      return Builder.CreateIntToPtr(Builder.CreatePtrToInt(Src, MidTy), DstTy);
+    }
+    return Builder.CreateBitCast(Src, DstTy);
+  }
+  case CK_AddressSpaceConversion: {
+    Value *Src = Visit(const_cast<Expr*>(E));
+    return Builder.CreateAddrSpaceCast(Src, ConvertType(DestTy));
   }
   case CK_AtomicToNonAtomic:
   case CK_NonAtomicToAtomic:
@@ -1360,7 +1368,7 @@
 
     // Make sure the array decay ends up being the right type.  This matters if
     // the array type was of an incomplete type.
-    return CGF.Builder.CreateBitCast(V, ConvertType(CE->getType()));
+    return CGF.Builder.CreatePointerCast(V, ConvertType(CE->getType()));
   }
   case CK_FunctionToPointerDecay:
     return EmitLValue(E).getAddress();
@@ -1485,7 +1493,7 @@
   }
 
   case CK_ZeroToOCLEvent: {
-    assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non event type");
+    assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non-event type");
     return llvm::Constant::getNullValue(ConvertType(DestTy));
   }
 
@@ -1727,8 +1735,9 @@
   if (atomicPHI) {
     llvm::BasicBlock *opBB = Builder.GetInsertBlock();
     llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
-    llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI,
-        CGF.EmitToMemory(value, type), llvm::SequentiallyConsistent);
+    llvm::Value *old = Builder.CreateAtomicCmpXchg(
+        LV.getAddress(), atomicPHI, CGF.EmitToMemory(value, type),
+        llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
     atomicPHI->addIncoming(old, opBB);
     llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
     Builder.CreateCondBr(success, contBB, opBB);
@@ -1906,7 +1915,7 @@
 
       QualType eltType;
       llvm::Value *numElts;
-      llvm::tie(numElts, eltType) = CGF.getVLASize(VAT);
+      std::tie(numElts, eltType) = CGF.getVLASize(VAT);
 
       llvm::Value *size = numElts;
 
@@ -2069,8 +2078,9 @@
   if (atomicPHI) {
     llvm::BasicBlock *opBB = Builder.GetInsertBlock();
     llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
-    llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI,
-        CGF.EmitToMemory(Result, LHSTy), llvm::SequentiallyConsistent);
+    llvm::Value *old = Builder.CreateAtomicCmpXchg(
+        LHSLV.getAddress(), atomicPHI, CGF.EmitToMemory(Result, LHSTy),
+        llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
     atomicPHI->addIncoming(old, opBB);
     llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
     Builder.CreateCondBr(success, contBB, opBB);
@@ -2238,7 +2248,7 @@
   llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
   llvm::Function::iterator insertPt = initialBB;
   llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
-                                                      llvm::next(insertPt));
+                                                      std::next(insertPt));
   llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
 
   Builder.CreateCondBr(overflow, overflowBB, continueBB);
@@ -2529,7 +2539,7 @@
   if (const VariableArrayType *vla
         = CGF.getContext().getAsVariableArrayType(elementType)) {
     llvm::Value *numElements;
-    llvm::tie(numElements, elementType) = CGF.getVLASize(vla);
+    std::tie(numElements, elementType) = CGF.getVLASize(vla);
 
     divisor = numElements;
 
@@ -2818,11 +2828,11 @@
 
   switch (E->getLHS()->getType().getObjCLifetime()) {
   case Qualifiers::OCL_Strong:
-    llvm::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
+    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
     break;
 
   case Qualifiers::OCL_Autoreleasing:
-    llvm::tie(LHS,RHS) = CGF.EmitARCStoreAutoreleasing(E);
+    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
     break;
 
   case Qualifiers::OCL_Weak:
@@ -2866,8 +2876,12 @@
 }
 
 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+  RegionCounter Cnt = CGF.getPGORegionCounter(E);
+
   // Perform vector logical and on comparisons with zero vectors.
   if (E->getType()->isVectorType()) {
+    Cnt.beginRegion(Builder);
+
     Value *LHS = Visit(E->getLHS());
     Value *RHS = Visit(E->getRHS());
     Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
@@ -2889,6 +2903,8 @@
   bool LHSCondVal;
   if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
     if (LHSCondVal) { // If we have 1 && X, just emit X.
+      Cnt.beginRegion(Builder);
+
       Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
       // ZExt result to int or bool.
       return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
@@ -2905,7 +2921,7 @@
   CodeGenFunction::ConditionalEvaluation eval(CGF);
 
   // Branch on the LHS first.  If it is false, go to the failure (cont) block.
-  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
+  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock, Cnt.getCount());
 
   // Any edges into the ContBlock are now from an (indeterminate number of)
   // edges from this first condition.  All of these values will be false.  Start
@@ -2918,6 +2934,7 @@
 
   eval.begin(CGF);
   CGF.EmitBlock(RHSBlock);
+  Cnt.beginRegion(Builder);
   Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
   eval.end(CGF);
 
@@ -2937,8 +2954,12 @@
 }
 
 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+  RegionCounter Cnt = CGF.getPGORegionCounter(E);
+
   // Perform vector logical or on comparisons with zero vectors.
   if (E->getType()->isVectorType()) {
+    Cnt.beginRegion(Builder);
+
     Value *LHS = Visit(E->getLHS());
     Value *RHS = Visit(E->getRHS());
     Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
@@ -2960,6 +2981,8 @@
   bool LHSCondVal;
   if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
     if (!LHSCondVal) { // If we have 0 || X, just emit X.
+      Cnt.beginRegion(Builder);
+
       Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
       // ZExt result to int or bool.
       return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
@@ -2976,7 +2999,8 @@
   CodeGenFunction::ConditionalEvaluation eval(CGF);
 
   // Branch on the LHS first.  If it is true, go to the success (cont) block.
-  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
+  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
+                           Cnt.getParentCount() - Cnt.getCount());
 
   // Any edges into the ContBlock are now from an (indeterminate number of)
   // edges from this first condition.  All of these values will be true.  Start
@@ -2991,6 +3015,7 @@
 
   // Emit the RHS condition as a bool value.
   CGF.EmitBlock(RHSBlock);
+  Cnt.beginRegion(Builder);
   Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
 
   eval.end(CGF);
@@ -3041,6 +3066,7 @@
 
   // Bind the common expression if necessary.
   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+  RegionCounter Cnt = CGF.getPGORegionCounter(E);
 
   Expr *condExpr = E->getCond();
   Expr *lhsExpr = E->getTrueExpr();
@@ -3055,6 +3081,8 @@
 
     // If the dead side doesn't have labels we need, just emit the Live part.
     if (!CGF.ContainsLabel(dead)) {
+      if (CondExprBool)
+        Cnt.beginRegion(Builder);
       Value *Result = Visit(live);
 
       // If the live part is a throw expression, it acts like it has a void
@@ -3071,6 +3099,8 @@
   // the select function.
   if (CGF.getLangOpts().OpenCL
       && condExpr->getType()->isVectorType()) {
+    Cnt.beginRegion(Builder);
+
     llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
     llvm::Value *LHS = Visit(lhsExpr);
     llvm::Value *RHS = Visit(rhsExpr);
@@ -3114,6 +3144,8 @@
   // safe to evaluate the LHS and RHS unconditionally.
   if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
       isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
+    Cnt.beginRegion(Builder);
+
     llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
     llvm::Value *LHS = Visit(lhsExpr);
     llvm::Value *RHS = Visit(rhsExpr);
@@ -3130,9 +3162,10 @@
   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
 
   CodeGenFunction::ConditionalEvaluation eval(CGF);
-  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock);
+  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock, Cnt.getCount());
 
   CGF.EmitBlock(LHSBlock);
+  Cnt.beginRegion(Builder);
   eval.begin(CGF);
   Value *LHS = Visit(lhsExpr);
   eval.end(CGF);
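
[Aside — not part of the patch] The CreateAtomicCmpXchg calls above now pass separate success and failure orderings, matching the updated IRBuilder signature. As a hedged analogue in plain C++, std::atomic's compare_exchange takes the same pair of orderings; a minimal retry loop in the spirit of the atomic compound-assignment lowering looks like this (names here are illustrative, not Clang's):

```cpp
#include <atomic>
#include <iostream>

// Load, compute, then compare-exchange with explicit success/failure
// orderings; on failure 'Old' is refreshed with the current value and we retry.
static int atomicAdd(std::atomic<int> &Obj, int Amount) {
  int Old = Obj.load(std::memory_order_seq_cst);
  int New;
  do {
    New = Old + Amount;
  } while (!Obj.compare_exchange_weak(Old, New,
                                      std::memory_order_seq_cst,   // success
                                      std::memory_order_seq_cst)); // failure
  return New;
}

int main() {
  std::atomic<int> Counter{5};
  std::cout << atomicAdd(Counter, 3) << "\n"; // prints 8
}
```
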
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 0bda053..f78bb0b 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -22,7 +22,7 @@
 #include "clang/Basic/Diagnostic.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
 #include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/InlineAsm.h"
 using namespace clang;
@@ -79,10 +79,10 @@
   RValue RV = EmitAnyExpr(SubExpr);
   CallArgList Args;
   Args.add(RV, ArgQT);
-  
-  RValue result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 
-                                              BoxingMethod->getResultType(), Sel, Receiver, Args, 
-                                              ClassDecl, BoxingMethod);
+
+  RValue result = Runtime.GenerateMessageSend(
+      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
+      Args, ClassDecl, BoxingMethod);
   return Builder.CreateBitCast(result.getScalarVal(), 
                                ConvertType(E->getType()));
 }
@@ -186,12 +186,9 @@
   llvm::Value *Receiver = Runtime.GetClass(*this, Class);
 
   // Generate the message send.
-  RValue result
-    = Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 
-                                  MethodWithObjects->getResultType(),
-                                  Sel,
-                                  Receiver, Args, Class,
-                                  MethodWithObjects);
+  RValue result = Runtime.GenerateMessageSend(
+      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
+      Receiver, Args, Class, MethodWithObjects);
 
   // The above message send needs these objects, but in ARC they are
   // passed in a buffer that is essentially __unsafe_unretained.
@@ -238,7 +235,7 @@
     return Result;
 
   if (!Method->hasRelatedResultType() ||
-      CGF.getContext().hasSameType(ExpT, Method->getResultType()) ||
+      CGF.getContext().hasSameType(ExpT, Method->getReturnType()) ||
       !Result.isScalar())
     return Result;
   
@@ -369,8 +366,7 @@
       shouldExtendReceiverForInnerPointerMessage(E))
     Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
 
-  QualType ResultType =
-    method ? method->getResultType() : E->getType();
+  QualType ResultType = method ? method->getReturnType() : E->getType();
 
   CallArgList Args;
   EmitCallArgs(Args, method, E->arg_begin(), E->arg_end());
@@ -435,7 +431,7 @@
 
 namespace {
 struct FinishARCDealloc : EHScopeStack::Cleanup {
-  void Emit(CodeGenFunction &CGF, Flags flags) {
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
     const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
 
     const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
@@ -480,13 +476,12 @@
   args.push_back(OMD->getSelfDecl());
   args.push_back(OMD->getCmdDecl());
 
-  for (ObjCMethodDecl::param_const_iterator PI = OMD->param_begin(),
-         E = OMD->param_end(); PI != E; ++PI)
-    args.push_back(*PI);
+  for (const auto *PI : OMD->params())
+    args.push_back(PI);
 
   CurGD = OMD;
 
-  StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
+  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args, StartLoc);
 
   // In ARC, certain methods get an extra cleanup.
   if (CGM.getLangOpts().ObjCAutoRefCount &&
@@ -506,8 +501,14 @@
 /// its pointer, name, and types registered in the class structure.
 void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
   StartObjCMethod(OMD, OMD->getClassInterface(), OMD->getLocStart());
-  EmitStmt(OMD->getBody());
+  PGO.assignRegionCounters(OMD, CurFn);
+  assert(isa<CompoundStmt>(OMD->getBody()));
+  RegionCounter Cnt = getPGORegionCounter(OMD->getBody());
+  Cnt.beginRegion(Builder);
+  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
   FinishFunction(OMD->getBodyRBrace());
+  PGO.emitInstrumentationData();
+  PGO.destroyRegionCounters();
 }
 
 /// emitStructGetterCall - Call the runtime function to load a property
@@ -622,8 +623,8 @@
   // Evaluate the ivar's size and alignment.
   ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
   QualType ivarType = ivar->getType();
-  llvm::tie(IvarSize, IvarAlignment)
-    = CGM.getContext().getTypeInfoInChars(ivarType);
+  std::tie(IvarSize, IvarAlignment) =
+      CGM.getContext().getTypeInfoInChars(ivarType);
 
   // If we have a copy property, we always have to use getProperty/setProperty.
   // TODO: we could actually use setProperty and an expression for non-atomics.
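llvm::tie was the pre-C++11 stand-in for std::tie; with C++11 in use, the patch switches these call sites over. std::tie unpacks the size/alignment pair returned by getTypeInfoInChars into already-declared variables. A standalone illustration using a plain std::pair rather than the Clang types:

    #include <cstdint>
    #include <tuple>
    #include <utility>

    int main() {
      // Stand-in for the (size, alignment) pair from getTypeInfoInChars().
      std::pair<uint64_t, uint64_t> sizeAndAlign{16, 8};
      uint64_t size = 0, align = 0;
      std::tie(size, align) = sizeAndAlign; // unpack into existing variables
      return static_cast<int>(size - 16 + align - 8); // 0
    }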
@@ -895,16 +896,21 @@
 
     // FIXME: We shouldn't need to get the function info here, the
     // runtime already should have computed it to build the function.
+    llvm::Instruction *CallInstruction;
     RValue RV = EmitCall(getTypes().arrangeFreeFunctionCall(propType, args,
                                                        FunctionType::ExtInfo(),
                                                             RequiredArgs::All),
-                         getPropertyFn, ReturnValueSlot(), args);
+                         getPropertyFn, ReturnValueSlot(), args, 0,
+                         &CallInstruction);
+    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
+      call->setTailCall();
 
     // We need to fix the type here. Ivars with copy & retain are
     // always objects so we don't need to worry about complex or
     // aggregates.
-    RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
-           getTypes().ConvertType(getterMethod->getResultType())));
+    RV = RValue::get(Builder.CreateBitCast(
+        RV.getScalarVal(),
+        getTypes().ConvertType(getterMethod->getReturnType())));
 
     EmitReturnOfRValue(RV, propType);
 
@@ -955,8 +961,8 @@
         }
 
         value = Builder.CreateBitCast(value, ConvertType(propType));
-        value = Builder.CreateBitCast(value, 
-                  ConvertType(GetterMethodDecl->getResultType()));
+        value = Builder.CreateBitCast(
+            value, ConvertType(GetterMethodDecl->getReturnType()));
       }
       
       EmitReturnOfRValue(RValue::get(value), propType);
@@ -1292,7 +1298,7 @@
       : addr(addr), ivar(ivar), destroyer(destroyer),
         useEHCleanupForArray(useEHCleanupForArray) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       LValue lvalue
         = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
       CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
@@ -1356,12 +1362,9 @@
     // Suppress the final autorelease in ARC.
     AutoreleaseResult = false;
 
-    SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
-    for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
-           E = IMP->init_end(); B != E; ++B) {
-      CXXCtorInitializer *IvarInit = (*B);
+    for (const auto *IvarInit : IMP->inits()) {
       FieldDecl *Field = IvarInit->getAnyMember();
-      ObjCIvarDecl  *Ivar = cast<ObjCIvarDecl>(Field);
+      ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
       LValue LV = EmitLValueForIvar(TypeOfSelfObject(), 
                                     LoadObjCSelf(), Ivar, 0);
       EmitAggExpr(IvarInit->getInit(),
@@ -1506,9 +1509,13 @@
   llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
 
   // If the limit pointer was zero to begin with, the collection is
-  // empty; skip all this.
+  // empty; skip all this. Set the branch weight assuming this has the same
+  // probability of exiting the loop as any other loop exit.
+  uint64_t EntryCount = PGO.getCurrentRegionCount();
+  RegionCounter Cnt = getPGORegionCounter(&S);
   Builder.CreateCondBr(Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"),
-                       EmptyBB, LoopInitBB);
+                       EmptyBB, LoopInitBB,
+                       PGO.createBranchWeights(EntryCount, Cnt.getCount()));
 
   // Otherwise, initialize the loop.
   EmitBlock(LoopInitBB);
@@ -1537,6 +1544,8 @@
   llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
   count->addIncoming(initialBufferLimit, LoopInitBB);
 
+  Cnt.beginRegion(Builder);
+
   // Check whether the mutations value has changed from where it was
   // at start.  StateMutationsPtr should actually be invariant between
   // refreshes.
@@ -1644,8 +1653,12 @@
     = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
 
   // If we haven't overrun the buffer yet, we can continue.
+  // Set the branch weights based on the simplifying assumption that this is
+  // like a while-loop, i.e., ignoring that the false branch fetches more
+  // elements and then returns to the loop.
   Builder.CreateCondBr(Builder.CreateICmpULT(indexPlusOne, count),
-                       LoopBodyBB, FetchMoreBB);
+                       LoopBodyBB, FetchMoreBB,
+                       PGO.createBranchWeights(Cnt.getCount(), EntryCount));
 
   index->addIncoming(indexPlusOne, AfterBody.getBlock());
   count->addIncoming(count, AfterBody.getBlock());
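PGO execution counts are 64-bit, while the branch-weight metadata consumed by the optimizers is 32-bit, so a helper like PGO.createBranchWeights has to scale large counts down while preserving their ratio and keeping both weights nonzero. A rough standalone sketch of that scaling, as an assumption about what such a helper does rather than the actual Clang implementation:

    #include <algorithm>
    #include <cstdint>
    #include <utility>

    // Illustrative only: reduce two 64-bit counts to 32-bit branch weights
    // with roughly the same ratio; the real helper wraps the result in
    // branch-weight metadata.
    std::pair<uint32_t, uint32_t> scaleToWeights(uint64_t a, uint64_t b) {
      uint64_t maxCount = std::max(a, b);
      uint64_t scale = maxCount > UINT32_MAX ? maxCount / UINT32_MAX + 1 : 1;
      // +1 keeps both weights nonzero so neither branch looks impossible.
      uint64_t wa = std::min<uint64_t>(a / scale + 1, UINT32_MAX);
      uint64_t wb = std::min<uint64_t>(b / scale + 1, UINT32_MAX);
      return {static_cast<uint32_t>(wa), static_cast<uint32_t>(wb)};
    }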
@@ -1715,7 +1728,7 @@
     CallObjCRelease(llvm::Value *object) : object(object) {}
     llvm::Value *object;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       // Releases at the end of the full-expression are imprecise.
       CGF.EmitARCRelease(object, ARCImpreciseLifetime);
     }
@@ -2324,7 +2337,7 @@
 
     CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.EmitObjCAutoreleasePoolPop(Token);
     }
   };
@@ -2333,7 +2346,7 @@
 
     CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.EmitObjCMRRAutoreleasePoolPop(Token);
     }
   };
@@ -2824,9 +2837,8 @@
     EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
   }
 
-  for (CompoundStmt::const_body_iterator I = S.body_begin(),
-       E = S.body_end(); I != E; ++I)
-    EmitStmt(*I);
+  for (const auto *I : S.body())
+    EmitStmt(I);
 
   if (DI)
     DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
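The iterator loops over S.body(), OMD->params(), PD->protocols() and similar AST containers are rewritten with C++11 range-based for throughout this patch. The shape of that rewrite, shown on a plain std::vector rather than the Clang containers:

    #include <vector>

    int sumAll(const std::vector<int> &body) {
      int total = 0;
      // Old style: for (std::vector<int>::const_iterator I = body.begin(),
      //                  E = body.end(); I != E; ++I) total += *I;
      for (const auto &elem : body) // range-based for, as used in the patch
        total += elem;
      return total;
    }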
@@ -2892,12 +2904,10 @@
   args.push_back(&dstDecl);
   ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
   args.push_back(&srcDecl);
-  
-  const CGFunctionInfo &FI =
-    CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
-                                              FunctionType::ExtInfo(),
-                                              RequiredArgs::All);
-  
+
+  const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+      C.VoidTy, args, FunctionType::ExtInfo(), RequiredArgs::All);
+
   llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
   
   llvm::Function *Fn =
@@ -2973,12 +2983,10 @@
   args.push_back(&dstDecl);
   ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
   args.push_back(&srcDecl);
-  
-  const CGFunctionInfo &FI =
-  CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
-                                            FunctionType::ExtInfo(),
-                                            RequiredArgs::All);
-  
+
+  const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+      C.VoidTy, args, FunctionType::ExtInfo(), RequiredArgs::All);
+
   llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
   
   llvm::Function *Fn =
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index a7ab850..2689d7b 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -27,11 +27,11 @@
 #include "clang/Basic/SourceManager.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringMap.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
 #include "llvm/Support/Compiler.h"
 #include <cstdarg>
 
@@ -479,102 +479,91 @@
   CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
       unsigned protocolClassVersion);
 
-  virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
+  llvm::Constant *GenerateConstantString(const StringLiteral *) override;
 
-  virtual RValue
-  GenerateMessageSend(CodeGenFunction &CGF,
-                      ReturnValueSlot Return,
-                      QualType ResultType,
-                      Selector Sel,
-                      llvm::Value *Receiver,
-                      const CallArgList &CallArgs,
+  RValue
+  GenerateMessageSend(CodeGenFunction &CGF, ReturnValueSlot Return,
+                      QualType ResultType, Selector Sel,
+                      llvm::Value *Receiver, const CallArgList &CallArgs,
                       const ObjCInterfaceDecl *Class,
-                      const ObjCMethodDecl *Method);
-  virtual RValue
-  GenerateMessageSendSuper(CodeGenFunction &CGF,
-                           ReturnValueSlot Return,
-                           QualType ResultType,
-                           Selector Sel,
+                      const ObjCMethodDecl *Method) override;
+  RValue
+  GenerateMessageSendSuper(CodeGenFunction &CGF, ReturnValueSlot Return,
+                           QualType ResultType, Selector Sel,
                            const ObjCInterfaceDecl *Class,
-                           bool isCategoryImpl,
-                           llvm::Value *Receiver,
-                           bool IsClassMessage,
-                           const CallArgList &CallArgs,
-                           const ObjCMethodDecl *Method);
-  virtual llvm::Value *GetClass(CodeGenFunction &CGF,
-                                const ObjCInterfaceDecl *OID);
-  virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
-                                   bool lval = false);
-  virtual llvm::Value *GetSelector(CodeGenFunction &CGF, const ObjCMethodDecl
-      *Method);
-  virtual llvm::Constant *GetEHType(QualType T);
+                           bool isCategoryImpl, llvm::Value *Receiver,
+                           bool IsClassMessage, const CallArgList &CallArgs,
+                           const ObjCMethodDecl *Method) override;
+  llvm::Value *GetClass(CodeGenFunction &CGF,
+                        const ObjCInterfaceDecl *OID) override;
+  llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
+                           bool lval = false) override;
+  llvm::Value *GetSelector(CodeGenFunction &CGF,
+                           const ObjCMethodDecl *Method) override;
+  llvm::Constant *GetEHType(QualType T) override;
 
-  virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
-                                         const ObjCContainerDecl *CD);
-  virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
-  virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
-  virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD);
-  virtual llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
-                                           const ObjCProtocolDecl *PD);
-  virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
-  virtual llvm::Function *ModuleInitFunction();
-  virtual llvm::Constant *GetPropertyGetFunction();
-  virtual llvm::Constant *GetPropertySetFunction();
-  virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic, 
-                                                          bool copy);
-  virtual llvm::Constant *GetSetStructFunction();
-  virtual llvm::Constant *GetGetStructFunction();
-  virtual llvm::Constant *GetCppAtomicObjectGetFunction();
-  virtual llvm::Constant *GetCppAtomicObjectSetFunction();
-  virtual llvm::Constant *EnumerationMutationFunction();
+  llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+                                 const ObjCContainerDecl *CD) override;
+  void GenerateCategory(const ObjCCategoryImplDecl *CMD) override;
+  void GenerateClass(const ObjCImplementationDecl *ClassDecl) override;
+  void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override;
+  llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
+                                   const ObjCProtocolDecl *PD) override;
+  void GenerateProtocol(const ObjCProtocolDecl *PD) override;
+  llvm::Function *ModuleInitFunction() override;
+  llvm::Constant *GetPropertyGetFunction() override;
+  llvm::Constant *GetPropertySetFunction() override;
+  llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+                                                  bool copy) override;
+  llvm::Constant *GetSetStructFunction() override;
+  llvm::Constant *GetGetStructFunction() override;
+  llvm::Constant *GetCppAtomicObjectGetFunction() override;
+  llvm::Constant *GetCppAtomicObjectSetFunction() override;
+  llvm::Constant *EnumerationMutationFunction() override;
 
-  virtual void EmitTryStmt(CodeGenFunction &CGF,
-                           const ObjCAtTryStmt &S);
-  virtual void EmitSynchronizedStmt(CodeGenFunction &CGF,
-                                    const ObjCAtSynchronizedStmt &S);
-  virtual void EmitThrowStmt(CodeGenFunction &CGF,
-                             const ObjCAtThrowStmt &S,
-                             bool ClearInsertionPoint=true);
-  virtual llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
-                                         llvm::Value *AddrWeakObj);
-  virtual void EmitObjCWeakAssign(CodeGenFunction &CGF,
-                                  llvm::Value *src, llvm::Value *dst);
-  virtual void EmitObjCGlobalAssign(CodeGenFunction &CGF,
-                                    llvm::Value *src, llvm::Value *dest,
-                                    bool threadlocal=false);
-  virtual void EmitObjCIvarAssign(CodeGenFunction &CGF,
-                                    llvm::Value *src, llvm::Value *dest,
-                                    llvm::Value *ivarOffset);
-  virtual void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
-                                        llvm::Value *src, llvm::Value *dest);
-  virtual void EmitGCMemmoveCollectable(CodeGenFunction &CGF,
-                                        llvm::Value *DestPtr,
-                                        llvm::Value *SrcPtr,
-                                        llvm::Value *Size);
-  virtual LValue EmitObjCValueForIvar(CodeGenFunction &CGF,
-                                      QualType ObjectTy,
-                                      llvm::Value *BaseValue,
-                                      const ObjCIvarDecl *Ivar,
-                                      unsigned CVRQualifiers);
-  virtual llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
-                                      const ObjCInterfaceDecl *Interface,
-                                      const ObjCIvarDecl *Ivar);
-  virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF);
-  virtual llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM,
-                                             const CGBlockInfo &blockInfo) {
+  void EmitTryStmt(CodeGenFunction &CGF,
+                   const ObjCAtTryStmt &S) override;
+  void EmitSynchronizedStmt(CodeGenFunction &CGF,
+                            const ObjCAtSynchronizedStmt &S) override;
+  void EmitThrowStmt(CodeGenFunction &CGF,
+                     const ObjCAtThrowStmt &S,
+                     bool ClearInsertionPoint=true) override;
+  llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
+                                 llvm::Value *AddrWeakObj) override;
+  void EmitObjCWeakAssign(CodeGenFunction &CGF,
+                          llvm::Value *src, llvm::Value *dst) override;
+  void EmitObjCGlobalAssign(CodeGenFunction &CGF,
+                            llvm::Value *src, llvm::Value *dest,
+                            bool threadlocal=false) override;
+  void EmitObjCIvarAssign(CodeGenFunction &CGF, llvm::Value *src,
+                          llvm::Value *dest, llvm::Value *ivarOffset) override;
+  void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
+                                llvm::Value *src, llvm::Value *dest) override;
+  void EmitGCMemmoveCollectable(CodeGenFunction &CGF, llvm::Value *DestPtr,
+                                llvm::Value *SrcPtr,
+                                llvm::Value *Size) override;
+  LValue EmitObjCValueForIvar(CodeGenFunction &CGF, QualType ObjectTy,
+                              llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
+                              unsigned CVRQualifiers) override;
+  llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
+                              const ObjCInterfaceDecl *Interface,
+                              const ObjCIvarDecl *Ivar) override;
+  llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override;
+  llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM,
+                                     const CGBlockInfo &blockInfo) override {
     return NULLPtr;
   }
-  virtual llvm::Constant *BuildRCBlockLayout(CodeGenModule &CGM,
-                                             const CGBlockInfo &blockInfo) {
+  llvm::Constant *BuildRCBlockLayout(CodeGenModule &CGM,
+                                     const CGBlockInfo &blockInfo) override {
     return NULLPtr;
   }
-  
-  virtual llvm::Constant *BuildByrefLayout(CodeGenModule &CGM,
-                                           QualType T) {
+
+  llvm::Constant *BuildByrefLayout(CodeGenModule &CGM, QualType T) override {
     return NULLPtr;
   }
-  
-  virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
+
+  llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
+                                       bool Weak = false) override {
     return 0;
   }
 };
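Most of the hunk above is mechanical: each overrider drops the now-redundant virtual and gains override, so a later change to a CGObjCRuntime signature becomes a compile error instead of silently declaring an unrelated virtual. A small self-contained illustration of the failure mode this guards against, with types invented for the example:

    struct Runtime {
      virtual ~Runtime() = default;
      virtual int GetClassGlobal(const char *name, bool weak = false) { return 0; }
    };

    struct GNURuntime : Runtime {
      // Without 'override', changing the base signature would silently turn
      // this into a new, unrelated virtual; with it, the mismatch is a
      // compile-time error.
      int GetClassGlobal(const char *name, bool weak = false) override { return 1; }
    };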
@@ -595,11 +584,9 @@
   /// arguments.  Returns the IMP for the corresponding method.
   LazyRuntimeFunction MsgLookupSuperFn;
 protected:
-  virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
-                                 llvm::Value *&Receiver,
-                                 llvm::Value *cmd,
-                                 llvm::MDNode *node,
-                                 MessageSendInfo &MSI) {
+  llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver,
+                         llvm::Value *cmd, llvm::MDNode *node,
+                         MessageSendInfo &MSI) override {
     CGBuilderTy &Builder = CGF.Builder;
     llvm::Value *args[] = {
             EnforceType(Builder, Receiver, IdTy),
@@ -608,10 +595,8 @@
     imp->setMetadata(msgSendMDKind, node);
     return imp.getInstruction();
   }
-  virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
-                                      llvm::Value *ObjCSuper,
-                                      llvm::Value *cmd,
-                                      MessageSendInfo &MSI) {
+  llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+                              llvm::Value *cmd, MessageSendInfo &MSI) override {
       CGBuilderTy &Builder = CGF.Builder;
       llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
           PtrToObjCSuperTy), cmd};
@@ -654,13 +639,11 @@
     /// lookup functions.
     llvm::Type *SlotTy;
   public:
-    virtual llvm::Constant *GetEHType(QualType T);
+    llvm::Constant *GetEHType(QualType T) override;
   protected:
-    virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
-                                   llvm::Value *&Receiver,
-                                   llvm::Value *cmd,
-                                   llvm::MDNode *node,
-                                   MessageSendInfo &MSI) {
+    llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver,
+                           llvm::Value *cmd, llvm::MDNode *node,
+                           MessageSendInfo &MSI) override {
       CGBuilderTy &Builder = CGF.Builder;
       llvm::Function *LookupFn = SlotLookupFn;
 
@@ -696,10 +679,9 @@
       Receiver = Builder.CreateLoad(ReceiverPtr, true);
       return imp;
     }
-    virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
-                                        llvm::Value *ObjCSuper,
-                                        llvm::Value *cmd,
-                                        MessageSendInfo &MSI) {
+    llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+                                llvm::Value *cmd,
+                                MessageSendInfo &MSI) override {
       CGBuilderTy &Builder = CGF.Builder;
       llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
 
@@ -760,22 +742,22 @@
       CxxAtomicObjectGetFn.init(&CGM, "objc_getCppObjectAtomic", VoidTy, PtrTy,
           PtrTy, PtrTy, NULL);
     }
-    virtual llvm::Constant *GetCppAtomicObjectGetFunction() {
+    llvm::Constant *GetCppAtomicObjectGetFunction() override {
       // The optimised functions were added in version 1.7 of the GNUstep
       // runtime.
       assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
           VersionTuple(1, 7));
       return CxxAtomicObjectGetFn;
     }
-    virtual llvm::Constant *GetCppAtomicObjectSetFunction() {
+    llvm::Constant *GetCppAtomicObjectSetFunction() override {
       // The optimised functions were added in version 1.7 of the GNUstep
       // runtime.
       assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
           VersionTuple(1, 7));
       return CxxAtomicObjectSetFn;
     }
-    virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
-                                                            bool copy) {
+    llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+                                                    bool copy) override {
       // The optimised property functions omit the GC check, and so are not
       // safe to use in GC mode.  The standard functions are fast in GC mode,
       // so there is less advantage in using them.
@@ -789,10 +771,8 @@
         if (copy) return SetPropertyAtomicCopy;
         return SetPropertyAtomic;
       }
-      if (copy) return SetPropertyNonAtomicCopy;
-      return SetPropertyNonAtomic;
 
-      return 0;
+      return copy ? SetPropertyNonAtomicCopy : SetPropertyNonAtomic;
     }
 };
 
@@ -810,11 +790,9 @@
   /// arguments.  Returns the IMP for the corresponding method.
   LazyRuntimeFunction MsgLookupSuperFn, MsgLookupSuperFnSRet;
 
-  virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
-                                 llvm::Value *&Receiver,
-                                 llvm::Value *cmd,
-                                 llvm::MDNode *node,
-                                 MessageSendInfo &MSI) {
+  llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver,
+                         llvm::Value *cmd, llvm::MDNode *node,
+                         MessageSendInfo &MSI) override {
     CGBuilderTy &Builder = CGF.Builder;
     llvm::Value *args[] = {
             EnforceType(Builder, Receiver, IdTy),
@@ -830,10 +808,8 @@
     return imp.getInstruction();
   }
 
-  virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
-                                      llvm::Value *ObjCSuper,
-                                      llvm::Value *cmd,
-                                      MessageSendInfo &MSI) {
+  llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+                              llvm::Value *cmd, MessageSendInfo &MSI) override {
       CGBuilderTy &Builder = CGF.Builder;
       llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
           PtrToObjCSuperTy), cmd};
@@ -844,8 +820,8 @@
         return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
     }
 
-  virtual llvm::Value *GetClassNamed(CodeGenFunction &CGF,
-                                     const std::string &Name, bool isWeak) {
+  llvm::Value *GetClassNamed(CodeGenFunction &CGF,
+                             const std::string &Name, bool isWeak) override {
     if (isWeak)
       return CGObjCGNU::GetClassNamed(CGF, Name, isWeak);
 
@@ -948,7 +924,7 @@
   Int64Ty = llvm::Type::getInt64Ty(VMContext);
 
   IntPtrTy =
-      TheModule.getPointerSize() == llvm::Module::Pointer32 ? Int32Ty : Int64Ty;
+      CGM.getDataLayout().getPointerSizeInBits() == 32 ? Int32Ty : Int64Ty;
 
   // Object type
   QualType UnqualIdTy = CGM.getContext().getObjCIdType();
@@ -1779,24 +1755,22 @@
     PD = Def;
 
   SmallVector<std::string, 16> Protocols;
-  for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
-       E = PD->protocol_end(); PI != E; ++PI)
-    Protocols.push_back((*PI)->getNameAsString());
+  for (const auto *PI : PD->protocols())
+    Protocols.push_back(PI->getNameAsString());
   SmallVector<llvm::Constant*, 16> InstanceMethodNames;
   SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
   SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
   SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
-  for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(),
-       E = PD->instmeth_end(); iter != E; iter++) {
+  for (const auto *I : PD->instance_methods()) {
     std::string TypeStr;
-    Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
-    if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+    Context.getObjCEncodingForMethodDecl(I, TypeStr);
+    if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
       OptionalInstanceMethodNames.push_back(
-          MakeConstantString((*iter)->getSelector().getAsString()));
+          MakeConstantString(I->getSelector().getAsString()));
       OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
     } else {
       InstanceMethodNames.push_back(
-          MakeConstantString((*iter)->getSelector().getAsString()));
+          MakeConstantString(I->getSelector().getAsString()));
       InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
     }
   }
@@ -1805,18 +1779,16 @@
   SmallVector<llvm::Constant*, 16> ClassMethodTypes;
   SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
   SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
-  for (ObjCProtocolDecl::classmeth_iterator
-         iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
-       iter != endIter ; iter++) {
+  for (const auto *I : PD->class_methods()) {
     std::string TypeStr;
-    Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
-    if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+    Context.getObjCEncodingForMethodDecl(I,TypeStr);
+    if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
       OptionalClassMethodNames.push_back(
-          MakeConstantString((*iter)->getSelector().getAsString()));
+          MakeConstantString(I->getSelector().getAsString()));
       OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
     } else {
       ClassMethodNames.push_back(
-          MakeConstantString((*iter)->getSelector().getAsString()));
+          MakeConstantString(I->getSelector().getAsString()));
       ClassMethodTypes.push_back(MakeConstantString(TypeStr));
     }
   }
@@ -1846,11 +1818,8 @@
 
   // Add all of the property methods that need adding to the method list and to the
   // property metadata list.
-  for (ObjCContainerDecl::prop_iterator
-         iter = PD->prop_begin(), endIter = PD->prop_end();
-       iter != endIter ; iter++) {
+  for (auto *property : PD->properties()) {
     std::vector<llvm::Constant*> Fields;
-    ObjCPropertyDecl *property = *iter;
 
     Fields.push_back(MakePropertyEncodingString(property, 0));
     PushPropertyAttributes(Fields, property);
@@ -1996,8 +1965,7 @@
 /// bitfield / with the 63rd bit set will be 1<<64.
 llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
   int bitCount = bits.size();
-  int ptrBits =
-        (TheModule.getPointerSize() == llvm::Module::Pointer32) ? 32 : 64;
+  int ptrBits = CGM.getDataLayout().getPointerSizeInBits();
   if (bitCount < ptrBits) {
     uint64_t val = 1;
     for (int i=0 ; i<bitCount ; ++i) {
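MakeBitField's fast path packs a short run of bits into a single pointer-sized constant; the only change here is querying the pointer width from the DataLayout instead of the removed llvm::Module accessor. A simplified standalone sketch of the packing idea (the real function uses its own bit placement and falls back to an array form for runs longer than a pointer, both omitted here):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative only: fold a short bit vector into one 64-bit word,
    // element i -> bit i.
    uint64_t packBits(const std::vector<bool> &bits) {
      uint64_t value = 0;
      for (std::size_t i = 0; i < bits.size() && i < 64; ++i)
        if (bits[i])
          value |= uint64_t(1) << i;
      return value;
    }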
@@ -2032,24 +2000,20 @@
   // Collect information about instance methods
   SmallVector<Selector, 16> InstanceMethodSels;
   SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
-  for (ObjCCategoryImplDecl::instmeth_iterator
-         iter = OCD->instmeth_begin(), endIter = OCD->instmeth_end();
-       iter != endIter ; iter++) {
-    InstanceMethodSels.push_back((*iter)->getSelector());
+  for (const auto *I : OCD->instance_methods()) {
+    InstanceMethodSels.push_back(I->getSelector());
     std::string TypeStr;
-    CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+    CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr);
     InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
   }
 
   // Collect information about class methods
   SmallVector<Selector, 16> ClassMethodSels;
   SmallVector<llvm::Constant*, 16> ClassMethodTypes;
-  for (ObjCCategoryImplDecl::classmeth_iterator
-         iter = OCD->classmeth_begin(), endIter = OCD->classmeth_end();
-       iter != endIter ; iter++) {
-    ClassMethodSels.push_back((*iter)->getSelector());
+  for (const auto *I : OCD->class_methods()) {
+    ClassMethodSels.push_back(I->getSelector());
     std::string TypeStr;
-    CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+    CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr);
     ClassMethodTypes.push_back(MakeConstantString(TypeStr));
   }
 
@@ -2093,12 +2057,9 @@
 
   // Add all of the property methods that need adding to the method list and to the
   // property metadata list.
-  for (ObjCImplDecl::propimpl_iterator
-         iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
-       iter != endIter ; iter++) {
+  for (auto *propertyImpl : OID->property_impls()) {
     std::vector<llvm::Constant*> Fields;
-    ObjCPropertyDecl *property = iter->getPropertyDecl();
-    ObjCPropertyImplDecl *propertyImpl = *iter;
+    ObjCPropertyDecl *property = propertyImpl->getPropertyDecl();
     bool isSynthesized = (propertyImpl->getPropertyImplementation() == 
         ObjCPropertyImplDecl::Synthesize);
     bool isDynamic = (propertyImpl->getPropertyImplementation() == 
@@ -2265,12 +2226,10 @@
   // Collect information about instance methods
   SmallVector<Selector, 16> InstanceMethodSels;
   SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
-  for (ObjCImplementationDecl::instmeth_iterator
-         iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
-       iter != endIter ; iter++) {
-    InstanceMethodSels.push_back((*iter)->getSelector());
+  for (const auto *I : OID->instance_methods()) {
+    InstanceMethodSels.push_back(I->getSelector());
     std::string TypeStr;
-    Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+    Context.getObjCEncodingForMethodDecl(I,TypeStr);
     InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
   }
 
@@ -2281,22 +2240,16 @@
   // Collect information about class methods
   SmallVector<Selector, 16> ClassMethodSels;
   SmallVector<llvm::Constant*, 16> ClassMethodTypes;
-  for (ObjCImplementationDecl::classmeth_iterator
-         iter = OID->classmeth_begin(), endIter = OID->classmeth_end();
-       iter != endIter ; iter++) {
-    ClassMethodSels.push_back((*iter)->getSelector());
+  for (const auto *I : OID->class_methods()) {
+    ClassMethodSels.push_back(I->getSelector());
     std::string TypeStr;
-    Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+    Context.getObjCEncodingForMethodDecl(I,TypeStr);
     ClassMethodTypes.push_back(MakeConstantString(TypeStr));
   }
   // Collect the names of referenced protocols
   SmallVector<std::string, 16> Protocols;
-  for (ObjCInterfaceDecl::protocol_iterator
-         I = ClassDecl->protocol_begin(),
-         E = ClassDecl->protocol_end(); I != E; ++I)
-    Protocols.push_back((*I)->getNameAsString());
-
-
+  for (const auto *I : ClassDecl->protocols())
+    Protocols.push_back(I->getNameAsString());
 
   // Get the superclass pointer.
   llvm::Constant *SuperClass;
@@ -2595,7 +2548,7 @@
             llvm::Constant::getNullValue(RegisterAlias->getType()));
     Builder.CreateCondBr(HasRegisterAlias, AliasBB, NoAliasBB);
 
-    // The true branch (has alias registration fucntion):
+    // The true branch (has alias registration function):
     Builder.SetInsertPoint(AliasBB);
     // Emit alias registration calls:
     for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin();
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 2b2a5b8..8f5969c 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -29,12 +29,12 @@
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallString.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cstdio>
 
@@ -174,6 +174,7 @@
 public:
   llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
   llvm::Type *Int8PtrTy, *Int8PtrPtrTy;
+  llvm::Type *IvarOffsetVarTy;
 
   /// ObjectPtrTy - LLVM type for object handles (typeof(id))
   llvm::Type *ObjectPtrTy;
@@ -243,8 +244,8 @@
     Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
     Params.push_back(Ctx.BoolTy);
     llvm::FunctionType *FTy =
-      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(IdType, Params,
-                                                    FunctionType::ExtInfo(),
+      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(IdType, false, Params,
+                                                          FunctionType::ExtInfo(),
                                                           RequiredArgs::All));
     return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
   }
@@ -263,8 +264,9 @@
     Params.push_back(Ctx.BoolTy);
     Params.push_back(Ctx.BoolTy);
     llvm::FunctionType *FTy =
-      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
-                                                     FunctionType::ExtInfo(),
+      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
+                                                          Params,
+                                                          FunctionType::ExtInfo(),
                                                           RequiredArgs::All));
     return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
   }
@@ -289,7 +291,8 @@
     Params.push_back(IdType);
     Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
     llvm::FunctionType *FTy =
-    Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+    Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
+                                                        Params,
                                                         FunctionType::ExtInfo(),
                                                         RequiredArgs::All));
     const char *name;
@@ -316,8 +319,9 @@
     Params.push_back(Ctx.BoolTy);
     Params.push_back(Ctx.BoolTy);
     llvm::FunctionType *FTy =
-      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
-                                                     FunctionType::ExtInfo(),
+      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
+                                                          Params,
+                                                          FunctionType::ExtInfo(),
                                                           RequiredArgs::All));
     return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
   }
@@ -335,8 +339,9 @@
     Params.push_back(Ctx.VoidPtrTy);
     Params.push_back(Ctx.VoidPtrTy);
     llvm::FunctionType *FTy =
-      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
-                                                     FunctionType::ExtInfo(),
+      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
+                                                          Params,
+                                                          FunctionType::ExtInfo(),
                                                           RequiredArgs::All));
     return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
   }
@@ -348,8 +353,9 @@
     SmallVector<CanQualType,1> Params;
     Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
     llvm::FunctionType *FTy =
-      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
-                                                      FunctionType::ExtInfo(),
+      Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
+                                                          Params,
+                                                          FunctionType::ExtInfo(),
                                                       RequiredArgs::All));
     return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
   }
@@ -885,6 +891,9 @@
 
   /// DefinedClasses - List of defined classes.
   SmallVector<llvm::GlobalValue*, 16> DefinedClasses;
+  
+  /// ImplementedClasses - List of @implemented classes.
+  SmallVector<const ObjCInterfaceDecl*, 16> ImplementedClasses;
 
   /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
   SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyClasses;
@@ -991,7 +1000,7 @@
     llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
     SmallVectorImpl<llvm::Constant*> &Properties,
     const Decl *Container,
-    const ObjCProtocolDecl *PROTO,
+    const ObjCProtocolDecl *Proto,
     const ObjCCommonTypesHelper &ObjCTypes);
 
   /// GetProtocolRef - Return a reference to the internal protocol
@@ -1038,12 +1047,12 @@
   CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
     CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
 
-  virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);
-  
-  virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
-                                         const ObjCContainerDecl *CD=0);
+  llvm::Constant *GenerateConstantString(const StringLiteral *SL) override;
 
-  virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+  llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+                                 const ObjCContainerDecl *CD=0) override;
+
+  void GenerateProtocol(const ObjCProtocolDecl *PD) override;
 
   /// GetOrEmitProtocol - Get the protocol object for the given
   /// declaration, emitting it if necessary. The return value has type
@@ -1055,13 +1064,13 @@
   /// forward references will be filled in with empty bodies if no
   /// definition is seen. The return value has type ProtocolPtrTy.
   virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
-  virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
-                                             const CGBlockInfo &blockInfo);
-  virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
-                                             const CGBlockInfo &blockInfo);
-  
-  virtual llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
-                                           QualType T);
+  llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+                                     const CGBlockInfo &blockInfo) override;
+  llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
+                                     const CGBlockInfo &blockInfo) override;
+
+  llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
+                                   QualType T) override;
 };
 
 class CGObjCMac : public CGObjCCommonMac {
@@ -1092,9 +1101,9 @@
   
   llvm::Value *EmitClassRefFromId(CodeGenFunction &CGF,
                                   IdentifierInfo *II);
-  
-  llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF);
-  
+
+  llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override;
+
   /// EmitSuperClassRef - Emits reference to class's main metadata class.
   llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
 
@@ -1145,13 +1154,13 @@
   /// GetOrEmitProtocol - Get the protocol object for the given
   /// declaration, emitting it if necessary. The return value has type
   /// ProtocolPtrTy.
-  virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+  llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) override;
 
   /// GetOrEmitProtocolRef - Get a forward reference to the protocol
   /// object for the given declaration, emitting it if needed. These
   /// forward references will be filled in with empty bodies if no
   /// definition is seen. The return value has type ProtocolPtrTy.
-  virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+  llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) override;
 
   /// EmitProtocolExtension - Generate the protocol extension
   /// structure used to store optional instance and class methods, and
@@ -1177,97 +1186,90 @@
 public:
   CGObjCMac(CodeGen::CodeGenModule &cgm);
 
-  virtual llvm::Function *ModuleInitFunction();
+  llvm::Function *ModuleInitFunction() override;
 
-  virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
-                                              ReturnValueSlot Return,
-                                              QualType ResultType,
-                                              Selector Sel,
-                                              llvm::Value *Receiver,
-                                              const CallArgList &CallArgs,
-                                              const ObjCInterfaceDecl *Class,
-                                              const ObjCMethodDecl *Method);
+  CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+                                      ReturnValueSlot Return,
+                                      QualType ResultType,
+                                      Selector Sel, llvm::Value *Receiver,
+                                      const CallArgList &CallArgs,
+                                      const ObjCInterfaceDecl *Class,
+                                      const ObjCMethodDecl *Method) override;
 
-  virtual CodeGen::RValue
+  CodeGen::RValue
   GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
-                           ReturnValueSlot Return,
-                           QualType ResultType,
-                           Selector Sel,
-                           const ObjCInterfaceDecl *Class,
-                           bool isCategoryImpl,
-                           llvm::Value *Receiver,
-                           bool IsClassMessage,
-                           const CallArgList &CallArgs,
-                           const ObjCMethodDecl *Method);
+                           ReturnValueSlot Return, QualType ResultType,
+                           Selector Sel, const ObjCInterfaceDecl *Class,
+                           bool isCategoryImpl, llvm::Value *Receiver,
+                           bool IsClassMessage, const CallArgList &CallArgs,
+                           const ObjCMethodDecl *Method) override;
 
-  virtual llvm::Value *GetClass(CodeGenFunction &CGF,
-                                const ObjCInterfaceDecl *ID);
+  llvm::Value *GetClass(CodeGenFunction &CGF,
+                        const ObjCInterfaceDecl *ID) override;
 
-  virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel, 
-                                   bool lval = false);
+  llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
+                           bool lval = false) override;
 
   /// The NeXT/Apple runtimes do not support typed selectors; just emit an
   /// untyped one.
-  virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
-                                   const ObjCMethodDecl *Method);
+  llvm::Value *GetSelector(CodeGenFunction &CGF,
+                           const ObjCMethodDecl *Method) override;
 
-  virtual llvm::Constant *GetEHType(QualType T);
+  llvm::Constant *GetEHType(QualType T) override;
 
-  virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+  void GenerateCategory(const ObjCCategoryImplDecl *CMD) override;
 
-  virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+  void GenerateClass(const ObjCImplementationDecl *ClassDecl) override;
 
-  virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+  void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override {}
 
-  virtual llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
-                                           const ObjCProtocolDecl *PD);
+  llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
+                                   const ObjCProtocolDecl *PD) override;
 
-  virtual llvm::Constant *GetPropertyGetFunction();
-  virtual llvm::Constant *GetPropertySetFunction();
-  virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic, 
-                                                          bool copy);
-  virtual llvm::Constant *GetGetStructFunction();
-  virtual llvm::Constant *GetSetStructFunction();
-  virtual llvm::Constant *GetCppAtomicObjectGetFunction();
-  virtual llvm::Constant *GetCppAtomicObjectSetFunction();
-  virtual llvm::Constant *EnumerationMutationFunction();
+  llvm::Constant *GetPropertyGetFunction() override;
+  llvm::Constant *GetPropertySetFunction() override;
+  llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+                                                  bool copy) override;
+  llvm::Constant *GetGetStructFunction() override;
+  llvm::Constant *GetSetStructFunction() override;
+  llvm::Constant *GetCppAtomicObjectGetFunction() override;
+  llvm::Constant *GetCppAtomicObjectSetFunction() override;
+  llvm::Constant *EnumerationMutationFunction() override;
 
-  virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
-                           const ObjCAtTryStmt &S);
-  virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
-                                    const ObjCAtSynchronizedStmt &S);
+  void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+                   const ObjCAtTryStmt &S) override;
+  void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+                            const ObjCAtSynchronizedStmt &S) override;
   void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S);
-  virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
-                             const ObjCAtThrowStmt &S,
-                             bool ClearInsertionPoint=true);
-  virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
-                                         llvm::Value *AddrWeakObj);
-  virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
-                                  llvm::Value *src, llvm::Value *dst);
-  virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
-                                    llvm::Value *src, llvm::Value *dest,
-                                    bool threadlocal = false);
-  virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
-                                  llvm::Value *src, llvm::Value *dest,
-                                  llvm::Value *ivarOffset);
-  virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
-                                        llvm::Value *src, llvm::Value *dest);
-  virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
-                                        llvm::Value *dest, llvm::Value *src,
-                                        llvm::Value *size);
+  void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S,
+                     bool ClearInsertionPoint=true) override;
+  llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+                                 llvm::Value *AddrWeakObj) override;
+  void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+                          llvm::Value *src, llvm::Value *dst) override;
+  void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+                            llvm::Value *src, llvm::Value *dest,
+                            bool threadlocal = false) override;
+  void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+                          llvm::Value *src, llvm::Value *dest,
+                          llvm::Value *ivarOffset) override;
+  void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+                                llvm::Value *src, llvm::Value *dest) override;
+  void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+                                llvm::Value *dest, llvm::Value *src,
+                                llvm::Value *size) override;
 
-  virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
-                                      QualType ObjectTy,
-                                      llvm::Value *BaseValue,
-                                      const ObjCIvarDecl *Ivar,
-                                      unsigned CVRQualifiers);
-  virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
-                                      const ObjCInterfaceDecl *Interface,
-                                      const ObjCIvarDecl *Ivar);
-  
+  LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy,
+                              llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
+                              unsigned CVRQualifiers) override;
+  llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+                              const ObjCInterfaceDecl *Interface,
+                              const ObjCIvarDecl *Ivar) override;
+
   /// GetClassGlobal - Return the global variable for the Objective-C
   /// class of the given name.
-  virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
+  llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
+                                       bool Weak = false) override {
     llvm_unreachable("CGObjCMac::GetClassGlobal");
   }
 };
@@ -1316,7 +1318,8 @@
                                             llvm::Constant *IsAGV,
                                             llvm::Constant *SuperClassGV,
                                             llvm::Constant *ClassRoGV,
-                                            bool HiddenVisibility);
+                                            bool HiddenVisibility,
+                                            bool Weak);
 
   llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
 
@@ -1341,13 +1344,13 @@
   /// GetOrEmitProtocol - Get the protocol object for the given
   /// declaration, emitting it if necessary. The return value has type
   /// ProtocolPtrTy.
-  virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+  llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) override;
 
   /// GetOrEmitProtocolRef - Get a forward reference to the protocol
   /// object for the given declaration, emitting it if needed. These
   /// forward references will be filled in with empty bodies if no
   /// definition is seen. The return value has type ProtocolPtrTy.
-  virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+  llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) override;
 
   /// EmitProtocolList - Generate the list of referenced
   /// protocols. The return value has type ProtocolListPtrTy.
@@ -1367,17 +1370,18 @@
   
   /// GetClassGlobal - Return the global variable for the Objective-C
   /// class of the given name.
-  llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
-    
+  llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
+                                       bool Weak = false) override;
+
   /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
   /// for the given class reference.
   llvm::Value *EmitClassRef(CodeGenFunction &CGF,
                             const ObjCInterfaceDecl *ID);
   
   llvm::Value *EmitClassRefFromId(CodeGenFunction &CGF,
-                                  IdentifierInfo *II);
-  
-  llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF);
+                                  IdentifierInfo *II, bool Weak);
+
+  llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override;
 
   /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
   /// for the given super class reference.
@@ -1434,11 +1438,10 @@
   bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;
 
   bool IsIvarOffsetKnownIdempotent(const CodeGen::CodeGenFunction &CGF,
-                                   const ObjCInterfaceDecl *ID,
                                    const ObjCIvarDecl *IV) {
-    // Annotate the load as an invariant load iff the object type is the type,
-    // or a derived type, of the class containing the ivar within an ObjC
-    // method.  This check is needed because the ivar offset is a lazily
+    // Annotate the load as an invariant load iff we are in an instance method
+    // and the ivar belongs to that method's class or one of its superclasses.
+    // This check is needed because the ivar offset is a lazily
     // initialised value that may depend on objc_msgSend to perform a fixup on
     // the first message dispatch.
     //
@@ -1446,121 +1449,115 @@
     // base of the ivar access is a parameter to an Objective C method.
     // However, because the parameters are not available in the current
     // interface, we cannot perform this check.
-    if (CGF.CurFuncDecl && isa<ObjCMethodDecl>(CGF.CurFuncDecl))
-      if (IV->getContainingInterface()->isSuperClassOf(ID))
-        return true;
+    if (const ObjCMethodDecl *MD =
+          dyn_cast_or_null<ObjCMethodDecl>(CGF.CurFuncDecl))
+      if (MD->isInstanceMethod())
+        if (const ObjCInterfaceDecl *ID = MD->getClassInterface())
+          return IV->getContainingInterface()->isSuperClassOf(ID);
     return false;
   }
 
 public:
   CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
   // FIXME. All stubs for now!
-  virtual llvm::Function *ModuleInitFunction();
+  llvm::Function *ModuleInitFunction() override;
 
-  virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
-                                              ReturnValueSlot Return,
-                                              QualType ResultType,
-                                              Selector Sel,
-                                              llvm::Value *Receiver,
-                                              const CallArgList &CallArgs,
-                                              const ObjCInterfaceDecl *Class,
-                                              const ObjCMethodDecl *Method);
+  CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+                                      ReturnValueSlot Return,
+                                      QualType ResultType, Selector Sel,
+                                      llvm::Value *Receiver,
+                                      const CallArgList &CallArgs,
+                                      const ObjCInterfaceDecl *Class,
+                                      const ObjCMethodDecl *Method) override;
 
-  virtual CodeGen::RValue
+  CodeGen::RValue
   GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
-                           ReturnValueSlot Return,
-                           QualType ResultType,
-                           Selector Sel,
-                           const ObjCInterfaceDecl *Class,
-                           bool isCategoryImpl,
-                           llvm::Value *Receiver,
-                           bool IsClassMessage,
-                           const CallArgList &CallArgs,
-                           const ObjCMethodDecl *Method);
+                           ReturnValueSlot Return, QualType ResultType,
+                           Selector Sel, const ObjCInterfaceDecl *Class,
+                           bool isCategoryImpl, llvm::Value *Receiver,
+                           bool IsClassMessage, const CallArgList &CallArgs,
+                           const ObjCMethodDecl *Method) override;
 
-  virtual llvm::Value *GetClass(CodeGenFunction &CGF,
-                                const ObjCInterfaceDecl *ID);
+  llvm::Value *GetClass(CodeGenFunction &CGF,
+                        const ObjCInterfaceDecl *ID) override;
 
-  virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
-                                   bool lvalue = false)
+  llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
+                           bool lvalue = false) override
     { return EmitSelector(CGF, Sel, lvalue); }
 
   /// The NeXT/Apple runtimes do not support typed selectors; just emit an
   /// untyped one.
-  virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
-                                   const ObjCMethodDecl *Method)
+  llvm::Value *GetSelector(CodeGenFunction &CGF,
+                           const ObjCMethodDecl *Method) override
     { return EmitSelector(CGF, Method->getSelector()); }
 
-  virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+  void GenerateCategory(const ObjCCategoryImplDecl *CMD) override;
 
-  virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+  void GenerateClass(const ObjCImplementationDecl *ClassDecl) override;
 
-  virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+  void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override {}
 
-  virtual llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
-                                           const ObjCProtocolDecl *PD);
+  llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
+                                   const ObjCProtocolDecl *PD) override;
 
-  virtual llvm::Constant *GetEHType(QualType T);
+  llvm::Constant *GetEHType(QualType T) override;
 
-  virtual llvm::Constant *GetPropertyGetFunction() {
+  llvm::Constant *GetPropertyGetFunction() override {
     return ObjCTypes.getGetPropertyFn();
   }
-  virtual llvm::Constant *GetPropertySetFunction() {
+  llvm::Constant *GetPropertySetFunction() override {
     return ObjCTypes.getSetPropertyFn();
   }
-  
-  virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic, 
-                                                          bool copy) {
+
+  llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+                                                  bool copy) override {
     return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
   }
-  
-  virtual llvm::Constant *GetSetStructFunction() {
+
+  llvm::Constant *GetSetStructFunction() override {
     return ObjCTypes.getCopyStructFn();
   }
-  virtual llvm::Constant *GetGetStructFunction() {
+  llvm::Constant *GetGetStructFunction() override {
     return ObjCTypes.getCopyStructFn();
   }
-  virtual llvm::Constant *GetCppAtomicObjectSetFunction() {
+  llvm::Constant *GetCppAtomicObjectSetFunction() override {
     return ObjCTypes.getCppAtomicObjectFunction();
   }
-  virtual llvm::Constant *GetCppAtomicObjectGetFunction() {
+  llvm::Constant *GetCppAtomicObjectGetFunction() override {
     return ObjCTypes.getCppAtomicObjectFunction();
   }
-  
-  virtual llvm::Constant *EnumerationMutationFunction() {
+
+  llvm::Constant *EnumerationMutationFunction() override {
     return ObjCTypes.getEnumerationMutationFn();
   }
 
-  virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
-                           const ObjCAtTryStmt &S);
-  virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
-                                    const ObjCAtSynchronizedStmt &S);
-  virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
-                             const ObjCAtThrowStmt &S,
-                             bool ClearInsertionPoint=true);
-  virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
-                                         llvm::Value *AddrWeakObj);
-  virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
-                                  llvm::Value *src, llvm::Value *dst);
-  virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
-                                    llvm::Value *src, llvm::Value *dest,
-                                    bool threadlocal = false);
-  virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
-                                  llvm::Value *src, llvm::Value *dest,
-                                  llvm::Value *ivarOffset);
-  virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
-                                        llvm::Value *src, llvm::Value *dest);
-  virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
-                                        llvm::Value *dest, llvm::Value *src,
-                                        llvm::Value *size);
-  virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
-                                      QualType ObjectTy,
-                                      llvm::Value *BaseValue,
-                                      const ObjCIvarDecl *Ivar,
-                                      unsigned CVRQualifiers);
-  virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
-                                      const ObjCInterfaceDecl *Interface,
-                                      const ObjCIvarDecl *Ivar);
+  void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+                   const ObjCAtTryStmt &S) override;
+  void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+                            const ObjCAtSynchronizedStmt &S) override;
+  void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S,
+                     bool ClearInsertionPoint=true) override;
+  llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+                                 llvm::Value *AddrWeakObj) override;
+  void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+                          llvm::Value *src, llvm::Value *dst) override;
+  void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+                            llvm::Value *src, llvm::Value *dest,
+                            bool threadlocal = false) override;
+  void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+                          llvm::Value *src, llvm::Value *dest,
+                          llvm::Value *ivarOffset) override;
+  void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+                                llvm::Value *src, llvm::Value *dest) override;
+  void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+                                llvm::Value *dest, llvm::Value *src,
+                                llvm::Value *size) override;
+  LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy,
+                              llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
+                              unsigned CVRQualifiers) override;
+  llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+                              const ObjCInterfaceDecl *Interface,
+                              const ObjCIvarDecl *Ivar) override;
 };
 
 /// A helper class for performing the null-initialization of a return
@@ -1878,14 +1875,14 @@
   MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
 
   if (Method)
-    assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
-           CGM.getContext().getCanonicalType(ResultType) &&
+    assert(CGM.getContext().getCanonicalType(Method->getReturnType()) ==
+               CGM.getContext().getCanonicalType(ResultType) &&
            "Result type mismatch!");
 
   NullReturnState nullReturn;
 
   llvm::Constant *Fn = NULL;
-  if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+  if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
     if (!IsSuper) nullReturn.init(CGF, Arg0);
     Fn = (ObjCABI == 2) ?  ObjCTypes.getSendStretFn2(IsSuper)
       : ObjCTypes.getSendStretFn(IsSuper);
@@ -1896,15 +1893,17 @@
     Fn = (ObjCABI == 2) ? ObjCTypes.getSendFp2RetFn2(IsSuper)
       : ObjCTypes.getSendFp2retFn(IsSuper);
   } else {
+    // arm64 uses objc_msgSend for stret methods, yet a null receiver check
+    // must still be made in that case.
+    if (!IsSuper && CGM.ReturnTypeUsesSRet(MSI.CallInfo))
+      nullReturn.init(CGF, Arg0);
     Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
       : ObjCTypes.getSendFn(IsSuper);
   }
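For illustration only, a minimal sketch of the dispatch choice the branch above makes (the helper name is mine; the fpret/fp2ret and non-fragile "Fn2" variants are left out). The _stret entry point is needed only when the struct-return slot would occupy an argument register; on arm64 the indirect result travels in x8, so plain objc_msgSend is used even for sret calls and the added null receiver check zero-fills the result instead.

#include "llvm/ADT/StringRef.h"

// Illustrative helper, not part of the patch.
static llvm::StringRef pickMsgSendName(bool ReturnSlotInterferesWithArgs,
                                       bool IsSuper) {
  if (ReturnSlotInterferesWithArgs)
    return IsSuper ? "objc_msgSendSuper_stret" : "objc_msgSend_stret";
  return IsSuper ? "objc_msgSendSuper" : "objc_msgSend";
}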
   
   bool requiresnullCheck = false;
   if (CGM.getLangOpts().ObjCAutoRefCount && Method)
-    for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
-         e = Method->param_end(); i != e; ++i) {
-      const ParmVarDecl *ParamDecl = (*i);
+    for (const auto *ParamDecl : Method->params()) {
       if (ParamDecl->hasAttr<NSConsumedAttr>()) {
         if (!nullReturn.NullBB)
           nullReturn.init(CGF, Arg0);
@@ -1967,9 +1966,8 @@
   // to be GC'ed.
 
   // Walk the captured variables.
-  for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
-         ce = blockDecl->capture_end(); ci != ce; ++ci) {
-    const VarDecl *variable = ci->getVariable();
+  for (const auto &CI : blockDecl->captures()) {
+    const VarDecl *variable = CI.getVariable();
     QualType type = variable->getType();
 
     const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
@@ -1980,7 +1978,7 @@
     uint64_t fieldOffset = layout->getElementOffset(capture.getIndex());
 
     // __block variables are passed by their descriptor address.
-    if (ci->isByRef()) {
+    if (CI.isByRef()) {
       IvarsInfo.push_back(GC_IVAR(fieldOffset, /*size in words*/ 1));
       continue;
     }
@@ -2118,9 +2116,6 @@
         ElCount *= CArray->getSize().getZExtValue();
         FQT = CArray->getElementType();
       }
-      
-      assert(!FQT->isUnionType() &&
-             "layout for array of unions not supported");
       if (FQT->isRecordType() && ElCount) {
         int OldIndex = RunSkipBlockVars.size() - 1;
         const RecordType *RT = FQT->getAs<RecordType>();
@@ -2196,10 +2191,7 @@
                                                   bool &HasUnion,
                                                   bool ByrefLayout) {
   const RecordDecl *RD = RT->getDecl();
-  SmallVector<const FieldDecl*, 16> Fields;
-  for (RecordDecl::field_iterator i = RD->field_begin(),
-       e = RD->field_end(); i != e; ++i)
-    Fields.push_back(*i);
+  SmallVector<const FieldDecl*, 16> Fields(RD->fields());
   llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
   const llvm::StructLayout *RecLayout =
     CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty));
@@ -2486,9 +2478,8 @@
                            blockInfo.BlockHeaderForcedGapOffset,
                            blockInfo.BlockHeaderForcedGapSize);
   // Walk the captured variables.
-  for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
-       ce = blockDecl->capture_end(); ci != ce; ++ci) {
-    const VarDecl *variable = ci->getVariable();
+  for (const auto &CI : blockDecl->captures()) {
+    const VarDecl *variable = CI.getVariable();
     QualType type = variable->getType();
     
     const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
@@ -2500,17 +2491,17 @@
        CharUnits::fromQuantity(layout->getElementOffset(capture.getIndex()));
     
     assert(!type->isArrayType() && "array variable should not be caught");
-    if (!ci->isByRef())
+    if (!CI.isByRef())
       if (const RecordType *record = type->getAs<RecordType>()) {
         BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion);
         continue;
       }
     CharUnits fieldSize;
-    if (ci->isByRef())
+    if (CI.isByRef())
       fieldSize = CharUnits::fromQuantity(WordSizeInBytes);
     else
       fieldSize = CGM.getContext().getTypeSizeInChars(type);
-    UpdateRunSkipBlockVars(ci->isByRef(), getBlockCaptureLifetime(type, false),
+    UpdateRunSkipBlockVars(CI.isByRef(), getBlockCaptureLifetime(type, false),
                            fieldOffset, fieldSize);
   }
   return getBitmapBlockLayout(false);
@@ -2562,8 +2553,16 @@
   return GetOrEmitProtocolRef(PD);
 }
 
+static void assertPrivateName(const llvm::GlobalValue *GV) {
+  StringRef NameRef = GV->getName();
+  (void)NameRef;
+  assert(NameRef[0] == '\01' && (NameRef[1] == 'L' || NameRef[1] == 'l'));
+  assert(GV->getVisibility() == llvm::GlobalValue::DefaultVisibility);
+  assert(GV->getLinkage() == llvm::GlobalValue::PrivateLinkage);
+}
+
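For context, a hedged sketch of the convention the new assertPrivateName helper enforces: the metadata globals in this file are created with PrivateLinkage and a name beginning with "\01L" or "\01l", where '\01' tells LLVM to emit the name verbatim and the Mach-O 'L'/'l' prefixes mark local, strippable labels. The snippet below is illustrative only; Init and CGM are assumed to be in scope, as in the surrounding code.

llvm::GlobalVariable *GV = new llvm::GlobalVariable(
    CGM.getModule(), Init->getType(), /*isConstant=*/false,
    llvm::GlobalValue::PrivateLinkage, Init,
    "\01L_OBJC_EXAMPLE_");   // illustrative name; '\01' = verbatim symbol
assertPrivateName(GV);       // checks name prefix, visibility and linkage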
 /*
-// APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+// Objective-C 1.0 extensions
 struct _objc_protocol {
 struct _objc_protocol_extension *isa;
 char *protocol_name;
@@ -2593,9 +2592,7 @@
   std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
   std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
   std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
-  for (ObjCProtocolDecl::instmeth_iterator
-         i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
-    ObjCMethodDecl *MD = *i;
+  for (const auto *MD : PD->instance_methods()) {
     llvm::Constant *C = GetMethodDescriptionConstant(MD);
     if (!C)
       return GetOrEmitProtocolRef(PD);
@@ -2609,9 +2606,7 @@
     }
   }
 
-  for (ObjCProtocolDecl::classmeth_iterator
-         i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
-    ObjCMethodDecl *MD = *i;
+  for (const auto *MD : PD->class_methods()) {
     llvm::Constant *C = GetMethodDescriptionConstant(MD);
     if (!C)
       return GetOrEmitProtocolRef(PD);
@@ -2646,13 +2641,13 @@
                                                    Values);
 
   if (Entry) {
-    // Already created, fix the linkage and update the initializer.
-    Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+    // Already created, update the initializer.
+    assert(Entry->getLinkage() == llvm::GlobalValue::PrivateLinkage);
     Entry->setInitializer(Init);
   } else {
     Entry =
       new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
-                               llvm::GlobalValue::InternalLinkage,
+                               llvm::GlobalValue::PrivateLinkage,
                                Init,
                                "\01L_OBJC_PROTOCOL_" + PD->getName());
     Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
@@ -2661,7 +2656,8 @@
 
     Protocols[PD->getIdentifier()] = Entry;
   }
-  CGM.AddUsedGlobal(Entry);
+  assertPrivateName(Entry);
+  CGM.addCompilerUsedGlobal(Entry);
 
   return Entry;
 }
@@ -2675,13 +2671,14 @@
     // contents for protocols which were referenced but never defined.
     Entry =
       new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
-                               llvm::GlobalValue::ExternalLinkage,
+                               llvm::GlobalValue::PrivateLinkage,
                                0,
                                "\01L_OBJC_PROTOCOL_" + PD->getName());
     Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
     // FIXME: Is this necessary? Why only for protocol?
     Entry->setAlignment(4);
   }
+  assertPrivateName(Entry);
 
   return Entry;
 }
@@ -2726,7 +2723,7 @@
     llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
 
   // No special section, but goes in llvm.used
-  return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getName(),
+  return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(),
                            Init,
                            0, 0, true);
 }
@@ -2775,14 +2772,11 @@
 PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
                        SmallVectorImpl<llvm::Constant *> &Properties,
                        const Decl *Container,
-                       const ObjCProtocolDecl *PROTO,
+                       const ObjCProtocolDecl *Proto,
                        const ObjCCommonTypesHelper &ObjCTypes) {
-  for (ObjCProtocolDecl::protocol_iterator P = PROTO->protocol_begin(),
-         E = PROTO->protocol_end(); P != E; ++P) 
-    PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
-  for (ObjCContainerDecl::prop_iterator I = PROTO->prop_begin(),
-       E = PROTO->prop_end(); I != E; ++I) {
-    const ObjCPropertyDecl *PD = *I;
+  for (const auto *P : Proto->protocols()) 
+    PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
+  for (const auto *PD : Proto->properties()) {
     if (!PropertySet.insert(PD->getIdentifier()))
       continue;
     llvm::Constant *Prop[] = {
@@ -2811,9 +2805,7 @@
                                        const ObjCCommonTypesHelper &ObjCTypes) {
   SmallVector<llvm::Constant *, 16> Properties;
   llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
-  for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(),
-         E = OCD->prop_end(); I != E; ++I) {
-    const ObjCPropertyDecl *PD = *I;
+  for (const auto *PD : OCD->properties()) {
     PropertySet.insert(PD->getIdentifier());
     llvm::Constant *Prop[] = {
       GetPropertyName(PD->getIdentifier()),
@@ -2823,17 +2815,12 @@
                                                    Prop));
   }
   if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
-    for (ObjCInterfaceDecl::all_protocol_iterator
-         P = OID->all_referenced_protocol_begin(),
-         E = OID->all_referenced_protocol_end(); P != E; ++P)
-      PushProtocolProperties(PropertySet, Properties, Container, (*P), 
-                             ObjCTypes);
+    for (const auto *P : OID->all_referenced_protocols())
+      PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
   }
   else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
-    for (ObjCCategoryDecl::protocol_iterator P = CD->protocol_begin(),
-         E = CD->protocol_end(); P != E; ++P)
-      PushProtocolProperties(PropertySet, Properties, Container, (*P), 
-                             ObjCTypes);
+    for (const auto *P : CD->protocols())
+      PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
   }
 
   // Return null for empty list.
@@ -2945,16 +2932,13 @@
                                      << OCD->getName();
 
   SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods;
-  for (ObjCCategoryImplDecl::instmeth_iterator
-         i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+  for (const auto *I : OCD->instance_methods())
     // Instance methods should always be defined.
-    InstanceMethods.push_back(GetMethodConstant(*i));
-  }
-  for (ObjCCategoryImplDecl::classmeth_iterator
-         i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+    InstanceMethods.push_back(GetMethodConstant(I));
+
+  for (const auto *I : OCD->class_methods())
     // Class methods should always be defined.
-    ClassMethods.push_back(GetMethodConstant(*i));
-  }
+    ClassMethods.push_back(GetMethodConstant(I));
 
   llvm::Constant *Values[7];
   Values[0] = GetClassName(OCD->getIdentifier());
@@ -3073,21 +3057,15 @@
     Flags |= FragileABI_Class_Hidden;
 
   SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods;
-  for (ObjCImplementationDecl::instmeth_iterator
-         i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+  for (const auto *I : ID->instance_methods())
     // Instance methods should always be defined.
-    InstanceMethods.push_back(GetMethodConstant(*i));
-  }
-  for (ObjCImplementationDecl::classmeth_iterator
-         i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+    InstanceMethods.push_back(GetMethodConstant(I));
+
+  for (const auto *I : ID->class_methods())
     // Class methods should always be defined.
-    ClassMethods.push_back(GetMethodConstant(*i));
-  }
+    ClassMethods.push_back(GetMethodConstant(I));
 
-  for (ObjCImplementationDecl::propimpl_iterator
-         i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
-    ObjCPropertyImplDecl *PID = *i;
-
+  for (const auto *PID : ID->property_impls()) {
     if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
       ObjCPropertyDecl *PD = PID->getPropertyDecl();
 
@@ -3133,19 +3111,19 @@
   Name += ClassName;
   const char *Section = "__OBJC,__class,regular,no_dead_strip";
   // Check for a forward reference.
-  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
   if (GV) {
     assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
            "Forward metaclass reference has incorrect type.");
-    GV->setLinkage(llvm::GlobalValue::InternalLinkage);
     GV->setInitializer(Init);
     GV->setSection(Section);
     GV->setAlignment(4);
-    CGM.AddUsedGlobal(GV);
-  } 
-  else
+    CGM.addCompilerUsedGlobal(GV);
+  } else
     GV = CreateMetadataVar(Name, Init, Section, 4, true);
+  assertPrivateName(GV);
   DefinedClasses.push_back(GV);
+  ImplementedClasses.push_back(Interface);
   // method definition entries must be clear for next implementation.
   MethodDefinitions.clear();
 }
@@ -3201,20 +3179,20 @@
   Name += ID->getName();
 
   // Check for a forward reference.
-  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
   if (GV) {
     assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
            "Forward metaclass reference has incorrect type.");
-    GV->setLinkage(llvm::GlobalValue::InternalLinkage);
     GV->setInitializer(Init);
   } else {
     GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
-                                  llvm::GlobalValue::InternalLinkage,
+                                  llvm::GlobalValue::PrivateLinkage,
                                   Init, Name);
   }
+  assertPrivateName(GV);
   GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
   GV->setAlignment(4);
-  CGM.AddUsedGlobal(GV);
+  CGM.addCompilerUsedGlobal(GV);
 
   return GV;
 }
@@ -3230,35 +3208,29 @@
   // Check for an existing forward reference.
   // Previously, metaclass with internal linkage may have been defined.
   // pass 'true' as 2nd argument so it is returned.
-  if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
-                                                                   true)) {
-    assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
-           "Forward metaclass reference has incorrect type.");
-    return GV;
-  } else {
-    // Generate as an external reference to keep a consistent
-    // module. This will be patched up when we emit the metaclass.
-    return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
-                                    llvm::GlobalValue::ExternalLinkage,
-                                    0,
-                                    Name);
-  }
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+  if (!GV)
+    GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+                                  llvm::GlobalValue::PrivateLinkage, 0, Name);
+
+  assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+         "Forward metaclass reference has incorrect type.");
+  assertPrivateName(GV);
+  return GV;
 }
 
 llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
   std::string Name = "\01L_OBJC_CLASS_" + ID->getNameAsString();
-  
-  if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
-                                                                   true)) {
-    assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
-           "Forward class metadata reference has incorrect type.");
-    return GV;
-  } else {
-    return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
-                                    llvm::GlobalValue::ExternalLinkage,
-                                    0,
-                                    Name);
-  }
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+
+  if (!GV)
+    GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+                                  llvm::GlobalValue::PrivateLinkage, 0, Name);
+
+  assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+         "Forward class metadata reference has incorrect type.");
+  assertPrivateName(GV);
+  return GV;
 }
 
 /*
@@ -3430,13 +3402,14 @@
   llvm::Type *Ty = Init->getType();
   llvm::GlobalVariable *GV =
     new llvm::GlobalVariable(CGM.getModule(), Ty, false,
-                             llvm::GlobalValue::InternalLinkage, Init, Name);
+                             llvm::GlobalValue::PrivateLinkage, Init, Name);
+  assertPrivateName(GV);
   if (Section)
     GV->setSection(Section);
   if (Align)
     GV->setAlignment(Align);
   if (AddToUsed)
-    CGM.AddUsedGlobal(GV);
+    CGM.addCompilerUsedGlobal(GV);
   return GV;
 }
 
@@ -3501,7 +3474,7 @@
       : S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
         ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       // Check whether we need to call objc_exception_try_exit.
       // In optimized code, this branch will always be folded.
       llvm::BasicBlock *FinallyCallExit =
@@ -4293,14 +4266,14 @@
 ///   unsigned flags;
 /// };
 enum ImageInfoFlags {
-  eImageInfo_FixAndContinue      = (1 << 0),
+  eImageInfo_FixAndContinue      = (1 << 0), // This flag is no longer set by clang.
   eImageInfo_GarbageCollected    = (1 << 1),
   eImageInfo_GCOnly              = (1 << 2),
-  eImageInfo_OptimizedByDyld     = (1 << 3), // FIXME: When is this set.
+  eImageInfo_OptimizedByDyld     = (1 << 3), // This flag is set by the dyld shared cache.
 
   // A flag indicating that the module has no instances of a @synthesize of a
   // superclass variable. <rdar://problem/6803242>
-  eImageInfo_CorrectedSynthesize = (1 << 4),
+  eImageInfo_CorrectedSynthesize = (1 << 4), // This flag is no longer set by clang.
   eImageInfo_ImageIsSimulated    = (1 << 5)
  eImageInfo_ImageIsSimulated    = (1 << 5)  // Set when targeting the simulator.
};
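As a rough illustration of how the bits that clang still sets combine, a sketch follows; the helper name is mine and this is not the patch's code path.

#include "clang/Basic/LangOptions.h"

// Sketch only: the GC-related image info bits for a given -fobjc-gc mode.
static unsigned gcImageInfoFlags(clang::LangOptions::GCMode GC) {
  unsigned Flags = 0;
  if (GC != clang::LangOptions::NonGC)
    Flags |= eImageInfo_GarbageCollected;   // -fobjc-gc or -fobjc-gc-only
  if (GC == clang::LangOptions::GCOnly)
    Flags |= eImageInfo_GCOnly;             // no GC/MRR hybrid code
  return Flags;
}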
 
@@ -4399,9 +4372,17 @@
   // The runtime expects exactly the list of defined classes followed
   // by the list of defined categories, in a single array.
   SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories);
-  for (unsigned i=0; i<NumClasses; i++)
+  for (unsigned i=0; i<NumClasses; i++) {
+    const ObjCInterfaceDecl *ID = ImplementedClasses[i];
+    assert(ID);
+    if (ObjCImplementationDecl *IMP = ID->getImplementation())
+      // We are implementing a weak imported interface. Give it external linkage
+      if (ID->isWeakImported() && !IMP->isWeakImported())
+        DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+    
     Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
                                                 ObjCTypes.Int8PtrTy);
+  }
   for (unsigned i=0; i<NumCategories; i++)
     Symbols[NumClasses + i] =
       llvm::ConstantExpr::getBitCast(DefinedCategories[i],
@@ -4507,10 +4488,7 @@
                                                 bool &HasUnion) {
   const RecordDecl *RD = RT->getDecl();
   // FIXME - Use iterator.
-  SmallVector<const FieldDecl*, 16> Fields;
-  for (RecordDecl::field_iterator i = RD->field_begin(),
-                                  e = RD->field_end(); i != e; ++i)
-    Fields.push_back(*i);
+  SmallVector<const FieldDecl*, 16> Fields(RD->fields());
   llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
   const llvm::StructLayout *RecLayout =
     CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty));
@@ -4589,9 +4567,6 @@
         ElCount *= CArray->getSize().getZExtValue();
         FQT = CArray->getElementType();
       }
-
-      assert(!FQT->isUnionType() &&
-             "layout for array of unions not supported");
       if (FQT->isRecordType() && ElCount) {
         int OldIndex = IvarsInfo.size() - 1;
         int OldSkIndex = SkipIvars.size() -1;
@@ -4996,10 +4971,10 @@
     Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
     Values[3] = Values[4] =
       llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
-    I->second->setLinkage(llvm::GlobalValue::InternalLinkage);
+    assertPrivateName(I->second);
     I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
                                                         Values));
-    CGM.AddUsedGlobal(I->second);
+    CGM.addCompilerUsedGlobal(I->second);
   }
 
   // Add assembler directives to add lazy undefined symbol references
@@ -5054,6 +5029,13 @@
   Int8PtrTy = CGM.Int8PtrTy;
   Int8PtrPtrTy = CGM.Int8PtrPtrTy;
 
+  // arm64 targets use "int" ivar offset variables. All others,
+  // including OS X x86_64 and Windows x86_64, use "long" ivar offsets.
+  if (CGM.getTarget().getTriple().getArch() == llvm::Triple::arm64)
+    IvarOffsetVarTy = IntTy;
+  else
+    IvarOffsetVarTy = LongTy;
+
   ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
   PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
   SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
@@ -5357,16 +5339,15 @@
   ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
 
   // struct _ivar_t {
-  //   unsigned long int *offset;  // pointer to ivar offset location
+  //   unsigned [long] int *offset;  // pointer to ivar offset location
   //   char *name;
   //   char *type;
   //   uint32_t alignment;
   //   uint32_t size;
   // }
-  IvarnfABITy =
-    llvm::StructType::create("struct._ivar_t",
-                             llvm::PointerType::getUnqual(LongTy),
-                             Int8PtrTy, Int8PtrTy, IntTy, IntTy, NULL);
+  IvarnfABITy = llvm::StructType::create(
+      "struct._ivar_t", llvm::PointerType::getUnqual(IvarOffsetVarTy),
+      Int8PtrTy, Int8PtrTy, IntTy, IntTy, NULL);
 
   // struct _ivar_list_t {
   //   uint32 entsize;  // sizeof(struct _ivar_t)
@@ -5521,12 +5502,13 @@
 
   llvm::GlobalVariable *GV =
     new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
-                             llvm::GlobalValue::InternalLinkage,
+                             llvm::GlobalValue::PrivateLinkage,
                              Init,
                              SymbolName);
+  assertPrivateName(GV);
   GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
   GV->setSection(SectionName);
-  CGM.AddUsedGlobal(GV);
+  CGM.addCompilerUsedGlobal(GV);
 }
 
 void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
@@ -5534,24 +5516,20 @@
 
   // Build list of all implemented class addresses in array
   // L_OBJC_LABEL_CLASS_$.
+
+  for (unsigned i=0, NumClasses=ImplementedClasses.size(); i<NumClasses; i++) {
+    const ObjCInterfaceDecl *ID = ImplementedClasses[i];
+    assert(ID);
+    if (ObjCImplementationDecl *IMP = ID->getImplementation())
+      // We are implementing a weak imported interface. Give it external linkage
+      if (ID->isWeakImported() && !IMP->isWeakImported())
+        DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+  }
+  
   AddModuleClassList(DefinedClasses,
                      "\01L_OBJC_LABEL_CLASS_$",
                      "__DATA, __objc_classlist, regular, no_dead_strip");
-  
-  for (unsigned i = 0, e = DefinedClasses.size(); i < e; i++) {
-    llvm::GlobalValue *IMPLGV = DefinedClasses[i];
-    if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
-      continue;
-    IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
-  }
-  
-  for (unsigned i = 0, e = DefinedMetaClasses.size(); i < e; i++) {
-    llvm::GlobalValue *IMPLGV = DefinedMetaClasses[i];
-    if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
-      continue;
-    IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
-  }    
-  
+
   AddModuleClassList(DefinedNonLazyClasses,
                      "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
                      "__DATA, __objc_nlclslist, regular, no_dead_strip");
@@ -5669,22 +5647,16 @@
   std::string MethodListName("\01l_OBJC_$_");
   if (flags & NonFragileABI_Class_Meta) {
     MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
-    for (ObjCImplementationDecl::classmeth_iterator
-           i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+    for (const auto *I : ID->class_methods())
       // Class methods should always be defined.
-      Methods.push_back(GetMethodConstant(*i));
-    }
+      Methods.push_back(GetMethodConstant(I));
   } else {
     MethodListName += "INSTANCE_METHODS_" + ID->getNameAsString();
-    for (ObjCImplementationDecl::instmeth_iterator
-           i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+    for (const auto *I : ID->instance_methods())
       // Instance methods should always be defined.
-      Methods.push_back(GetMethodConstant(*i));
-    }
-    for (ObjCImplementationDecl::propimpl_iterator
-           i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
-      ObjCPropertyImplDecl *PID = *i;
+      Methods.push_back(GetMethodConstant(I));
 
+    for (const auto *PID : ID->property_impls()) {
       if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
         ObjCPropertyDecl *PD = PID->getPropertyDecl();
 
@@ -5721,11 +5693,12 @@
                                                    Values);
   llvm::GlobalVariable *CLASS_RO_GV =
     new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
-                             llvm::GlobalValue::InternalLinkage,
+                             llvm::GlobalValue::PrivateLinkage,
                              Init,
                              (flags & NonFragileABI_Class_Meta) ?
                              std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
                              std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
+  assertPrivateName(CLASS_RO_GV);
   CLASS_RO_GV->setAlignment(
     CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
   CLASS_RO_GV->setSection("__DATA, __objc_const");
@@ -5743,12 +5716,9 @@
 ///   struct class_ro_t *ro;
 /// }
 ///
-llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
-  std::string &ClassName,
-  llvm::Constant *IsAGV,
-  llvm::Constant *SuperClassGV,
-  llvm::Constant *ClassRoGV,
-  bool HiddenVisibility) {
+llvm::GlobalVariable *CGObjCNonFragileABIMac::BuildClassMetaData(
+    std::string &ClassName, llvm::Constant *IsAGV, llvm::Constant *SuperClassGV,
+    llvm::Constant *ClassRoGV, bool HiddenVisibility, bool Weak) {
   llvm::Constant *Values[] = {
     IsAGV,
     SuperClassGV,
@@ -5763,7 +5733,7 @@
                   llvm::PointerType::getUnqual(ObjCTypes.ImpnfABITy));
   llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
                                                    Values);
-  llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
+  llvm::GlobalVariable *GV = GetClassGlobal(ClassName, Weak);
   GV->setInitializer(Init);
   GV->setSection("__DATA, __objc_data");
   GV->setAlignment(
@@ -5849,31 +5819,37 @@
   if (!ID->getClassInterface()->getSuperClass()) {
     // class is root
     flags |= NonFragileABI_Class_Root;
-    SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
-    IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
+    SuperClassGV = GetClassGlobal(ObjCClassName + ClassName,
+                                  ID->getClassInterface()->isWeakImported());
+    IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName,
+                           ID->getClassInterface()->isWeakImported());
+
+    // We are implementing a weak imported interface. Give it external
+    // linkage.
+    if (!ID->isWeakImported() && ID->getClassInterface()->isWeakImported())
+      IsAGV->setLinkage(llvm::GlobalVariable::ExternalLinkage);
   } else {
     // Has a root. Current class is not a root.
     const ObjCInterfaceDecl *Root = ID->getClassInterface();
     while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
       Root = Super;
-    IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
-    if (Root->isWeakImported())
-      IsAGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+    IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString(),
+                           Root->isWeakImported());
     // work on super class metadata symbol.
     std::string SuperClassName =
       ObjCMetaClassName + 
         ID->getClassInterface()->getSuperClass()->getNameAsString();
-    SuperClassGV = GetClassGlobal(SuperClassName);
-    if (ID->getClassInterface()->getSuperClass()->isWeakImported())
-      SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+    SuperClassGV = GetClassGlobal(
+        SuperClassName,
+        ID->getClassInterface()->getSuperClass()->isWeakImported());
   }
   llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
                                                                InstanceStart,
                                                                InstanceSize,ID);
   std::string TClassName = ObjCMetaClassName + ClassName;
-  llvm::GlobalVariable *MetaTClass =
-    BuildClassMetaData(TClassName, IsAGV, SuperClassGV, CLASS_RO_GV,
-                       classIsHidden);
+  llvm::GlobalVariable *MetaTClass = BuildClassMetaData(
+      TClassName, IsAGV, SuperClassGV, CLASS_RO_GV, classIsHidden,
+      ID->isWeakImported());
   DefinedMetaClasses.push_back(MetaTClass);
 
   // Metadata for the class
@@ -5904,9 +5880,9 @@
     // Has a root. Current class is not a root.
     std::string RootClassName =
       ID->getClassInterface()->getSuperClass()->getNameAsString();
-    SuperClassGV = GetClassGlobal(ObjCClassName + RootClassName);
-    if (ID->getClassInterface()->getSuperClass()->isWeakImported())
-      SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+    SuperClassGV = GetClassGlobal(
+        ObjCClassName + RootClassName,
+        ID->getClassInterface()->getSuperClass()->isWeakImported());
   }
   GetClassSizeInfo(ID, InstanceStart, InstanceSize);
   CLASS_RO_GV = BuildClassRoTInitializer(flags,
@@ -5917,8 +5893,10 @@
   TClassName = ObjCClassName + ClassName;
   llvm::GlobalVariable *ClassMD =
     BuildClassMetaData(TClassName, MetaTClass, SuperClassGV, CLASS_RO_GV,
-                       classIsHidden);
+                       classIsHidden,
+                       ID->getClassInterface()->isWeakImported());
   DefinedClasses.push_back(ClassMD);
+  ImplementedClasses.push_back(ID->getClassInterface());
 
   // Determine if this class is also "non-lazy".
   if (ImplementationIsNonLazy(ID))
@@ -5963,7 +5941,7 @@
     ProtocolName);
   PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
   PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
-  CGM.AddUsedGlobal(PTGV);
+  CGM.addCompilerUsedGlobal(PTGV);
   return CGF.Builder.CreateLoad(PTGV);
 }
 
@@ -5988,21 +5966,18 @@
   llvm::Constant *Values[6];
   Values[0] = GetClassName(OCD->getIdentifier());
   // meta-class entry symbol
-  llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
-  if (Interface->isWeakImported())
-    ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
-  
+  llvm::GlobalVariable *ClassGV =
+      GetClassGlobal(ExtClassName, Interface->isWeakImported());
+
   Values[1] = ClassGV;
   std::vector<llvm::Constant*> Methods;
   std::string MethodListName(Prefix);
   MethodListName += "INSTANCE_METHODS_" + Interface->getNameAsString() +
     "_$_" + OCD->getNameAsString();
 
-  for (ObjCCategoryImplDecl::instmeth_iterator
-         i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+  for (const auto *I : OCD->instance_methods())
     // Instance methods should always be defined.
-    Methods.push_back(GetMethodConstant(*i));
-  }
+    Methods.push_back(GetMethodConstant(I));
 
   Values[2] = EmitMethodList(MethodListName,
                              "__DATA, __objc_const",
@@ -6012,11 +5987,9 @@
   MethodListName += "CLASS_METHODS_" + Interface->getNameAsString() + "_$_" +
     OCD->getNameAsString();
   Methods.clear();
-  for (ObjCCategoryImplDecl::classmeth_iterator
-         i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+  for (const auto *I : OCD->class_methods())
     // Class methods should always be defined.
-    Methods.push_back(GetMethodConstant(*i));
-  }
+    Methods.push_back(GetMethodConstant(I));
 
   Values[3] = EmitMethodList(MethodListName,
                              "__DATA, __objc_const",
@@ -6045,13 +6018,14 @@
   llvm::GlobalVariable *GCATV
     = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
                                false,
-                               llvm::GlobalValue::InternalLinkage,
+                               llvm::GlobalValue::PrivateLinkage,
                                Init,
                                ExtCatName);
+  assertPrivateName(GCATV);
   GCATV->setAlignment(
     CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy));
   GCATV->setSection("__DATA, __objc_const");
-  CGM.AddUsedGlobal(GCATV);
+  CGM.addCompilerUsedGlobal(GCATV);
   DefinedCategories.push_back(GCATV);
 
   // Determine if this category is also "non-lazy".
@@ -6107,10 +6081,11 @@
 
   llvm::GlobalVariable *GV =
     new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
-                             llvm::GlobalValue::InternalLinkage, Init, Name);
+                             llvm::GlobalValue::PrivateLinkage, Init, Name);
+  assertPrivateName(GV);
   GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
   GV->setSection(Section);
-  CGM.AddUsedGlobal(GV);
+  CGM.addCompilerUsedGlobal(GV);
   return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
 }
 
@@ -6125,12 +6100,9 @@
   llvm::GlobalVariable *IvarOffsetGV =
     CGM.getModule().getGlobalVariable(Name);
   if (!IvarOffsetGV)
-    IvarOffsetGV =
-      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.LongTy,
-                               false,
-                               llvm::GlobalValue::ExternalLinkage,
-                               0,
-                               Name);
+    IvarOffsetGV = new llvm::GlobalVariable(
+        CGM.getModule(), ObjCTypes.IvarOffsetVarTy, false,
+        llvm::GlobalValue::ExternalLinkage, 0, Name);
   return IvarOffsetGV;
 }
 
@@ -6139,10 +6111,10 @@
                                           const ObjCIvarDecl *Ivar,
                                           unsigned long int Offset) {
   llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
-  IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
-                                                      Offset));
+  IvarOffsetGV->setInitializer(
+      llvm::ConstantInt::get(ObjCTypes.IvarOffsetVarTy, Offset));
   IvarOffsetGV->setAlignment(
-    CGM.getDataLayout().getABITypeAlignment(ObjCTypes.LongTy));
+      CGM.getDataLayout().getABITypeAlignment(ObjCTypes.IvarOffsetVarTy));
 
   // FIXME: This matches gcc, but shouldn't the visibility be set on the use as
   // well (i.e., in ObjCIvarOffsetVariable).
@@ -6160,7 +6132,7 @@
 /// implementation. The return value has type
 /// IvarListnfABIPtrTy.
 ///  struct _ivar_t {
-///   unsigned long int *offset;  // pointer to ivar offset location
+///   unsigned [long] int *offset;  // pointer to ivar offset location
 ///   char *name;
 ///   char *type;
 ///   uint32_t alignment;
@@ -6223,14 +6195,15 @@
   const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
   llvm::GlobalVariable *GV =
     new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
-                             llvm::GlobalValue::InternalLinkage,
+                             llvm::GlobalValue::PrivateLinkage,
                              Init,
                              Prefix + OID->getName());
+  assertPrivateName(GV);
   GV->setAlignment(
     CGM.getDataLayout().getABITypeAlignment(Init->getType()));
   GV->setSection("__DATA, __objc_const");
 
-  CGM.AddUsedGlobal(GV);
+  CGM.addCompilerUsedGlobal(GV);
   return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
 }
 
@@ -6243,10 +6216,9 @@
     // reference or not. At module finalization we add the empty
     // contents for protocols which were referenced but never defined.
     Entry =
-      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy, false,
-                               llvm::GlobalValue::ExternalLinkage,
-                               0,
-                               "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+        new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+                                 false, llvm::GlobalValue::WeakAnyLinkage, 0,
+                                 "\01l_OBJC_PROTOCOL_$_" + PD->getName());
     Entry->setSection("__DATA,__datacoal_nt,coalesced");
   }
 
@@ -6287,9 +6259,7 @@
   std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
   std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
   std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
-  for (ObjCProtocolDecl::instmeth_iterator
-         i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
-    ObjCMethodDecl *MD = *i;
+  for (const auto *MD : PD->instance_methods()) {
     llvm::Constant *C = GetMethodDescriptionConstant(MD);
     if (!C)
       return GetOrEmitProtocolRef(PD);
@@ -6303,9 +6273,7 @@
     }
   }
 
-  for (ObjCProtocolDecl::classmeth_iterator
-         i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
-    ObjCMethodDecl *MD = *i;
+  for (const auto *MD : PD->class_methods()) {
     llvm::Constant *C = GetMethodDescriptionConstant(MD);
     if (!C)
       return GetOrEmitProtocolRef(PD);
@@ -6359,8 +6327,8 @@
                                                    Values);
 
   if (Entry) {
-    // Already created, fix the linkage and update the initializer.
-    Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+    // Already created, update the initializer.
+    assert(Entry->getLinkage() == llvm::GlobalValue::WeakAnyLinkage);
     Entry->setInitializer(Init);
   } else {
     Entry =
@@ -6374,7 +6342,7 @@
     Protocols[PD->getIdentifier()] = Entry;
   }
   Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
-  CGM.AddUsedGlobal(Entry);
+  CGM.addCompilerUsedGlobal(Entry);
 
   // Use this protocol meta-data to build protocol list table in section
   // __DATA, __objc_protolist
@@ -6386,7 +6354,7 @@
     CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
   PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
   PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
-  CGM.AddUsedGlobal(PTGV);
+  CGM.addCompilerUsedGlobal(PTGV);
   return Entry;
 }
 
@@ -6433,12 +6401,13 @@
 
   llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
   GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
-                                llvm::GlobalValue::InternalLinkage,
+                                llvm::GlobalValue::PrivateLinkage,
                                 Init, Name);
+  assertPrivateName(GV);
   GV->setSection("__DATA, __objc_const");
   GV->setAlignment(
     CGM.getDataLayout().getABITypeAlignment(Init->getType()));
-  CGM.AddUsedGlobal(GV);
+  CGM.addCompilerUsedGlobal(GV);
   return llvm::ConstantExpr::getBitCast(GV,
                                         ObjCTypes.ProtocolListnfABIPtrTy);
 }
@@ -6479,12 +6448,6 @@
                                                unsigned CVRQualifiers) {
   ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
   llvm::Value *Offset = EmitIvarOffset(CGF, ID, Ivar);
-
-  if (IsIvarOffsetKnownIdempotent(CGF, ID, Ivar))
-    if (llvm::LoadInst *LI = cast<llvm::LoadInst>(Offset))
-      LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
-                      llvm::MDNode::get(VMContext, ArrayRef<llvm::Value*>()));
-
   return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
                                   Offset);
 }
@@ -6493,7 +6456,20 @@
   CodeGen::CodeGenFunction &CGF,
   const ObjCInterfaceDecl *Interface,
   const ObjCIvarDecl *Ivar) {
-  return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),"ivar");
+  llvm::Value *IvarOffsetValue = ObjCIvarOffsetVariable(Interface, Ivar);
+  IvarOffsetValue = CGF.Builder.CreateLoad(IvarOffsetValue, "ivar");
+  if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
+    cast<llvm::LoadInst>(IvarOffsetValue)
+        ->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+                      llvm::MDNode::get(VMContext, ArrayRef<llvm::Value *>()));
+
+  // The ivar offset is a 32-bit or a 64-bit integer depending on the
+  // architecture. If it is a 32-bit value, cast it to the 64-bit integer
+  // that callers always expect.
+  if (ObjCTypes.IvarOffsetVarTy == ObjCTypes.IntTy)
+    IvarOffsetValue = CGF.Builder.CreateIntCast(
+        IvarOffsetValue, ObjCTypes.LongTy, true, "ivar.conv");
+  return IvarOffsetValue;
 }
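A minimal sketch (helper name is mine) of what the tail of EmitIvarOffset above now does with a 32-bit offset variable: the loaded value is sign-extended to the 64-bit type callers expect, and when IsIvarOffsetKnownIdempotent holds, the load itself is tagged invariant.load so later passes may hoist or combine it.

#include "llvm/IR/IRBuilder.h"

// Illustration only; Offset is the freshly loaded (possibly 32-bit) value and
// LongTy is the 64-bit type callers of EmitIvarOffset expect.
static llvm::Value *widenIvarOffset(llvm::IRBuilder<> &B,
                                    llvm::Value *Offset,
                                    llvm::Type *LongTy) {
  if (Offset->getType() == LongTy)
    return Offset;                  // already the expected 64-bit width
  return B.CreateIntCast(Offset, LongTy, /*isSigned=*/true, "ivar.conv");
}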
 
 static void appendSelectorForMessageRefTable(std::string &buffer,
@@ -6556,7 +6532,7 @@
   // FIXME: don't use this for that.
   llvm::Constant *fn = 0;
   std::string messageRefName("\01l_");
-  if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+  if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
     if (isSuper) {
       fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
       messageRefName += "objc_msgSendSuper2_stret_fixup";
@@ -6603,9 +6579,7 @@
   
   bool requiresnullCheck = false;
   if (CGM.getLangOpts().ObjCAutoRefCount && method)
-    for (ObjCMethodDecl::param_const_iterator i = method->param_begin(),
-         e = method->param_end(); i != e; ++i) {
-      const ParmVarDecl *ParamDecl = (*i);
+    for (const auto *ParamDecl : method->params()) {
       if (ParamDecl->hasAttr<NSConsumedAttr>()) {
         if (!nullReturn.NullBB)
           nullReturn.init(CGF, arg0);
@@ -6652,49 +6626,53 @@
 }
 
 llvm::GlobalVariable *
-CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name, bool Weak) {
+  llvm::GlobalValue::LinkageTypes L =
+      Weak ? llvm::GlobalValue::ExternalWeakLinkage
+           : llvm::GlobalValue::ExternalLinkage;
+
   llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
 
-  if (!GV) {
+  if (!GV)
     GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
-                                  false, llvm::GlobalValue::ExternalLinkage,
-                                  0, Name);
-  }
+                                  false, L, 0, Name);
 
+  assert(GV->getLinkage() == L);
   return GV;
 }
 
 llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
-                                                        IdentifierInfo *II) {
+                                                        IdentifierInfo *II,
+                                                        bool Weak) {
   llvm::GlobalVariable *&Entry = ClassReferences[II];
   
   if (!Entry) {
     std::string ClassName(getClassSymbolPrefix() + II->getName().str());
-    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName, Weak);
     Entry =
     new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
-                             false, llvm::GlobalValue::InternalLinkage,
+                             false, llvm::GlobalValue::PrivateLinkage,
                              ClassGV,
                              "\01L_OBJC_CLASSLIST_REFERENCES_$_");
     Entry->setAlignment(
                         CGM.getDataLayout().getABITypeAlignment(
                                                                 ObjCTypes.ClassnfABIPtrTy));
     Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
-    CGM.AddUsedGlobal(Entry);
+    CGM.addCompilerUsedGlobal(Entry);
   }
-  
+  assertPrivateName(Entry);
   return CGF.Builder.CreateLoad(Entry);
 }
 
 llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF,
                                                   const ObjCInterfaceDecl *ID) {
-  return EmitClassRefFromId(CGF, ID->getIdentifier());
+  return EmitClassRefFromId(CGF, ID->getIdentifier(), ID->isWeakImported());
 }
 
 llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
                                                     CodeGenFunction &CGF) {
   IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
-  return EmitClassRefFromId(CGF, II);
+  return EmitClassRefFromId(CGF, II, false);
 }
 
 llvm::Value *
@@ -6704,19 +6682,20 @@
 
   if (!Entry) {
     std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
-    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName,
+                                                   ID->isWeakImported());
     Entry =
       new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
-                               false, llvm::GlobalValue::InternalLinkage,
+                               false, llvm::GlobalValue::PrivateLinkage,
                                ClassGV,
                                "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
     Entry->setAlignment(
       CGM.getDataLayout().getABITypeAlignment(
         ObjCTypes.ClassnfABIPtrTy));
     Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
-    CGM.AddUsedGlobal(Entry);
+    CGM.addCompilerUsedGlobal(Entry);
   }
-
+  assertPrivateName(Entry);
   return CGF.Builder.CreateLoad(Entry);
 }
 
@@ -6726,23 +6705,23 @@
 llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
                                                       const ObjCInterfaceDecl *ID) {
   llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
-  if (Entry)
-    return CGF.Builder.CreateLoad(Entry);
+  if (!Entry) {
 
-  std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
-  llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
-  Entry =
-    new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
-                             llvm::GlobalValue::InternalLinkage,
-                             MetaClassGV,
-                             "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
-  Entry->setAlignment(
-    CGM.getDataLayout().getABITypeAlignment(
-      ObjCTypes.ClassnfABIPtrTy));
+    std::string MetaClassName(getMetaclassSymbolPrefix() +
+                              ID->getNameAsString());
+    llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
+    Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+                                     false, llvm::GlobalValue::PrivateLinkage,
+                                     MetaClassGV,
+                                     "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+    Entry->setAlignment(
+        CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABIPtrTy));
 
-  Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
-  CGM.AddUsedGlobal(Entry);
+    Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+    CGM.addCompilerUsedGlobal(Entry);
+  }
 
+  assertPrivateName(Entry);
   return CGF.Builder.CreateLoad(Entry);
 }
 
@@ -6752,8 +6731,9 @@
                                               const ObjCInterfaceDecl *ID) {
   if (ID->isWeakImported()) {
     std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
-    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
-    ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName, true);
+    (void)ClassGV;
+    assert(ClassGV->getLinkage() == llvm::GlobalValue::ExternalWeakLinkage);
   }
   
   return EmitClassRef(CGF, ID);
@@ -6819,12 +6799,13 @@
                                      ObjCTypes.SelectorPtrTy);
     Entry =
       new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
-                               llvm::GlobalValue::InternalLinkage,
+                               llvm::GlobalValue::PrivateLinkage,
                                Casted, "\01L_OBJC_SELECTOR_REFERENCES_");
     Entry->setExternallyInitialized(true);
     Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
-    CGM.AddUsedGlobal(Entry);
+    CGM.addCompilerUsedGlobal(Entry);
   }
+  assertPrivateName(Entry);
 
   if (lval)
     return Entry;
@@ -7054,27 +7035,29 @@
   llvm::Constant *Init =
     llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
 
+  llvm::GlobalValue::LinkageTypes L = ForDefinition
+                                          ? llvm::GlobalValue::ExternalLinkage
+                                          : llvm::GlobalValue::WeakAnyLinkage;
   if (Entry) {
     Entry->setInitializer(Init);
   } else {
     Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
-                                     llvm::GlobalValue::WeakAnyLinkage,
+                                     L,
                                      Init,
                                      ("OBJC_EHTYPE_$_" +
                                       ID->getIdentifier()->getName()));
   }
+  assert(Entry->getLinkage() == L);
 
   if (ID->getVisibility() == HiddenVisibility)
     Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
   Entry->setAlignment(CGM.getDataLayout().getABITypeAlignment(
       ObjCTypes.EHTypeTy));
 
-  if (ForDefinition) {
+  if (ForDefinition)
     Entry->setSection("__DATA,__objc_const");
-    Entry->setLinkage(llvm::GlobalValue::ExternalLinkage);
-  } else {
+  else
     Entry->setSection("__DATA,__datacoal_nt,coalesced");
-  }
 
   return Entry;
 }
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index d097b6f..8d6c653 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -21,7 +21,7 @@
 #include "clang/AST/RecordLayout.h"
 #include "clang/AST/StmtObjC.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/IR/CallSite.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -158,7 +158,7 @@
     bool MightThrow;
     llvm::Value *Fn;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       if (!MightThrow) {
         CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
         return;
@@ -303,7 +303,7 @@
     CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
       : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
     }
   };
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 7f030f2..2a8ae90 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -268,7 +268,8 @@
                                   const CodeGen::CGBlockInfo &blockInfo) = 0;
   virtual llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
                                            QualType T) = 0;
-  virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) = 0;
+  virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
+                                               bool Weak = false) = 0;
 
   struct MessageSendInfo {
     const CGFunctionInfo &CallInfo;
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
index 7c454ac..6e1a3c9 100644
--- a/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -33,32 +33,35 @@
   assert(T->isOpenCLSpecificType() &&
          "Not an OpenCL specific type!");
 
+  llvm::LLVMContext& Ctx = CGM.getLLVMContext();
+  uint32_t ImgAddrSpc =
+    CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
   switch (cast<BuiltinType>(T)->getKind()) {
   default: 
     llvm_unreachable("Unexpected opencl builtin type!");
     return 0;
   case BuiltinType::OCLImage1d:
     return llvm::PointerType::get(llvm::StructType::create(
-                           CGM.getLLVMContext(), "opencl.image1d_t"), 0);
+                           Ctx, "opencl.image1d_t"), ImgAddrSpc);
   case BuiltinType::OCLImage1dArray:
     return llvm::PointerType::get(llvm::StructType::create(
-                           CGM.getLLVMContext(), "opencl.image1d_array_t"), 0);
+                           Ctx, "opencl.image1d_array_t"), ImgAddrSpc);
   case BuiltinType::OCLImage1dBuffer:
     return llvm::PointerType::get(llvm::StructType::create(
-                           CGM.getLLVMContext(), "opencl.image1d_buffer_t"), 0);
+                           Ctx, "opencl.image1d_buffer_t"), ImgAddrSpc);
   case BuiltinType::OCLImage2d:
     return llvm::PointerType::get(llvm::StructType::create(
-                           CGM.getLLVMContext(), "opencl.image2d_t"), 0);
+                           Ctx, "opencl.image2d_t"), ImgAddrSpc);
   case BuiltinType::OCLImage2dArray:
     return llvm::PointerType::get(llvm::StructType::create(
-                           CGM.getLLVMContext(), "opencl.image2d_array_t"), 0);
+                           Ctx, "opencl.image2d_array_t"), ImgAddrSpc);
   case BuiltinType::OCLImage3d:
     return llvm::PointerType::get(llvm::StructType::create(
-                           CGM.getLLVMContext(), "opencl.image3d_t"), 0);
+                           Ctx, "opencl.image3d_t"), ImgAddrSpc);
   case BuiltinType::OCLSampler:
-    return llvm::IntegerType::get(CGM.getLLVMContext(),32);
+    return llvm::IntegerType::get(Ctx, 32);
   case BuiltinType::OCLEvent:
     return llvm::PointerType::get(llvm::StructType::create(
-                           CGM.getLLVMContext(), "opencl.event_t"), 0);
+                           Ctx, "opencl.event_t"), 0);
   }
 }
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index aa687b9..7049df7 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -332,11 +332,11 @@
   
   switch (Ty->getLinkage()) {
   case NoLinkage:
-  case VisibleNoLinkage:
   case InternalLinkage:
   case UniqueExternalLinkage:
     return llvm::GlobalValue::InternalLinkage;
 
+  case VisibleNoLinkage:
   case ExternalLinkage:
     if (!CGM.getLangOpts().RTTI) {
       // RTTI is not enabled, which means that this type info struct is going
@@ -544,8 +544,25 @@
   
   // And the name.
   llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
+  llvm::Constant *TypeNameField;
 
-  Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy));
+  // If we're supposed to demote the visibility, be sure to set a flag
+  // to use a string comparison for type_info comparisons.
+  CGCXXABI::RTTIUniquenessKind RTTIUniqueness =
+      CGM.getCXXABI().classifyRTTIUniqueness(Ty, Linkage);
+  if (RTTIUniqueness != CGCXXABI::RUK_Unique) {
+    // The flag is the sign bit, which on ARM64 is defined to be clear
+    // for global pointers.  This is very ARM64-specific.
+    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
+    llvm::Constant *flag =
+        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
+    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
+    TypeNameField =
+        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
+  } else {
+    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
+  }
+  Fields.push_back(TypeNameField);
 
   switch (Ty->getTypeClass()) {
 #define TYPE(Class, Base)
@@ -644,32 +661,35 @@
     OldGV->eraseFromParent();
   }
 
-  // GCC only relies on the uniqueness of the type names, not the
-  // type_infos themselves, so we can emit these as hidden symbols.
-  // But don't do this if we're worried about strict visibility
-  // compatibility.
-  if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
-    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+  // The Itanium ABI specifies that type_info objects must be globally
+  // unique, with one exception: if the type is an incomplete class
+  // type or a (possibly indirect) pointer to one.  That exception
+  // affects the general case of comparing type_info objects produced
+  // by the typeid operator, which is why the comparison operators on
+  // std::type_info generally use the type_info name pointers instead
+  // of the object addresses.  However, the language's built-in uses
+  // of RTTI generally require class types to be complete, even when
+  // manipulating pointers to those class types.  This allows the
+  // implementation of dynamic_cast to rely on address equality tests,
+  // which is much faster.
 
-    CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForRTTI);
-    CGM.setTypeVisibility(TypeName, RD, CodeGenModule::TVK_ForRTTIName);
-  } else {
-    Visibility TypeInfoVisibility = DefaultVisibility;
-    if (CGM.getCodeGenOpts().HiddenWeakVTables &&
-        Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
-      TypeInfoVisibility = HiddenVisibility;
+  // All of this is to say that it's important that both the type_info
+  // object and the type_info name be uniqued when weakly emitted.
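+  // An illustrative consequence (an added example, not part of the original
+  // comment): for an incomplete class such as 'struct S;', the type_info for
+  // S* emitted from two different modules may be two distinct objects, so the
+  // runtime comparison may fall back to comparing the name strings.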
 
-    // The type name should have the same visibility as the type itself.
-    Visibility ExplicitVisibility = Ty->getVisibility();
-    TypeName->setVisibility(CodeGenModule::
-                            GetLLVMVisibility(ExplicitVisibility));
-  
-    TypeInfoVisibility = minVisibility(TypeInfoVisibility, Ty->getVisibility());
-    GV->setVisibility(CodeGenModule::GetLLVMVisibility(TypeInfoVisibility));
+  // Give the type_info object and name the formal visibility of the
+  // type itself.
+  Visibility formalVisibility = Ty->getVisibility();
+  llvm::GlobalValue::VisibilityTypes llvmVisibility =
+    CodeGenModule::GetLLVMVisibility(formalVisibility);
+  TypeName->setVisibility(llvmVisibility);
+  GV->setVisibility(llvmVisibility);
+
+  // FIXME: integrate this better into the above when we move to trunk
+  if (RTTIUniqueness == CGCXXABI::RUK_NonUniqueHidden) {
+    TypeName->setVisibility(llvm::GlobalValue::HiddenVisibility);
+    GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
   }
 
-  GV->setUnnamedAddr(true);
-
   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
 }
 
@@ -766,9 +786,8 @@
   }
 
   // Walk all bases.
-  for (CXXRecordDecl::base_class_const_iterator I = BaseDecl->bases_begin(),
-       E = BaseDecl->bases_end(); I != E; ++I) 
-    Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+  for (const auto &I : BaseDecl->bases()) 
+    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
   
   return Flags;
 }
@@ -778,9 +797,8 @@
   SeenBases Bases;
   
   // Walk all bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) 
-    Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+  for (const auto &I : RD->bases()) 
+    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
   
   return Flags;
 }
@@ -827,15 +845,12 @@
   //       __offset_shift = 8
   //     };
   //   };
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    const CXXBaseSpecifier *Base = I;
-
+  for (const auto &Base : RD->bases()) {
     // The __base_type member points to the RTTI for the base type.
-    Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(Base->getType()));
+    Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(Base.getType()));
 
     const CXXRecordDecl *BaseDecl = 
-      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+      cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
 
     int64_t OffsetFlags = 0;
     
@@ -844,7 +859,7 @@
     // subobject. For a virtual base, this is the offset in the virtual table of
     // the virtual base offset for the virtual base referenced (negative).
     CharUnits Offset;
-    if (Base->isVirtual())
+    if (Base.isVirtual())
       Offset = 
         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
     else {
@@ -856,9 +871,9 @@
     
     // The low-order byte of __offset_flags contains flags, as given by the 
     // masks from the enumeration __offset_flags_masks.
-    if (Base->isVirtual())
+    if (Base.isVirtual())
       OffsetFlags |= BCTI_Virtual;
-    if (Base->getAccessSpecifier() == AS_public)
+    if (Base.getAccessSpecifier() == AS_public)
       OffsetFlags |= BCTI_Public;
 
     Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index b29fc98..0fc7b8a 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -130,7 +130,7 @@
   llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
 
   // FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
-  // map for both virtual and non virtual bases.
+  // map for both virtual and non-virtual bases.
   llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
 
   /// Map from virtual bases to their field index in the complete object.
@@ -201,7 +201,7 @@
 
   /// \brief Return the BitFieldInfo that corresponds to the field FD.
   const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
-    assert(FD->isBitField() && "Invalid call for non bit-field decl!");
+    assert(FD->isBitField() && "Invalid call for non-bit-field decl!");
     llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
       it = BitFields.find(FD);
     assert(it != BitFields.end() && "Unable to find bitfield info");
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index ab92563..75b4504 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -25,205 +25,560 @@
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Type.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace clang;
 using namespace CodeGen;
 
 namespace {
-
-class CGRecordLayoutBuilder {
-public:
-  /// FieldTypes - Holds the LLVM types that the struct is created from.
-  /// 
+/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
+/// llvm::Type.  Some of the lowering is straightforward, some is not.  Here we
+/// detail some of the complexities and weirdnesses.
+/// * LLVM does not have unions - Unions can, in theory, be represented by any
+///   llvm::Type with correct size.  We choose a field via a specific heuristic
+///   and add padding if necessary.
+/// * LLVM does not have bitfields - Bitfields are collected into contiguous
+///   runs and allocated as a single storage type for the run.  ASTRecordLayout
+///   contains enough information to determine where the runs break.  Microsoft
+///   and Itanium follow different rules and use different codepaths.
+/// * It is desired that, when possible, bitfields use the appropriate iN type
+///   when lowered to llvm types.  For example, unsigned x : 24 gets lowered
+///   to i24.  This isn't always possible because i24 has a storage size of
+///   32 bits, and if it is possible to use that extra byte of padding we must
+///   use [3 x i8] instead of i24.  The function clipTailPadding does this.
+///   C++ examples that require clipping:
+///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
+///   struct A { int a : 24; }; // a must be clipped because a struct like B
+///   could exist: struct B : A { char b; }; // b goes at offset 3
+///   (see also the worked example at the end of this comment).
+/// * Clang ignores zero-sized bitfields and zero-sized bases but *not*
+///   zero-sized fields.  The existing asserts suggest that LLVM assumes that
+///   *every* field has an underlying storage type.  Therefore empty structures
+///   containing zero-sized subobjects such as empty records or zero-sized
+///   arrays still get a zero-sized (empty struct) storage type.
+/// * Clang reads the complete type rather than the base type when generating
+///   code to access fields.  Bitfields in tail position with tail padding may
+///   be clipped in the base class but not the complete class (we may discover
+///   that the tail padding is not used in the complete class.) However,
+///   because LLVM reads from the complete type it can generate incorrect code
+///   if we do not clip the tail padding off of the bitfield in the complete
+///   layout.  This introduces a somewhat awkward extra unnecessary clip stage.
+///   The location of the clip is stored internally as a sentinel of type
+///   SCISSOR.  If LLVM were updated to read base types (which it probably
+///   should because locations of things such as VBases are bogus in the llvm
+///   type anyway) then we could eliminate the SCISSOR.
+/// * Itanium allows nearly empty primary virtual bases.  These bases don't
+///   get their own storage because they're laid out as part of another base
+///   or at the beginning of the structure.  Determining if a VBase actually
+///   gets storage awkwardly involves a walk of all bases.
+/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
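+///
+/// A worked example (an illustrative sketch, assuming an Itanium-like layout):
+///   struct S { int a : 3; int b : 5; int c : 10; char d; };
+///   a, b and c form a single bitfield run covering bits [0, 18), so the run
+///   initially gets an i24 storage member.  Because d lands at byte offset 3,
+///   inside i24's tail padding, clipTailPadding() rewrites that storage member
+///   to [3 x i8] before packedness is determined.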
+struct CGRecordLowering {
+  // MemberInfo is a helper structure that contains information about a record
+  // member.  In addition to the standard member types, there exists a
+  // sentinel member type that ensures correct rounding.
+  struct MemberInfo {
+    CharUnits Offset;
+    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
+    llvm::Type *Data;
+    union {
+      const FieldDecl *FD;
+      const CXXRecordDecl *RD;
+    };
+    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
+               const FieldDecl *FD = 0)
+      : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
+    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
+               const CXXRecordDecl *RD)
+      : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
+    // MemberInfos are sorted so we define a < operator.
+    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
+  };
+  // The constructor.
+  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D);
+  // Short helper routines.
+  /// \brief Constructs a MemberInfo instance from an offset and llvm::Type *.
+  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
+    return MemberInfo(Offset, MemberInfo::Field, Data);
+  }
+  bool useMSABI() {
+    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+           D->isMsStruct(Context);
+  }
+  /// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
+  llvm::Type *getIntNType(uint64_t NumBits) {
+    return llvm::Type::getIntNTy(Types.getLLVMContext(),
+        (unsigned)llvm::RoundUpToAlignment(NumBits, 8));
+  }
+  /// \brief Gets an llvm type of size NumBytes and alignment 1.
+  llvm::Type *getByteArrayType(CharUnits NumBytes) {
+    assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
+    llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
+    return NumBytes == CharUnits::One() ? Type :
+        (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
+  }
+  /// \brief Gets the storage type for a field decl and handles storage
+  /// for Itanium bitfields that are smaller than their declared type.
+  llvm::Type *getStorageType(const FieldDecl *FD) {
+    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
+    return useMSABI() || !FD->isBitField() ? Type :
+        getIntNType(std::min(FD->getBitWidthValue(Context),
+                             (unsigned)Context.toBits(getSize(Type))));
+  }
+  /// \brief Gets the llvm BaseSubobject type from a CXXRecordDecl.
+  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
+    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
+  }
+  CharUnits bitsToCharUnits(uint64_t BitOffset) {
+    return Context.toCharUnitsFromBits(BitOffset);
+  }
+  CharUnits getSize(llvm::Type *Type) {
+    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
+  }
+  CharUnits getAlignment(llvm::Type *Type) {
+    return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
+  }
+  bool isZeroInitializable(const FieldDecl *FD) {
+    const Type *Type = FD->getType()->getBaseElementTypeUnsafe();
+    if (const MemberPointerType *MPT = Type->getAs<MemberPointerType>())
+      return Types.getCXXABI().isZeroInitializable(MPT);
+    if (const RecordType *RT = Type->getAs<RecordType>())
+      return isZeroInitializable(RT->getDecl());
+    return true;
+  }
+  bool isZeroInitializable(const RecordDecl *RD) {
+    return Types.getCGRecordLayout(RD).isZeroInitializable();
+  }
+  void appendPaddingBytes(CharUnits Size) {
+    if (!Size.isZero())
+      FieldTypes.push_back(getByteArrayType(Size));
+  }
+  uint64_t getFieldBitOffset(const FieldDecl *FD) {
+    return Layout.getFieldOffset(FD->getFieldIndex());
+  }
+  // Layout routines.
+  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, 
+                       llvm::Type *StorageType);
+  /// \brief Lowers an ASTRecordLayout to a llvm type.
+  void lower(bool NonVirtualBaseType);
+  void lowerUnion();
+  void accumulateFields();
+  void accumulateBitFields(RecordDecl::field_iterator Field,
+                        RecordDecl::field_iterator FieldEnd);
+  void accumulateBases();
+  void accumulateVPtrs();
+  void accumulateVBases();
+  /// \brief Recursively searches all of the bases to find out if a vbase is
+  /// not the primary vbase of some base class.
+  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
+  void calculateZeroInit();
+  /// \brief Lowers bitfield storage types to I8 arrays for bitfields with tail
+  /// padding that is or can potentially be used.
+  void clipTailPadding();
+  /// \brief Determines if we need a packed llvm struct.
+  void determinePacked();
+  /// \brief Inserts padding everywhere it's needed.
+  void insertPadding();
+  /// \brief Fills out the structures that are ultimately consumed.
+  void fillOutputFields();
+  // Input memoization fields.
+  CodeGenTypes &Types;
+  const ASTContext &Context;
+  const RecordDecl *D;
+  const CXXRecordDecl *RD;
+  const ASTRecordLayout &Layout;
+  const llvm::DataLayout &DataLayout;
+  // Helpful intermediate data-structures.
+  std::vector<MemberInfo> Members;
+  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
   SmallVector<llvm::Type *, 16> FieldTypes;
-
-  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
-  /// of the struct. For example, consider:
-  ///
-  /// struct A { int i; };
-  /// struct B { void *v; };
-  /// struct C : virtual A, B { };
-  ///
-  /// The LLVM type of C will be
-  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
-  ///
-  /// And the LLVM type of the non-virtual base struct will be
-  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
-  ///
-  /// This only gets initialized if the base subobject type is
-  /// different from the complete-object type.
-  llvm::StructType *BaseSubobjectType;
-
-  /// FieldInfo - Holds a field and its corresponding LLVM field number.
   llvm::DenseMap<const FieldDecl *, unsigned> Fields;
-
-  /// BitFieldInfo - Holds location and size information about a bit field.
   llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
-
   llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
   llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
-
-  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
-  /// primary base classes for some other direct or indirect base class.
-  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
-
-  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
-  /// avoid laying out virtual bases more than once.
-  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;
-  
-  /// IsZeroInitializable - Whether this struct can be C++
-  /// zero-initialized with an LLVM zeroinitializer.
-  bool IsZeroInitializable;
-  bool IsZeroInitializableAsBase;
-
-  /// Packed - Whether the resulting LLVM struct will be packed or not.
-  bool Packed;
-
+  bool IsZeroInitializable : 1;
+  bool IsZeroInitializableAsBase : 1;
+  bool Packed : 1;
 private:
-  CodeGenTypes &Types;
-
-  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
-  /// last base laid out. Used so that we can replace the last laid out base
-  /// type with an i8 array if needed.
-  struct LastLaidOutBaseInfo {
-    CharUnits Offset;
-    CharUnits NonVirtualSize;
-
-    bool isValid() const { return !NonVirtualSize.isZero(); }
-    void invalidate() { NonVirtualSize = CharUnits::Zero(); }
-  
-  } LastLaidOutBase;
-
-  /// Alignment - Contains the alignment of the RecordDecl.
-  CharUnits Alignment;
-
-  /// NextFieldOffset - Holds the next field offset.
-  CharUnits NextFieldOffset;
-
-  /// LayoutUnionField - Will layout a field in an union and return the type
-  /// that the field will have.
-  llvm::Type *LayoutUnionField(const FieldDecl *Field,
-                               const ASTRecordLayout &Layout);
-  
-  /// LayoutUnion - Will layout a union RecordDecl.
-  void LayoutUnion(const RecordDecl *D);
-
-  /// Lay out a sequence of contiguous bitfields.
-  bool LayoutBitfields(const ASTRecordLayout &Layout,
-                       unsigned &FirstFieldNo,
-                       RecordDecl::field_iterator &FI,
-                       RecordDecl::field_iterator FE);
-
-  /// LayoutFields - try to layout all fields in the record decl.
-  /// Returns false if the operation failed because the struct is not packed.
-  bool LayoutFields(const RecordDecl *D);
-
-  /// Layout a single base, virtual or non-virtual
-  bool LayoutBase(const CXXRecordDecl *base,
-                  const CGRecordLayout &baseLayout,
-                  CharUnits baseOffset);
-
-  /// LayoutVirtualBase - layout a single virtual base.
-  bool LayoutVirtualBase(const CXXRecordDecl *base,
-                         CharUnits baseOffset);
-
-  /// LayoutVirtualBases - layout the virtual bases of a record decl.
-  bool LayoutVirtualBases(const CXXRecordDecl *RD,
-                          const ASTRecordLayout &Layout);
-
-  /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
-  /// like MSVC.
-  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
-                            const ASTRecordLayout &Layout);
-  
-  /// LayoutNonVirtualBase - layout a single non-virtual base.
-  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
-                            CharUnits baseOffset);
-  
-  /// LayoutNonVirtualBases - layout the virtual bases of a record decl.
-  bool LayoutNonVirtualBases(const CXXRecordDecl *RD, 
-                             const ASTRecordLayout &Layout);
-
-  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
-  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);
-  
-  /// LayoutField - layout a single field. Returns false if the operation failed
-  /// because the current struct is not packed.
-  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
-
-  /// LayoutBitField - layout a single bit field.
-  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
-
-  /// AppendField - Appends a field with the given offset and type.
-  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);
-
-  /// AppendPadding - Appends enough padding bytes so that the total
-  /// struct size is a multiple of the field alignment.
-  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);
-
-  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
-  /// tail padding of a previous base. If this happens, the type of the previous
-  /// base needs to be changed to an array of i8. Returns true if the last
-  /// laid out base was resized.
-  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);
-
-  /// getByteArrayType - Returns a byte array type with the given number of
-  /// elements.
-  llvm::Type *getByteArrayType(CharUnits NumBytes);
-  
-  /// AppendBytes - Append a given number of bytes to the record.
-  void AppendBytes(CharUnits numBytes);
-
-  /// AppendTailPadding - Append enough tail padding so that the type will have
-  /// the passed size.
-  void AppendTailPadding(CharUnits RecordSize);
-
-  CharUnits getTypeAlignment(llvm::Type *Ty) const;
-
-  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
-  /// LLVM element types.
-  CharUnits getAlignmentAsLLVMStruct() const;
-
-  /// CheckZeroInitializable - Check if the given type contains a pointer
-  /// to data member.
-  void CheckZeroInitializable(QualType T);
-
-public:
-  CGRecordLayoutBuilder(CodeGenTypes &Types)
-    : BaseSubobjectType(0),
-      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
-      Packed(false), Types(Types) { }
-
-  /// Layout - Will layout a RecordDecl.
-  void Layout(const RecordDecl *D);
+  CGRecordLowering(const CGRecordLowering &) LLVM_DELETED_FUNCTION;
+  void operator =(const CGRecordLowering &) LLVM_DELETED_FUNCTION;
 };
+} // namespace {
 
+CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D)
+  : Types(Types), Context(Types.getContext()), D(D),
+    RD(dyn_cast<CXXRecordDecl>(D)),
+    Layout(Types.getContext().getASTRecordLayout(D)),
+    DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
+    IsZeroInitializableAsBase(true), Packed(false) {}
+
+void CGRecordLowering::setBitFieldInfo(
+    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
+  CGBitFieldInfo &Info = BitFields[FD];
+  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
+  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
+  Info.Size = FD->getBitWidthValue(Context);
+  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
+  // Here we calculate the actual storage alignment of the bits.  E.g., if we've
+  // got an alignment >= 2 and the bitfield starts at offset 6 we've got an
+  // alignment of 2.
+  Info.StorageAlignment =
+      Layout.getAlignment().alignmentAtOffset(StartOffset).getQuantity();
+  if (Info.Size > Info.StorageSize)
+    Info.Size = Info.StorageSize;
+  // Reverse the bit offsets for big endian machines. Because we represent
+  // a bitfield as a single large integer load, we can imagine the bits
+  // counting from the most-significant-bit instead of the
+  // least-significant-bit.
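+  // For example (an illustrative figure): a 4-bit field at little-endian bit
+  // offset 0 in a 32-bit storage unit gets Offset 32 - (0 + 4) = 28 on a
+  // big-endian target.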
+  if (DataLayout.isBigEndian())
+    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
 }
 
-void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
-  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
-  Alignment = Layout.getAlignment();
-  Packed = D->hasAttr<PackedAttr>() || Layout.getSize() % Alignment != 0;
+void CGRecordLowering::lower(bool NVBaseType) {
+  // The lowering process implemented in this function takes a variety of
+  // carefully ordered phases.
+  // 1) Store all members (fields and bases) in a list and sort them by offset.
+  // 2) Add a 1-byte capstone member at the Size of the structure.
+  // 3) Clip bitfield storage members if their tail padding is or might be
+  //    used by another field or base.  The clipping process uses the capstone 
+  //    by treating it as another object that occurs after the record.
+  // 4) Determine if the llvm-struct requires packing.  It's important that this
+  //    phase occur after clipping, because clipping changes the llvm type.
+  //    This phase reads the offset of the capstone when determining packedness
+  //    and updates the alignment of the capstone to be equal to the alignment
+  //    of the record after doing so.
+  // 5) Insert padding everywhere it is needed.  This phase requires 'Packed' to
+  //    have been computed and needs to know the alignment of the record in
+  //    order to understand if explicit tail padding is needed.
+  // 6) Remove the capstone, we don't need it anymore.
+  // 7) Determine if this record can be zero-initialized.  This phase could have
+  //    been placed anywhere after phase 1.
+  // 8) Format the complete list of members in a way that can be consumed by
+  //    CodeGenTypes::ComputeRecordLayout.
+  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
+  if (D->isUnion())
+    return lowerUnion();
+  accumulateFields();
+  // RD implies C++.
+  if (RD) {
+    accumulateVPtrs();
+    accumulateBases();
+    if (Members.empty())
+      return appendPaddingBytes(Size);
+    if (!NVBaseType)
+      accumulateVBases();
+  }
+  std::stable_sort(Members.begin(), Members.end());
+  Members.push_back(StorageInfo(Size, getIntNType(8)));
+  clipTailPadding();
+  determinePacked();
+  insertPadding();
+  Members.pop_back();
+  calculateZeroInit();
+  fillOutputFields();
+}
 
-  if (D->isUnion()) {
-    LayoutUnion(D);
+void CGRecordLowering::lowerUnion() {
+  CharUnits LayoutSize = Layout.getSize();
+  llvm::Type *StorageType = 0;
+  // Compute zero-initializable status.
+  if (!D->field_empty() && !isZeroInitializable(*D->field_begin()))
+    IsZeroInitializable = IsZeroInitializableAsBase = false;
+  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
+  // locate the "most appropriate" storage type.  The heuristic for finding the
+  // storage type isn't necessary; the first (non-0-length-bitfield) field's
+  // type would work fine and be simpler but would be different from what we've
+  // been doing and cause lit tests to change.
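+  // An illustrative example of the heuristic (a sketch, not from the original
+  // comment): for
+  //   union U { char c[8]; int i; };
+  // the int field wins on alignment, so the storage type becomes i32 and the
+  // remaining four bytes are appended as padding below.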
+  for (const auto *Field : D->fields()) {
+    if (Field->isBitField()) {
+      // Skip 0 sized bitfields.
+      if (Field->getBitWidthValue(Context) == 0)
+        continue;
+      llvm::Type *FieldType = getStorageType(Field);
+      if (LayoutSize < getSize(FieldType))
+        FieldType = getByteArrayType(LayoutSize);
+      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
+    }
+    Fields[Field] = 0;
+    llvm::Type *FieldType = getStorageType(Field);
+    // Conditionally update our storage type if we've got a new "better" one.
+    if (!StorageType ||
+        getAlignment(FieldType) >  getAlignment(StorageType) ||
+        (getAlignment(FieldType) == getAlignment(StorageType) &&
+        getSize(FieldType) > getSize(StorageType)))
+      StorageType = FieldType;
+  }
+  // If we have no storage type just pad to the appropriate size and return.
+  if (!StorageType)
+    return appendPaddingBytes(LayoutSize);
+  // If our storage size was bigger than our required size (can happen in the
+  // case of packed bitfields on Itanium) then just use an I8 array.
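+  // A plausible instance (illustrative; the exact layout is an assumption):
+  //   union __attribute__((packed)) U { int b : 24; };
+  // has a layout size of 3 bytes while the chosen i24 storage allocates 4, so
+  // the storage is demoted to [3 x i8] here.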
+  if (LayoutSize < getSize(StorageType))
+    StorageType = getByteArrayType(LayoutSize);
+  FieldTypes.push_back(StorageType);
+  appendPaddingBytes(LayoutSize - getSize(StorageType));
+  // Set packed if we need it.
+  if (LayoutSize % getAlignment(StorageType))
+    Packed = true;
+}
+
+void CGRecordLowering::accumulateFields() {
+  for (RecordDecl::field_iterator Field = D->field_begin(),
+                                  FieldEnd = D->field_end();
+    Field != FieldEnd;)
+    if (Field->isBitField()) {
+      RecordDecl::field_iterator Start = Field;
+      // Iterate to gather the list of bitfields.
+      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
+      accumulateBitFields(Start, Field);
+    } else {
+      Members.push_back(MemberInfo(
+          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
+          getStorageType(*Field), *Field));
+      ++Field;
+    }
+}
+
+void
+CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
+                                      RecordDecl::field_iterator FieldEnd) {
+  // Run stores the first element of the current run of bitfields.  FieldEnd is
+  // used as a special value to note that we don't have a current run.  A
+  // bitfield run is a contiguous collection of bitfields that can be stored in
+  // the same storage block.  Zero-sized bitfields and bitfields that would
+  // cross an alignment boundary break a run and start a new one.
+  RecordDecl::field_iterator Run = FieldEnd;
+  // Tail is the offset of the first bit past the end of the current run.  It's
+  // used to determine if the ASTRecordLayout is treating these two bitfields as
+  // contiguous.  StartBitOffset is the offset of the beginning of the Run.
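+  // An illustrative case (a sketch, not from the original comment): in
+  //   struct S { int a : 4; int : 0; int b : 4; };
+  // the unnamed zero-width bitfield ends the first run, so a and b each get
+  // their own storage member rather than sharing one.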
+  uint64_t StartBitOffset, Tail = 0;
+  if (useMSABI()) {
+    for (; Field != FieldEnd; ++Field) {
+      uint64_t BitOffset = getFieldBitOffset(*Field);
+      // Zero-width bitfields end runs.
+      if (Field->getBitWidthValue(Context) == 0) {
+        Run = FieldEnd;
+        continue;
+      }
+      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
+      // If we don't have a run yet, or don't live within the previous run's
+      // allocated storage then we allocate some storage and start a new run.
+      if (Run == FieldEnd || BitOffset >= Tail) {
+        Run = Field;
+        StartBitOffset = BitOffset;
+        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
+        // Add the storage member to the record.  This must be added to the
+        // record before the bitfield members so that it gets laid out before
+        // the bitfields it contains get laid out.
+        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
+      }
+      // Bitfields get the offset of their storage but come afterward and remain
+      // there after a stable sort.
+      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
+                                   MemberInfo::Field, 0, *Field));
+    }
     return;
   }
+  for (;;) {
+    // Check to see if we need to start a new run.
+    if (Run == FieldEnd) {
+      // If we're out of fields, return.
+      if (Field == FieldEnd)
+        break;
+      // Any non-zero-length bitfield can start a new run.
+      if (Field->getBitWidthValue(Context) != 0) {
+        Run = Field;
+        StartBitOffset = getFieldBitOffset(*Field);
+        Tail = StartBitOffset + Field->getBitWidthValue(Context);
+      }
+      ++Field;
+      continue;
+    }
+    // Add bitfields to the run as long as they qualify.
+    if (Field != FieldEnd && Field->getBitWidthValue(Context) != 0 &&
+        Tail == getFieldBitOffset(*Field)) {
+      Tail += Field->getBitWidthValue(Context);
+      ++Field;
+      continue;
+    }
+    // We've hit a break-point in the run and need to emit a storage field.
+    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
+    // Add the storage member to the record and set the bitfield info for all of
+    // the bitfields in the run.  Bitfields get the offset of their storage but
+    // come afterward and remain there after a stable sort.
+    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
+    for (; Run != Field; ++Run)
+      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
+                                   MemberInfo::Field, 0, *Run));
+    Run = FieldEnd;
+  }
+}
 
-  if (LayoutFields(D))
+void CGRecordLowering::accumulateBases() {
+  // If we've got a primary virtual base, we need to add it with the bases.
+  if (Layout.isPrimaryBaseVirtual())
+    Members.push_back(StorageInfo(
+      CharUnits::Zero(),
+      getStorageType(Layout.getPrimaryBase())));
+  // Accumulate the non-virtual bases.
+  for (const auto &Base : RD->bases()) {
+    if (Base.isVirtual())
+      continue;
+    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+    if (!BaseDecl->isEmpty())
+      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
+          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
+  }
+}
+
+void CGRecordLowering::accumulateVPtrs() {
+  if (Layout.hasOwnVFPtr())
+    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
+        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
+            getPointerTo()->getPointerTo()));
+  if (Layout.hasOwnVBPtr())
+    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
+        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
+}
+
+void CGRecordLowering::accumulateVBases() {
+  Members.push_back(MemberInfo(Layout.getNonVirtualSize(),
+                               MemberInfo::Scissor, 0, RD));
+  for (const auto &Base : RD->vbases()) {
+    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+    if (BaseDecl->isEmpty())
+      continue;
+    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
+    // If the vbase is a primary virtual base of some base, then it doesn't
+    // get its own storage location but instead lives inside of that base.
+    if (!useMSABI() && Context.isNearlyEmpty(BaseDecl) &&
+        !hasOwnStorage(RD, BaseDecl)) {
+      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, 0, BaseDecl));
+      continue;
+    }
+    // If we've got a vtordisp, add it as a storage type.
+    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
+      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
+                                    getIntNType(32)));
+    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
+                                 getStorageType(BaseDecl), BaseDecl));
+  }
+}
+
+bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
+                                     const CXXRecordDecl *Query) {
+  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
+  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
+    return false;
+  for (const auto &Base : Decl->bases())
+    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
+      return false;
+  return true;
+}
+
+void CGRecordLowering::calculateZeroInit() {
+  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
+                                               MemberEnd = Members.end();
+       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
+    if (Member->Kind == MemberInfo::Field) {
+      if (!Member->FD || isZeroInitializable(Member->FD))
+        continue;
+      IsZeroInitializable = IsZeroInitializableAsBase = false;
+    } else if (Member->Kind == MemberInfo::Base ||
+               Member->Kind == MemberInfo::VBase) {
+      if (isZeroInitializable(Member->RD))
+        continue;
+      IsZeroInitializable = false;
+      if (Member->Kind == MemberInfo::Base)
+        IsZeroInitializableAsBase = false;
+    }
+  }
+}
+
+void CGRecordLowering::clipTailPadding() {
+  std::vector<MemberInfo>::iterator Prior = Members.begin();
+  CharUnits Tail = getSize(Prior->Data);
+  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
+                                         MemberEnd = Members.end();
+       Member != MemberEnd; ++Member) {
+    // Only members with data and the scissor can cut into tail padding.
+    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
+      continue;
+    if (Member->Offset < Tail) {
+      assert(Prior->Kind == MemberInfo::Field && !Prior->FD &&
+             "Only storage fields have tail padding!");
+      Prior->Data = getByteArrayType(bitsToCharUnits(llvm::RoundUpToAlignment(
+          cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
+    }
+    if (Member->Data)
+      Prior = Member;
+    Tail = Prior->Offset + getSize(Prior->Data);
+  }
+}
+
+void CGRecordLowering::determinePacked() {
+  CharUnits Alignment = CharUnits::One();
+  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
+                                               MemberEnd = Members.end();
+       Member != MemberEnd; ++Member) {
+    if (!Member->Data)
+      continue;
+    // If any member falls at an offset that is not a multiple of its alignment,
+    // then the entire record must be packed.
+    if (Member->Offset % getAlignment(Member->Data))
+      Packed = true;
+    Alignment = std::max(Alignment, getAlignment(Member->Data));
+  }
+  // If the size of the record (the capstone's offset) is not a multiple of the
+  // record's alignment, it must be packed.
+  if (Members.back().Offset % Alignment)
+    Packed = true;
+  // Update the alignment of the sentinel.
+  if (!Packed)
+    Members.back().Data = getIntNType(Context.toBits(Alignment));
+}
+
+void CGRecordLowering::insertPadding() {
+  std::vector<std::pair<CharUnits, CharUnits> > Padding;
+  CharUnits Size = CharUnits::Zero();
+  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
+                                               MemberEnd = Members.end();
+       Member != MemberEnd; ++Member) {
+    if (!Member->Data)
+      continue;
+    CharUnits Offset = Member->Offset;
+    assert(Offset >= Size);
+    // Insert padding if we need to.
+    if (Offset != Size.RoundUpToAlignment(Packed ? CharUnits::One() :
+                                          getAlignment(Member->Data)))
+      Padding.push_back(std::make_pair(Size, Offset - Size));
+    Size = Offset + getSize(Member->Data);
+  }
+  if (Padding.empty())
     return;
+  // Add the padding to the Members list and sort it.
+  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
+        Pad = Padding.begin(), PadEnd = Padding.end();
+        Pad != PadEnd; ++Pad)
+    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
+  std::stable_sort(Members.begin(), Members.end());
+}
 
-  // We weren't able to layout the struct. Try again with a packed struct
-  Packed = true;
-  LastLaidOutBase.invalidate();
-  NextFieldOffset = CharUnits::Zero();
-  FieldTypes.clear();
-  Fields.clear();
-  BitFields.clear();
-  NonVirtualBases.clear();
-  VirtualBases.clear();
-
-  LayoutFields(D);
+void CGRecordLowering::fillOutputFields() {
+  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
+                                               MemberEnd = Members.end();
+       Member != MemberEnd; ++Member) {
+    if (Member->Data)
+      FieldTypes.push_back(Member->Data);
+    if (Member->Kind == MemberInfo::Field) {
+      if (Member->FD)
+        Fields[Member->FD] = FieldTypes.size() - 1;
+      // A field without storage must be a bitfield.
+      if (!Member->Data)
+        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
+    } else if (Member->Kind == MemberInfo::Base)
+      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
+    else if (Member->Kind == MemberInfo::VBase)
+      VirtualBases[Member->RD] = FieldTypes.size() - 1;
+  }
 }
 
 CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
@@ -231,6 +586,9 @@
                                         uint64_t Offset, uint64_t Size,
                                         uint64_t StorageSize,
                                         uint64_t StorageAlignment) {
+  // This function is vestigial from CGRecordLayoutBuilder days but is still
+  // used in CGObjCRuntime.cpp.  That usage has a "fixme" attached to it that
+  // when addressed will allow for the removal of this function.
   llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
   CharUnits TypeSizeInBytes =
     CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
@@ -262,709 +620,33 @@
   return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageAlignment);
 }
 
-/// \brief Layout the range of bitfields from BFI to BFE as contiguous storage.
-bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
-                                            unsigned &FirstFieldNo,
-                                            RecordDecl::field_iterator &FI,
-                                            RecordDecl::field_iterator FE) {
-  assert(FI != FE);
-  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
-  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
-
-  unsigned CharAlign = Types.getTarget().getCharAlign();
-  assert(FirstFieldOffset % CharAlign == 0 &&
-         "First field offset is misaligned");
-  CharUnits FirstFieldOffsetInBytes
-    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);
-
-  unsigned StorageAlignment
-    = llvm::MinAlign(Alignment.getQuantity(),
-                     FirstFieldOffsetInBytes.getQuantity());
-
-  if (FirstFieldOffset < NextFieldOffsetInBits) {
-    CharUnits FieldOffsetInCharUnits =
-      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);
-
-    // Try to resize the last base field.
-    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
-      llvm_unreachable("We must be able to resize the last base if we need to "
-                       "pack bits into it.");
-
-    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
-    assert(FirstFieldOffset >= NextFieldOffsetInBits);
-  }
-
-  // Append padding if necessary.
-  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
-                CharUnits::One());
-
-  // Find the last bitfield in a contiguous run of bitfields.
-  RecordDecl::field_iterator BFI = FI;
-  unsigned LastFieldNo = FirstFieldNo;
-  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
-  for (RecordDecl::field_iterator FJ = FI;
-       (FJ != FE && (*FJ)->isBitField() &&
-        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
-        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
-    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
-    ++LastFieldNo;
-
-    // We must use packed structs for packed fields, and also unnamed bit
-    // fields since they don't affect the struct alignment.
-    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
-      return false;
-  }
-  RecordDecl::field_iterator BFE = llvm::next(FI);
-  --LastFieldNo;
-  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
-  FieldDecl *LastFD = *FI;
-
-  // Find the last bitfield's offset, add its size, and round it up to the
-  // character alignment to compute the storage required.
-  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
-  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
-  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
-  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
-    llvm::RoundUpToAlignment(TotalBits, CharAlign));
-  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);
-
-  // Grow the storage to encompass any known padding in the layout when doing
-  // so will make the storage a power-of-two. There are two cases when we can
-  // do this. The first is when we have a subsequent field and can widen up to
-  // its offset. The second is when the data size of the AST record layout is
-  // past the end of the current storage. The latter is true when there is tail
-  // padding on a struct and no members of a super class can be packed into it.
-  //
-  // Note that we widen the storage as much as possible here to express the
-  // maximum latitude the language provides, and rely on the backend to lower
-  // these in conjunction with shifts and masks to narrower operations where
-  // beneficial.
-  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
-  if (BFE != FE)
-    // If there are more fields to be laid out, the offset at the end of the
-    // bitfield is the offset of the next field in the record.
-    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
-  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
-         "End offset is not past the end of the known storage bits.");
-  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
-  uint64_t LongBits = Types.getTarget().getLongWidth();
-  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
-                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
-  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
-  if (WidenedBits <= SpaceBits) {
-    StorageBits = WidenedBits;
-    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
-    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
-  }
-
-  unsigned FieldIndex = FieldTypes.size();
-  AppendBytes(StorageBytes);
-
-  // Now walk the bitfields associating them with this field of storage and
-  // building up the bitfield specific info.
-  unsigned FieldNo = FirstFieldNo;
-  for (; BFI != BFE; ++BFI, ++FieldNo) {
-    FieldDecl *FD = *BFI;
-    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
-    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
-    Fields[FD] = FieldIndex;
-    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
-                                             StorageBits, StorageAlignment);
-  }
-  FirstFieldNo = LastFieldNo;
-  return true;
-}
-
-bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
-                                        uint64_t fieldOffset) {
-  // If the field is packed, then we need a packed struct.
-  if (!Packed && D->hasAttr<PackedAttr>())
-    return false;
-
-  assert(!D->isBitField() && "Bitfields should be laid out seperately.");
-
-  CheckZeroInitializable(D->getType());
-
-  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
-         && "field offset is not on a byte boundary!");
-  CharUnits fieldOffsetInBytes
-    = Types.getContext().toCharUnitsFromBits(fieldOffset);
-
-  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
-  CharUnits typeAlignment = getTypeAlignment(Ty);
-
-  // If the type alignment is larger then the struct alignment, we must use
-  // a packed struct.
-  if (typeAlignment > Alignment) {
-    assert(!Packed && "Alignment is wrong even with packed struct!");
-    return false;
-  }
-
-  if (!Packed) {
-    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
-      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
-      if (const MaxFieldAlignmentAttr *MFAA =
-            RD->getAttr<MaxFieldAlignmentAttr>()) {
-        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
-          return false;
-      }
-    }
-  }
-
-  // Round up the field offset to the alignment of the field type.
-  CharUnits alignedNextFieldOffsetInBytes =
-    NextFieldOffset.RoundUpToAlignment(typeAlignment);
-
-  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
-    // Try to resize the last base field.
-    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
-      alignedNextFieldOffsetInBytes = 
-        NextFieldOffset.RoundUpToAlignment(typeAlignment);
-    }
-  }
-
-  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
-    assert(!Packed && "Could not place field even with packed struct!");
-    return false;
-  }
-
-  AppendPadding(fieldOffsetInBytes, typeAlignment);
-
-  // Now append the field.
-  Fields[D] = FieldTypes.size();
-  AppendField(fieldOffsetInBytes, Ty);
-
-  LastLaidOutBase.invalidate();
-  return true;
-}
-
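For the alignment check in the removed LayoutField above, a small illustration, hand-worked rather than taken from compiler output: when a member's ABI alignment exceeds the record's alignment, the builder bails out and the record is re-laid-out as a packed LLVM struct.

// Hypothetical example of the packed-struct fallback in the deleted code.
#pragma pack(push, 1)
struct P {
  char c;   // offset 0
  int i;    // offset 1; ABI alignment of int (4) exceeds record alignment (1)
};
#pragma pack(pop)
// Under the removed builder this forces the Packed retry, giving roughly:
//   %struct.P = type <{ i8, i32 }>
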
-llvm::Type *
-CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
-                                        const ASTRecordLayout &Layout) {
-  Fields[Field] = 0;
-  if (Field->isBitField()) {
-    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());
-
-    // Ignore zero sized bit fields.
-    if (FieldSize == 0)
-      return 0;
-
-    unsigned StorageBits = llvm::RoundUpToAlignment(
-      FieldSize, Types.getTarget().getCharAlign());
-    CharUnits NumBytesToAppend
-      = Types.getContext().toCharUnitsFromBits(StorageBits);
-
-    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
-    if (NumBytesToAppend > CharUnits::One())
-      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());
-
-    // Add the bit field info.
-    BitFields[Field] = CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize,
-                                                StorageBits,
-                                                Alignment.getQuantity());
-    return FieldTy;
-  }
-
-  // This is a regular union field.
-  return Types.ConvertTypeForMem(Field->getType());
-}
-
-void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
-  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
-
-  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);
-
-  llvm::Type *unionType = 0;
-  CharUnits unionSize = CharUnits::Zero();
-  CharUnits unionAlign = CharUnits::Zero();
-
-  bool hasOnlyZeroSizedBitFields = true;
-  bool checkedFirstFieldZeroInit = false;
-
-  unsigned fieldNo = 0;
-  for (RecordDecl::field_iterator field = D->field_begin(),
-       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
-    assert(layout.getFieldOffset(fieldNo) == 0 &&
-          "Union field offset did not start at the beginning of record!");
-    llvm::Type *fieldType = LayoutUnionField(*field, layout);
-
-    if (!fieldType)
-      continue;
-
-    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
-      CheckZeroInitializable(field->getType());
-      checkedFirstFieldZeroInit = true;
-    }
-
-    hasOnlyZeroSizedBitFields = false;
-
-    CharUnits fieldAlign = CharUnits::fromQuantity(
-                          Types.getDataLayout().getABITypeAlignment(fieldType));
-    CharUnits fieldSize = CharUnits::fromQuantity(
-                             Types.getDataLayout().getTypeAllocSize(fieldType));
-
-    if (fieldAlign < unionAlign)
-      continue;
-
-    if (fieldAlign > unionAlign || fieldSize > unionSize) {
-      unionType = fieldType;
-      unionAlign = fieldAlign;
-      unionSize = fieldSize;
-    }
-  }
-
-  // Now add our field.
-  if (unionType) {
-    AppendField(CharUnits::Zero(), unionType);
-
-    if (getTypeAlignment(unionType) > layout.getAlignment()) {
-      // We need a packed struct.
-      Packed = true;
-      unionAlign = CharUnits::One();
-    }
-  }
-  if (unionAlign.isZero()) {
-    (void)hasOnlyZeroSizedBitFields;
-    assert(hasOnlyZeroSizedBitFields &&
-           "0-align record did not have all zero-sized bit-fields!");
-    unionAlign = CharUnits::One();
-  }
-
-  // Append tail padding.
-  CharUnits recordSize = layout.getSize();
-  if (recordSize > unionSize)
-    AppendPadding(recordSize, unionAlign);
-}
-
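A hand-worked trace of the LayoutUnion heuristic being deleted above (illustrative only, not generated output): the member whose type has the strictest alignment wins, ties going to the larger type, and the record is padded out to its ABI size.

// Hypothetical union run through the removed algorithm.
union U {
  int i;       // size 4, align 4: selected as the storage field
  char c[7];   // size 7, align 1: skipped because its alignment is lower
};
// sizeof(U) == 8 (7 rounded up to the union's alignment of 4), so the removed
// code appends 4 bytes of tail padding after the chosen field, roughly:
//   %union.U = type { i32, [4 x i8] }
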
-bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
-                                       const CGRecordLayout &baseLayout,
-                                       CharUnits baseOffset) {
-  ResizeLastBaseFieldIfNecessary(baseOffset);
-
-  AppendPadding(baseOffset, CharUnits::One());
-
-  const ASTRecordLayout &baseASTLayout
-    = Types.getContext().getASTRecordLayout(base);
-
-  LastLaidOutBase.Offset = NextFieldOffset;
-  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();
-
-  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
-  if (getTypeAlignment(subobjectType) > Alignment)
-    return false;
-
-  AppendField(baseOffset, subobjectType);
-  return true;
-}
-
-bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
-                                                 CharUnits baseOffset) {
-  // Ignore empty bases.
-  if (base->isEmpty()) return true;
-
-  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
-  if (IsZeroInitializableAsBase) {
-    assert(IsZeroInitializable &&
-           "class zero-initializable as base but not as complete object");
-
-    IsZeroInitializable = IsZeroInitializableAsBase =
-      baseLayout.isZeroInitializableAsBase();
-  }
-
-  if (!LayoutBase(base, baseLayout, baseOffset))
-    return false;
-  NonVirtualBases[base] = (FieldTypes.size() - 1);
-  return true;
-}
-
-bool
-CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
-                                         CharUnits baseOffset) {
-  // Ignore empty bases.
-  if (base->isEmpty()) return true;
-
-  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
-  if (IsZeroInitializable)
-    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
-
-  if (!LayoutBase(base, baseLayout, baseOffset))
-    return false;
-  VirtualBases[base] = (FieldTypes.size() - 1);
-  return true;
-}
-
-bool
-CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
-                                          const ASTRecordLayout &Layout) {
-  if (!RD->getNumVBases())
-    return true;
-
-  // The vbases list is uniqued and ordered by a depth-first
-  // traversal, which is what we need here.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
-        E = RD->vbases_end(); I != E; ++I) {
-
-    const CXXRecordDecl *BaseDecl = 
-      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
-
-    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
-    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
-      return false;
-  }
-  return true;
-}
-
-/// LayoutVirtualBases - lay out the virtual bases of a record decl.
-bool
-CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
-                                          const ASTRecordLayout &Layout) {
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *BaseDecl = 
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
-    // We only want to lay out virtual bases that aren't indirect primary bases
-    // of some other base.
-    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
-      // Only lay out the base once.
-      if (!LaidOutVirtualBases.insert(BaseDecl))
-        continue;
-
-      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
-      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
-        return false;
-    }
-
-    if (!BaseDecl->getNumVBases()) {
-      // This base isn't interesting since it doesn't have any virtual bases.
-      continue;
-    }
-    
-    if (!LayoutVirtualBases(BaseDecl, Layout))
-      return false;
-  }
-  return true;
-}
-
-bool
-CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
-                                             const ASTRecordLayout &Layout) {
-  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
-  // If we have a primary base, lay it out first.
-  if (PrimaryBase) {
-    if (!Layout.isPrimaryBaseVirtual()) {
-      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
-        return false;
-    } else {
-      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
-        return false;
-    }
-
-  // Otherwise, add a vtable / vf-table if the layout says to do so.
-  } else if (Layout.hasOwnVFPtr()) {
-    llvm::Type *FunctionType =
-      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
-                              /*isVarArg=*/true);
-    llvm::Type *VTableTy = FunctionType->getPointerTo();
-
-    if (getTypeAlignment(VTableTy) > Alignment) {
-      // FIXME: Should we allow this to happen in Sema?
-      assert(!Packed && "Alignment is wrong even with packed struct!");
-      return false;
-    }
-
-    assert(NextFieldOffset.isZero() &&
-           "VTable pointer must come first!");
-    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
-  }
-
-  // Layout the non-virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    if (I->isVirtual())
-      continue;
-
-    const CXXRecordDecl *BaseDecl = 
-      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
-    // We've already laid out the primary base.
-    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
-      continue;
-
-    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
-      return false;
-  }
-
-  // Add a vb-table pointer if the layout insists.
-    if (Layout.hasOwnVBPtr()) {
-    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
-    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
-    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
-    AppendField(VBPtrOffset, Vbptr);
-  }
-
-  return true;
-}
-
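For the vf-table pointer block in the removed LayoutNonVirtualBases above, a small hand-worked example (a typical 64-bit target is assumed; this is not compiler output):

// Hypothetical dynamic class with no primary base: the removed code placed an
// i32 (...)** at offset zero, ahead of any user-declared fields.
struct Dyn {
  virtual void f();   // forces the class to carry its own vtable pointer
  int x;              // laid out after the 8-byte vptr
};
// Expected shape under this scheme:
//   %struct.Dyn = type { i32 (...)**, i32 }   ; alloc size 16 via 8-byte alignment
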
-bool
-CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
-  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);
-
-  CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
-  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
-  CharUnits AlignedNonVirtualTypeSize =
-    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
-  
-  // First check if we can use the same fields as for the complete class.
-  CharUnits RecordSize = Layout.getSize();
-  if (AlignedNonVirtualTypeSize == RecordSize)
-    return true;
-
-  // Check if we need padding.
-  CharUnits AlignedNextFieldOffset =
-    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
-
-  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
-    assert(!Packed && "cannot layout even as packed struct");
-    return false; // Needs packing.
-  }
-
-  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
-  if (needsPadding) {
-    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
-    FieldTypes.push_back(getByteArrayType(NumBytes));
-  }
-  
-  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
-                                               FieldTypes, "", Packed);
-  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");
-
-  // Pull the padding back off.
-  if (needsPadding)
-    FieldTypes.pop_back();
-
-  return true;
-}
-
-bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
-  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
-  assert(!Alignment.isZero() && "Did not set alignment!");
-
-  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
-
-  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
-  if (RD)
-    if (!LayoutNonVirtualBases(RD, Layout))
-      return false;
-
-  unsigned FieldNo = 0;
-  
-  for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
-       FI != FE; ++FI, ++FieldNo) {
-    FieldDecl *FD = *FI;
-
-    // If this field is a bitfield, layout all of the consecutive
-    // non-zero-length bitfields and the last zero-length bitfield; these will
-    // all share storage.
-    if (FD->isBitField()) {
-      // If all we have is a zero-width bitfield, skip it.
-      if (FD->getBitWidthValue(Types.getContext()) == 0)
-        continue;
-
-      // Layout this range of bitfields.
-      if (!LayoutBitfields(Layout, FieldNo, FI, FE)) {
-        assert(!Packed &&
-               "Could not layout bitfields even with a packed LLVM struct!");
-        return false;
-      }
-      assert(FI != FE && "Advanced past the last bitfield");
-      continue;
-    }
-
-    if (!LayoutField(FD, Layout.getFieldOffset(FieldNo))) {
-      assert(!Packed &&
-             "Could not layout fields even with a packed LLVM struct!");
-      return false;
-    }
-  }
-
-  if (RD) {
-    // We've laid out the non-virtual bases and the fields, now compute the
-    // non-virtual base field types.
-    if (!ComputeNonVirtualBaseType(RD)) {
-      assert(!Packed && "Could not layout even with a packed LLVM struct!");
-      return false;
-    }
-
-    // Lay out the virtual bases.  The MS ABI uses a different
-    // algorithm here due to the lack of primary virtual bases.
-    if (Types.getTarget().getCXXABI().hasPrimaryVBases()) {
-      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
-      if (Layout.isPrimaryBaseVirtual())
-        IndirectPrimaryBases.insert(Layout.getPrimaryBase());
-
-      if (!LayoutVirtualBases(RD, Layout))
-        return false;
-    } else {
-      if (!MSLayoutVirtualBases(RD, Layout))
-        return false;
-    }
-  }
-  
-  // Append tail padding if necessary.
-  AppendTailPadding(Layout.getSize());
-
-  return true;
-}
-
-void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
-  ResizeLastBaseFieldIfNecessary(RecordSize);
-
-  assert(NextFieldOffset <= RecordSize && "Size mismatch!");
-
-  CharUnits AlignedNextFieldOffset =
-    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
-
-  if (AlignedNextFieldOffset == RecordSize) {
-    // We don't need any padding.
-    return;
-  }
-
-  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
-  AppendBytes(NumPadBytes);
-}
-
-void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
-                                        llvm::Type *fieldType) {
-  CharUnits fieldSize =
-    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(fieldType));
-
-  FieldTypes.push_back(fieldType);
-
-  NextFieldOffset = fieldOffset + fieldSize;
-}
-
-void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
-                                          CharUnits fieldAlignment) {
-  assert(NextFieldOffset <= fieldOffset &&
-         "Incorrect field layout!");
-
-  // Do nothing if we're already at the right offset.
-  if (fieldOffset == NextFieldOffset) return;
-
-  // If we're not emitting a packed LLVM type, try to avoid adding
-  // unnecessary padding fields.
-  if (!Packed) {
-    // Round up the field offset to the alignment of the field type.
-    CharUnits alignedNextFieldOffset =
-      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
-    assert(alignedNextFieldOffset <= fieldOffset);
-
-    // If that's the right offset, we're done.
-    if (alignedNextFieldOffset == fieldOffset) return;
-  }
-
-  // Otherwise we need explicit padding.
-  CharUnits padding = fieldOffset - NextFieldOffset;
-  AppendBytes(padding);
-}
-
-bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
-  // Check if we have a base to resize.
-  if (!LastLaidOutBase.isValid())
-    return false;
-
-  // This offset does not overlap with the tail padding.
-  if (offset >= NextFieldOffset)
-    return false;
-
-  // Restore the field offset and append an i8 array instead.
-  FieldTypes.pop_back();
-  NextFieldOffset = LastLaidOutBase.Offset;
-  AppendBytes(LastLaidOutBase.NonVirtualSize);
-  LastLaidOutBase.invalidate();
-
-  return true;
-}
-
-llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
-  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
-
-  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
-  if (numBytes > CharUnits::One())
-    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
-
-  return Ty;
-}
-
-void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
-  if (numBytes.isZero())
-    return;
-
-  // Append the padding field
-  AppendField(NextFieldOffset, getByteArrayType(numBytes));
-}
-
-CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
-  if (Packed)
-    return CharUnits::One();
-
-  return CharUnits::fromQuantity(Types.getDataLayout().getABITypeAlignment(Ty));
-}
-
-CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
-  if (Packed)
-    return CharUnits::One();
-
-  CharUnits maxAlignment = CharUnits::One();
-  for (size_t i = 0; i != FieldTypes.size(); ++i)
-    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));
-
-  return maxAlignment;
-}
-
-/// Merge in whether a field of the given type is zero-initializable.
-void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
-  // This record already contains a member pointer.
-  if (!IsZeroInitializableAsBase)
-    return;
-
-  // Can only have member pointers if we're compiling C++.
-  if (!Types.getContext().getLangOpts().CPlusPlus)
-    return;
-
-  const Type *elementType = T->getBaseElementTypeUnsafe();
-
-  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
-    if (!Types.getCXXABI().isZeroInitializable(MPT))
-      IsZeroInitializable = IsZeroInitializableAsBase = false;
-  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
-    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
-    if (!Layout.isZeroInitializable())
-      IsZeroInitializable = IsZeroInitializableAsBase = false;
-  }
-}
-
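As a quick illustration of the zero-initializability tracking above (the class names are hypothetical): under the Itanium ABI a null pointer-to-data-member is encoded as -1, so a memset-to-zero does not produce a null member pointer.

struct HasMemPtr {
  int HasMemPtr::*field;   // null representation is -1, not all-zero bits
};
struct Plain {
  int x;                   // an all-zero object is a validly "zeroed" Plain
};
// CGRecordLayout::isZeroInitializable() is expected to be false for HasMemPtr
// and true for Plain, which steers CodeGen away from plain zero initializers
// for HasMemPtr globals and for aggregates containing it.
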
 CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                   llvm::StructType *Ty) {
-  CGRecordLayoutBuilder Builder(*this);
+  CGRecordLowering Builder(*this, D);
 
-  Builder.Layout(D);
-
-  Ty->setBody(Builder.FieldTypes, Builder.Packed);
+  Builder.lower(false);
 
   // If we're in C++, compute the base subobject type.
   llvm::StructType *BaseTy = 0;
-  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
-    BaseTy = Builder.BaseSubobjectType;
-    if (!BaseTy) BaseTy = Ty;
+  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
+    BaseTy = Ty;
+    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
+      CGRecordLowering BaseBuilder(*this, D);
+      BaseBuilder.lower(true);
+      BaseTy = llvm::StructType::create(
+          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
+      addRecordTypeName(D, BaseTy, ".base");
+    }
   }
 
+  // Fill in the struct *after* computing the base type.  Filling in the body
+  // signifies that the type is no longer opaque and record layout is complete,
+  // but we may need to recursively lay out D while lowering it as a base type.
+  Ty->setBody(Builder.FieldTypes, Builder.Packed);
+
   CGRecordLayout *RL =
     new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
-                       Builder.IsZeroInitializableAsBase);
+                        Builder.IsZeroInitializableAsBase);
 
   RL->NonVirtualBases.swap(Builder.NonVirtualBases);
   RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
@@ -994,12 +676,9 @@
 
   if (BaseTy) {
     CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
-    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
-    CharUnits AlignedNonVirtualTypeSize = 
-      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
 
     uint64_t AlignedNonVirtualTypeSizeInBits = 
-      getContext().toBits(AlignedNonVirtualTypeSize);
+      getContext().toBits(NonVirtualSize);
 
     assert(AlignedNonVirtualTypeSizeInBits == 
            getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
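A hand-worked example of the complete-object versus ".base" types that the code above arranges, for a typical x86-64 Itanium target (the class names are hypothetical and the numbers are worked by hand, not compiler output):

struct A { virtual ~A(); int a; };   // sizeof(A) == 16: vptr + int + padding
struct B : virtual A { long b; };    // sizeof(B) == 32: vptr, b, then the A vbase
// Layout.getNonVirtualSize() for B is 16 (vptr + b) while Layout.getSize() is
// 32, so ComputeRecordLayout emits two LLVM types:
//   %struct.B        covering the full 32-byte complete object, and
//   %struct.B.base   covering only the 16-byte non-virtual part, used whenever
//                    B itself is embedded as a base subobject.
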
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 0bc51dd..cba9e6c 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -16,14 +16,14 @@
 #include "CodeGenModule.h"
 #include "TargetInfo.h"
 #include "clang/AST/StmtVisitor.h"
-#include "clang/Sema/SemaDiagnostic.h"
 #include "clang/Basic/PrettyStackTrace.h"
 #include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaDiagnostic.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/CallSite.h"
 using namespace clang;
 using namespace CodeGen;
 
@@ -43,6 +43,7 @@
 
 void CodeGenFunction::EmitStmt(const Stmt *S) {
   assert(S && "Null statement?");
+  PGO.setCurrentStmt(S);
 
   // These statements have their own debug info handling.
   if (EmitSimpleStmt(S))
@@ -76,6 +77,7 @@
   case Stmt::SEHFinallyStmtClass:
   case Stmt::MSDependentExistsStmtClass:
   case Stmt::OMPParallelDirectiveClass:
+  case Stmt::OMPSimdDirectiveClass:
     llvm_unreachable("invalid statement class to emit generically");
   case Stmt::NullStmtClass:
   case Stmt::CompoundStmtClass:
@@ -242,7 +244,7 @@
       EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
                        /*IsInit*/false);
     }
-      
+
   }
 
   return RetAlloca;
@@ -309,9 +311,8 @@
 
 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
   bool inserted = false;
-  for (llvm::BasicBlock::use_iterator
-         i = block->use_begin(), e = block->use_end(); i != e; ++i) {
-    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(*i)) {
+  for (llvm::User *u : block->users()) {
+    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
       CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
       inserted = true;
       break;
@@ -358,7 +359,9 @@
     ResolveBranchFixups(Dest.getBlock());
   }
 
+  RegionCounter Cnt = getPGORegionCounter(D->getStmt());
   EmitBlock(Dest.getBlock());
+  Cnt.beginRegion(Builder);
 }
 
 /// Change the cleanup scope of the labels in this lexical scope to
@@ -430,6 +433,7 @@
   // C99 6.8.4.1: The first substatement is executed if the expression compares
   // unequal to 0.  The condition must be a scalar type.
   LexicalScope ConditionScope(*this, S.getSourceRange());
+  RegionCounter Cnt = getPGORegionCounter(&S);
 
   if (S.getConditionVariable())
     EmitAutoVarDecl(*S.getConditionVariable());
@@ -447,6 +451,8 @@
     // If the skipped block has no labels in it, just emit the executed block.
     // This avoids emitting dead code and simplifies the CFG substantially.
     if (!ContainsLabel(Skipped)) {
+      if (CondConstant)
+        Cnt.beginRegion(Builder);
       if (Executed) {
         RunCleanupsScope ExecutedScope(*this);
         EmitStmt(Executed);
@@ -462,10 +468,12 @@
   llvm::BasicBlock *ElseBlock = ContBlock;
   if (S.getElse())
     ElseBlock = createBasicBlock("if.else");
-  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
+
+  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Cnt.getCount());
 
   // Emit the 'then' code.
-  EmitBlock(ThenBlock); 
+  EmitBlock(ThenBlock);
+  Cnt.beginRegion(Builder);
   {
     RunCleanupsScope ThenScope(*this);
     EmitStmt(S.getThen());
@@ -493,6 +501,8 @@
 }
 
 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
+  RegionCounter Cnt = getPGORegionCounter(&S);
+
   // Emit the header for the loop, which will also become
   // the continue target.
   JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
@@ -535,8 +545,8 @@
     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
     if (ConditionScope.requiresCleanups())
       ExitBlock = createBasicBlock("while.exit");
-
-    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock,
+                         PGO.createLoopWeights(S.getCond(), Cnt));
 
     if (ExitBlock != LoopExit.getBlock()) {
       EmitBlock(ExitBlock);
@@ -549,6 +559,7 @@
   {
     RunCleanupsScope BodyScope(*this);
     EmitBlock(LoopBody);
+    Cnt.beginRegion(Builder);
     EmitStmt(S.getBody());
   }
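The calls above hand a weights node to IRBuilder::CreateCondBr, whose optional fourth argument attaches !prof metadata to the branch. A minimal sketch of how such a node can be built, assuming PGO.createLoopWeights and PGO.createBranchWeights ultimately wrap llvm::MDBuilder in roughly this way (the helper name makeBranchWeights is mine):

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include <cstdint>

// Sketch only: turns a pair of execution counts into branch-weight metadata.
// Real code must first scale 64-bit profile counters into 32-bit weights;
// that step is omitted here.
static llvm::MDNode *makeBranchWeights(llvm::LLVMContext &Ctx,
                                       uint64_t TakenCount,
                                       uint64_t NotTakenCount) {
  llvm::MDBuilder MDB(Ctx);
  return MDB.createBranchWeights(static_cast<uint32_t>(TakenCount),
                                 static_cast<uint32_t>(NotTakenCount));
}
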
 
@@ -573,19 +584,19 @@
   JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
   JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
 
+  RegionCounter Cnt = getPGORegionCounter(&S);
+
   // Store the blocks to use for break and continue.
   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
 
   // Emit the body of the loop.
   llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
-  EmitBlock(LoopBody);
+  EmitBlockWithFallThrough(LoopBody, Cnt);
   {
     RunCleanupsScope BodyScope(*this);
     EmitStmt(S.getBody());
   }
 
-  BreakContinueStack.pop_back();
-
   EmitBlock(LoopCond.getBlock());
 
   // C99 6.8.5.2: "The evaluation of the controlling expression takes place
@@ -596,6 +607,8 @@
   // compares unequal to 0.  The condition must be a scalar type.
   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
 
+  BreakContinueStack.pop_back();
+
   // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
   // to correctly handle break/continue though.
   bool EmitBoolCondBranch = true;
@@ -605,7 +618,8 @@
 
   // As long as the condition is true, iterate the loop.
   if (EmitBoolCondBranch)
-    Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());
+    Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock(),
+                         PGO.createLoopWeights(S.getCond(), Cnt));
 
   // Emit the exit block.
   EmitBlock(LoopExit.getBlock());
@@ -629,6 +643,8 @@
   if (S.getInit())
     EmitStmt(S.getInit());
 
+  RegionCounter Cnt = getPGORegionCounter(&S);
+
   // Start the loop with a block that tests the condition.
   // If there's an increment, the continue scope will be overwritten
   // later.
@@ -636,6 +652,16 @@
   llvm::BasicBlock *CondBlock = Continue.getBlock();
   EmitBlock(CondBlock);
 
+  // If the for loop doesn't have an increment we can just use the
+  // condition as the continue block.  Otherwise we'll need to create
+  // a block for it (in the current scope, i.e. in the scope of the
+  // condition), and that block will become our continue block.
+  if (S.getInc())
+    Continue = getJumpDestInCurrentScope("for.inc");
+
+  // Store the blocks to use for break and continue.
+  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
   // Create a cleanup scope for the condition variable cleanups.
   RunCleanupsScope ConditionScope(*this);
 
@@ -657,7 +683,9 @@
 
     // C99 6.8.5p2/p4: The first substatement is executed if the expression
     // compares unequal to 0.  The condition must be a scalar type.
-    EmitBranchOnBoolExpr(S.getCond(), ForBody, ExitBlock);
+    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock,
+                         PGO.createLoopWeights(S.getCond(), Cnt));
 
     if (ExitBlock != LoopExit.getBlock()) {
       EmitBlock(ExitBlock);
@@ -669,16 +697,7 @@
     // Treat it as a non-zero constant.  Don't even create a new block for the
     // body, just fall into it.
   }
-
-  // If the for loop doesn't have an increment we can just use the
-  // condition as the continue block.  Otherwise we'll need to create
-  // a block for it (in the current scope, i.e. in the scope of the
-  // condition), and that we will become our continue block.
-  if (S.getInc())
-    Continue = getJumpDestInCurrentScope("for.inc");
-
-  // Store the blocks to use for break and continue.
-  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+  Cnt.beginRegion(Builder);
 
   {
     // Create a separate cleanup scope for the body, in case it is not
@@ -720,6 +739,8 @@
   EmitStmt(S.getRangeStmt());
   EmitStmt(S.getBeginEndStmt());
 
+  RegionCounter Cnt = getPGORegionCounter(&S);
+
   // Start the loop with a block that tests the condition.
   // If there's an increment, the continue scope will be overwritten
   // later.
@@ -737,7 +758,9 @@
 
   // The body is executed if the expression, contextually converted
   // to bool, is true.
-  EmitBranchOnBoolExpr(S.getCond(), ForBody, ExitBlock);
+  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock,
+                       PGO.createLoopWeights(S.getCond(), Cnt));
 
   if (ExitBlock != LoopExit.getBlock()) {
     EmitBlock(ExitBlock);
@@ -745,6 +768,7 @@
   }
 
   EmitBlock(ForBody);
+  Cnt.beginRegion(Builder);
 
   // Create a block for the increment. In case of a 'continue', we jump there.
   JumpDest Continue = getJumpDestInCurrentScope("for.inc");
@@ -809,7 +833,8 @@
 
   // FIXME: Clean this up by using an LValue for ReturnTemp,
   // EmitStoreThroughLValue, and EmitAnyExpr.
-  if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
+  if (getLangOpts().ElideConstructors &&
+      S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
     // Apply the named return value optimization for this return statement,
     // which means doing nothing: the appropriate result has already been
     // constructed into the NRVO variable.
@@ -818,7 +843,7 @@
     // that the cleanup code should not destroy the variable.
     if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
       Builder.CreateStore(Builder.getTrue(), NRVOFlag);
-  } else if (!ReturnValue) {
+  } else if (!ReturnValue || (RV && RV->getType()->isVoidType())) {
     // Make sure not to return anything, but evaluate the expression
     // for side effects.
     if (RV)
@@ -866,9 +891,8 @@
   if (HaveInsertPoint())
     EmitStopPoint(&S);
 
-  for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
-       I != E; ++I)
-    EmitDecl(**I);
+  for (const auto *I : S.decls())
+    EmitDecl(*I);
 }
 
 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
@@ -880,8 +904,7 @@
   if (HaveInsertPoint())
     EmitStopPoint(&S);
 
-  JumpDest Block = BreakContinueStack.back().BreakBlock;
-  EmitBranchThroughCleanup(Block);
+  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
 }
 
 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
@@ -893,8 +916,7 @@
   if (HaveInsertPoint())
     EmitStopPoint(&S);
 
-  JumpDest Block = BreakContinueStack.back().ContinueBlock;
-  EmitBranchThroughCleanup(Block);
+  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
 }
 
 /// EmitCaseStmtRange - If case statement range is not too big then
@@ -906,11 +928,13 @@
   llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
   llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
 
+  RegionCounter CaseCnt = getPGORegionCounter(&S);
+
   // Emit the code for this case. We do this first to make sure it is
   // properly chained from our predecessor before generating the
   // switch machinery to enter this block.
-  EmitBlock(createBasicBlock("sw.bb"));
-  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
+  EmitBlockWithFallThrough(CaseDest, CaseCnt);
   EmitStmt(S.getSubStmt());
 
   // If range is empty, do nothing.
@@ -921,7 +945,18 @@
   // FIXME: parameters such as this should not be hardcoded.
   if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
     // Range is small enough to add multiple switch instruction cases.
-    for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
+    uint64_t Total = CaseCnt.getCount();
+    unsigned NCases = Range.getZExtValue() + 1;
+    // We only have one region counter for the entire set of cases here, so we
+    // need to divide the weights evenly between the generated cases, ensuring
+    // that the total weight is preserved. E.g., a weight of 5 over three cases
+    // will be distributed as weights of 2, 2, and 1.
+    uint64_t Weight = Total / NCases, Rem = Total % NCases;
+    for (unsigned I = 0; I != NCases; ++I) {
+      if (SwitchWeights)
+        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
+      if (Rem)
+        Rem--;
       SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
       LHS++;
     }
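A self-contained check of the splitting scheme described in the comment above; splitWeight is a hypothetical helper that mirrors the loop, not part of the patch.

#include <cassert>
#include <cstdint>
#include <vector>

// Distribute Total over NCases so the sum is preserved: 5 over 3 -> 2, 2, 1.
static std::vector<uint64_t> splitWeight(uint64_t Total, unsigned NCases) {
  std::vector<uint64_t> Out;
  uint64_t Weight = Total / NCases, Rem = Total % NCases;
  for (unsigned I = 0; I != NCases; ++I) {
    Out.push_back(Weight + (Rem ? 1 : 0));
    if (Rem)
      --Rem;
  }
  return Out;
}

int main() {
  std::vector<uint64_t> W = splitWeight(5, 3);
  assert(W[0] == 2 && W[1] == 2 && W[2] == 1);
  assert(W[0] + W[1] + W[2] == 5);
  return 0;
}
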
@@ -946,7 +981,19 @@
     Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
   llvm::Value *Cond =
     Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
-  Builder.CreateCondBr(Cond, CaseDest, FalseDest);
+
+  llvm::MDNode *Weights = 0;
+  if (SwitchWeights) {
+    uint64_t ThisCount = CaseCnt.getCount();
+    uint64_t DefaultCount = (*SwitchWeights)[0];
+    Weights = PGO.createBranchWeights(ThisCount, DefaultCount);
+
+    // Since we're chaining the switch default through each large case range,
+    // we need to update the weight for the default, i.e., the first entry in
+    // SwitchWeights, to include this case.
+    (*SwitchWeights)[0] += ThisCount;
+  }
+  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
 
   // Restore the appropriate insertion point.
   if (RestoreBB)
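A toy trace of the default-weight bookkeeping above, with made-up counts; the point is that each large case range's count is folded into the weight that guards the rest of the comparison chain.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t DefaultWeight = 10;   // corresponds to (*SwitchWeights)[0]
  // First large range, profile count 4: its branch is weighted (4, 10).
  DefaultWeight += 4;
  // Second large range, profile count 6: its branch is weighted (6, 14).
  DefaultWeight += 6;
  assert(DefaultWeight == 20);   // the default slot has absorbed both ranges
  return 0;
}
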
@@ -959,7 +1006,7 @@
   // If there is no enclosing switch instance that we're aware of, then this
   // case statement and its block can be elided.  This situation only happens
   // when we've constant-folded the switch, are emitting the constant case,
-  // and part of the constant case includes another case statement.  For 
+  // and part of the constant case includes another case statement.  For
   // instance: switch (4) { case 4: do { case 5: } while (1); }
   if (!SwitchInsn) {
     EmitStmt(S.getSubStmt());
@@ -972,17 +1019,22 @@
     return;
   }
 
+  RegionCounter CaseCnt = getPGORegionCounter(&S);
   llvm::ConstantInt *CaseVal =
     Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
 
-  // If the body of the case is just a 'break', and if there was no fallthrough,
-  // try to not emit an empty block.
-  if ((CGM.getCodeGenOpts().OptimizationLevel > 0) &&
+  // If the body of the case is just a 'break', try to not emit an empty block.
+  // If we're profiling or we're not optimizing, leave the block in for better
+  // debug and coverage analysis.
+  if (!CGM.getCodeGenOpts().ProfileInstrGenerate &&
+      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
       isa<BreakStmt>(S.getSubStmt())) {
     JumpDest Block = BreakContinueStack.back().BreakBlock;
 
     // Only do this optimization if there are no cleanups that need emitting.
     if (isObviouslyBranchWithoutCleanups(Block)) {
+      if (SwitchWeights)
+        SwitchWeights->push_back(CaseCnt.getCount());
       SwitchInsn->addCase(CaseVal, Block.getBlock());
 
       // If there was a fallthrough into this case, make sure to redirect it to
@@ -995,8 +1047,10 @@
     }
   }
 
-  EmitBlock(createBasicBlock("sw.bb"));
-  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
+  EmitBlockWithFallThrough(CaseDest, CaseCnt);
+  if (SwitchWeights)
+    SwitchWeights->push_back(CaseCnt.getCount());
   SwitchInsn->addCase(CaseVal, CaseDest);
 
   // Recursively emitting the statement is acceptable, but is not wonderful for
@@ -1014,8 +1068,17 @@
   // Otherwise, iteratively add consecutive cases to this switch stmt.
   while (NextCase && NextCase->getRHS() == 0) {
     CurCase = NextCase;
-    llvm::ConstantInt *CaseVal = 
+    llvm::ConstantInt *CaseVal =
       Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
+
+    CaseCnt = getPGORegionCounter(NextCase);
+    if (SwitchWeights)
+      SwitchWeights->push_back(CaseCnt.getCount());
+    if (CGM.getCodeGenOpts().ProfileInstrGenerate) {
+      CaseDest = createBasicBlock("sw.bb");
+      EmitBlockWithFallThrough(CaseDest, CaseCnt);
+    }
+
     SwitchInsn->addCase(CaseVal, CaseDest);
     NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
   }
@@ -1028,7 +1091,10 @@
   llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
   assert(DefaultBlock->empty() &&
          "EmitDefaultStmt: Default block already defined?");
-  EmitBlock(DefaultBlock);
+
+  RegionCounter Cnt = getPGORegionCounter(&S);
+  EmitBlockWithFallThrough(DefaultBlock, Cnt);
+
   EmitStmt(S.getSubStmt());
 }
 
@@ -1185,7 +1251,8 @@
 static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                        const llvm::APSInt &ConstantCondValue,
                                 SmallVectorImpl<const Stmt*> &ResultStmts,
-                                       ASTContext &C) {
+                                       ASTContext &C,
+                                       const SwitchCase *&ResultCase) {
   // First step, find the switch case that is being branched to.  We can do this
   // efficiently by scanning the SwitchCase list.
   const SwitchCase *Case = S.getSwitchCaseList();
@@ -1228,6 +1295,7 @@
   //   while (1) {
   //     case 4: ...
   bool FoundCase = false;
+  ResultCase = Case;
   return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                   ResultStmts) != CSFC_Failure &&
          FoundCase;
@@ -1243,6 +1311,7 @@
 
   // Handle nested switch statements.
   llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
   llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
 
   // See if we can constant fold the condition of the switch and therefore only
@@ -1250,8 +1319,13 @@
   llvm::APSInt ConstantCondValue;
   if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
     SmallVector<const Stmt*, 4> CaseStmts;
+    const SwitchCase *Case = 0;
     if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
-                                   getContext())) {
+                                   getContext(), Case)) {
+      if (Case) {
+        RegionCounter CaseCnt = getPGORegionCounter(Case);
+        CaseCnt.beginRegion(Builder);
+      }
       RunCleanupsScope ExecutedScope(*this);
 
       // At this point, we are no longer "within" a switch instance, so
@@ -1263,6 +1337,8 @@
       // specified series of statements and we're good.
       for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
         EmitStmt(CaseStmts[i]);
+      RegionCounter ExitCnt = getPGORegionCounter(&S);
+      ExitCnt.beginRegion(Builder);
 
       // Now we want to restore the saved switch instance so that nested
       // switches continue to function properly
@@ -1280,12 +1356,29 @@
   // failure.
   llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
   SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+  if (PGO.haveRegionCounts()) {
+    // Walk the SwitchCase list to find how many there are.
+    uint64_t DefaultCount = 0;
+    unsigned NumCases = 0;
+    for (const SwitchCase *Case = S.getSwitchCaseList();
+         Case;
+         Case = Case->getNextSwitchCase()) {
+      if (isa<DefaultStmt>(Case))
+        DefaultCount = getPGORegionCounter(Case).getCount();
+      NumCases += 1;
+    }
+    SwitchWeights = new SmallVector<uint64_t, 16>();
+    SwitchWeights->reserve(NumCases);
+    // The default needs to be first. We store the edge count, so we already
+    // know the right weight.
+    SwitchWeights->push_back(DefaultCount);
+  }
   CaseRangeBlock = DefaultBlock;
 
   // Clear the insertion point to indicate we are in unreachable code.
   Builder.ClearInsertionPoint();
 
-  // All break statements jump to NextBlock. If BreakContinueStack is non empty
+  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
   // then reuse last ContinueBlock.
   JumpDest OuterContinue;
   if (!BreakContinueStack.empty())
@@ -1320,8 +1413,20 @@
 
   // Emit continuation.
   EmitBlock(SwitchExit.getBlock(), true);
+  RegionCounter ExitCnt = getPGORegionCounter(&S);
+  ExitCnt.beginRegion(Builder);
 
+  if (SwitchWeights) {
+    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
+           "switch weights do not match switch cases");
+    // If there's only one jump destination there's no sense weighting it.
+    if (SwitchWeights->size() > 1)
+      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
+                              PGO.createBranchWeights(*SwitchWeights));
+    delete SwitchWeights;
+  }
   SwitchInsn = SavedSwitchInsn;
+  SwitchWeights = SavedSwitchWeights;
   CaseRangeBlock = SavedCRBlock;
 }
 
@@ -1493,7 +1598,7 @@
       Name = GAS->getOutputName(i);
     TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
     bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
-    assert(IsValid && "Failed to parse output constraint"); 
+    assert(IsValid && "Failed to parse output constraint");
     OutputConstraintInfos.push_back(Info);
   }
 
@@ -1829,8 +1934,8 @@
   // Create the function declaration.
   FunctionType::ExtInfo ExtInfo;
   const CGFunctionInfo &FuncInfo =
-    CGM.getTypes().arrangeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
-                                              /*IsVariadic=*/false);
+      CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
+                                                    /*IsVariadic=*/false);
   llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
 
   llvm::Function *F =
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index bfff470..bd280ea 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -66,7 +66,8 @@
     if (VTTVT.getBase() == RD) {
       // Just get the address point for the regular vtable.
       AddressPoint =
-          ItaniumVTContext.getVTableLayout(RD).getAddressPoint(i->VTableBase);
+          getItaniumVTableContext().getVTableLayout(RD).getAddressPoint(
+              i->VTableBase);
       assert(AddressPoint != 0 && "Did not find vtable address point!");
     } else {
       AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase);
@@ -94,7 +95,7 @@
   VTT->setLinkage(Linkage);
 
   // Set the right visibility.
-  CGM.setTypeVisibility(VTT, RD, CodeGenModule::TVK_ForVTT);
+  CGM.setGlobalVisibility(VTT, RD);
 }
 
 llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index f28d9b6..0ad765c 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -30,14 +30,7 @@
 using namespace CodeGen;
 
 CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
-  : CGM(CGM), ItaniumVTContext(CGM.getContext()) {
-  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
-    // FIXME: Eventually, we should only have one of V*TContexts available.
-    // Today we use both in the Microsoft ABI as MicrosoftVFTableContext
-    // is not completely supported in CodeGen yet.
-    MicrosoftVTContext.reset(new MicrosoftVTableContext(CGM.getContext()));
-  }
-}
+    : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}
 
 llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD, 
                                               const ThunkInfo &Thunk) {
@@ -54,54 +47,13 @@
   Out.flush();
 
   llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
-  return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
+  return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
+                                 /*DontDefer=*/true);
 }
 
 static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
                                const ThunkInfo &Thunk, llvm::Function *Fn) {
   CGM.setGlobalVisibility(Fn, MD);
-
-  if (!CGM.getCodeGenOpts().HiddenWeakVTables)
-    return;
-
-  // If the thunk has weak/linkonce linkage, but the function must be
-  // emitted in every translation unit that references it, then we can
-  // emit its thunks with hidden visibility, since its thunks must be
-  // emitted when the function is.
-
-  // This follows CodeGenModule::setTypeVisibility; see the comments
-  // there for explanation.
-
-  if ((Fn->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage &&
-       Fn->getLinkage() != llvm::GlobalVariable::WeakODRLinkage) ||
-      Fn->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
-    return;
-
-  if (MD->getExplicitVisibility(ValueDecl::VisibilityForValue))
-    return;
-
-  switch (MD->getTemplateSpecializationKind()) {
-  case TSK_ExplicitInstantiationDefinition:
-  case TSK_ExplicitInstantiationDeclaration:
-    return;
-
-  case TSK_Undeclared:
-    break;
-
-  case TSK_ExplicitSpecialization:
-  case TSK_ImplicitInstantiation:
-    return;
-    break;
-  }
-
-  // If there's an explicit definition, and that definition is
-  // out-of-line, then we can't assume that all users will have a
-  // definition to emit.
-  const FunctionDecl *Def = 0;
-  if (MD->hasBody(Def) && Def->isOutOfLine())
-    return;
-
-  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
 }
 
 #ifndef NDEBUG
@@ -177,7 +129,7 @@
                                       GlobalDecl GD, const ThunkInfo &Thunk) {
   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
-  QualType ResultType = FPT->getResultType();
+  QualType ResultType = FPT->getReturnType();
 
   // Get the original function
   assert(FnInfo.isVariadic());
@@ -248,11 +200,11 @@
   QualType ThisType = MD->getThisType(getContext());
   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
   QualType ResultType =
-    CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getResultType();
+      CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
   FunctionArgList FunctionArgs;
 
   // Create the implicit 'this' parameter declaration.
-  CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);
+  CGM.getCXXABI().buildThisParam(*this, FunctionArgs);
 
   // Add the rest of the parameters.
   for (FunctionDecl::param_const_iterator I = MD->param_begin(),
@@ -260,6 +212,9 @@
        I != E; ++I)
     FunctionArgs.push_back(*I);
 
+  if (isa<CXXDestructorDecl>(MD))
+    CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);
+
   // Start defining the function.
   StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                 SourceLocation());
@@ -316,7 +271,7 @@
 
   // Determine whether we have a return value slot to use.
   QualType ResultType =
-    CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getResultType();
+      CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
   ReturnValueSlot Slot;
   if (!ResultType->isVoidType() &&
       CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
@@ -461,12 +416,8 @@
   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
     return;
 
-  const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector;
-  if (MicrosoftVTContext.isValid()) {
-    ThunkInfoVector = MicrosoftVTContext->getThunkInfo(GD);
-  } else {
-    ThunkInfoVector = ItaniumVTContext.getThunkInfo(GD);
-  }
+  const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector =
+      VTContext->getThunkInfo(GD);
 
   if (!ThunkInfoVector)
     return;
@@ -603,8 +554,8 @@
   if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
     DI->completeClassData(Base.getBase());
 
-  OwningPtr<VTableLayout> VTLayout(
-      ItaniumVTContext.createConstructionVTableLayout(
+  std::unique_ptr<VTableLayout> VTLayout(
+      getItaniumVTableContext().createConstructionVTableLayout(
           Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD));
 
   // Add the address points.
@@ -633,7 +584,7 @@
   // Create the variable that will hold the construction vtable.
   llvm::GlobalVariable *VTable = 
     CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
-  CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForConstructionVTable);
+  CGM.setGlobalVisibility(VTable, RD);
 
   // V-tables are always unnamed_addr.
   VTable->setUnnamedAddr(true);
@@ -752,7 +703,7 @@
 /// strongly elsewhere.  Otherwise, we'd just like to avoid emitting
 /// v-tables when unnecessary.
 bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
-  assert(RD->isDynamicClass() && "Non dynamic classes have no VTable.");
+  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");
 
   // If we have an explicit instantiation declaration (and not a
   // definition), the v-table is defined elsewhere.
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index e8cd55e..e1554be 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -31,11 +31,8 @@
 class CodeGenVTables {
   CodeGenModule &CGM;
 
-  // FIXME: Consider moving ItaniumVTContext and MicrosoftVTContext into
-  // respective CXXABI classes?
-  ItaniumVTableContext ItaniumVTContext;
-  OwningPtr<MicrosoftVTableContext> MicrosoftVTContext;
-  
+  VTableContextBase *VTContext;
+
   /// VTableAddressPointsMapTy - Address points for a single vtable.
   typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
 
@@ -72,10 +69,12 @@
 
   CodeGenVTables(CodeGenModule &CGM);
 
-  ItaniumVTableContext &getItaniumVTableContext() { return ItaniumVTContext; }
+  ItaniumVTableContext &getItaniumVTableContext() {
+    return *cast<ItaniumVTableContext>(VTContext);
+  }
 
   MicrosoftVTableContext &getMicrosoftVTableContext() {
-    return *MicrosoftVTContext.get();
+    return *cast<MicrosoftVTableContext>(VTContext);
   }
 
   /// getSubVTTIndex - Return the index of the sub-VTT for the base class of the
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 83dbbf0..12cb1ab 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -1,12 +1,18 @@
 set(LLVM_LINK_COMPONENTS
-  asmparser
-  bitreader
-  bitwriter
-  irreader
-  instrumentation
-  ipo
-  linker
-  vectorize
+  BitReader
+  BitWriter
+  Core
+  IPO
+  IRReader
+  InstCombine
+  Instrumentation
+  Linker
+  MC
+  ObjCARCOpts
+  ScalarOpts
+  Support
+  Target
+  TransformUtils
   )
 
 add_clang_library(clangCodeGen
@@ -14,12 +20,12 @@
   CGAtomic.cpp
   CGBlocks.cpp
   CGBuiltin.cpp
-  CGCall.cpp
-  CGClass.cpp
   CGCUDANV.cpp
   CGCUDARuntime.cpp
   CGCXX.cpp
   CGCXXABI.cpp
+  CGCall.cpp
+  CGClass.cpp
   CGCleanup.cpp
   CGDebugInfo.cpp
   CGDecl.cpp
@@ -27,45 +33,36 @@
   CGException.cpp
   CGExpr.cpp
   CGExprAgg.cpp
+  CGExprCXX.cpp
   CGExprComplex.cpp
   CGExprConstant.cpp
-  CGExprCXX.cpp
   CGExprScalar.cpp
   CGObjC.cpp
   CGObjCGNU.cpp
   CGObjCMac.cpp
   CGObjCRuntime.cpp
   CGOpenCLRuntime.cpp
-  CGRecordLayoutBuilder.cpp
   CGRTTI.cpp
+  CGRecordLayoutBuilder.cpp
   CGStmt.cpp
-  CGVTables.cpp
   CGVTT.cpp
+  CGVTables.cpp
   CodeGenABITypes.cpp
   CodeGenAction.cpp
   CodeGenFunction.cpp
   CodeGenModule.cpp
+  CodeGenPGO.cpp
   CodeGenTBAA.cpp
   CodeGenTypes.cpp
   ItaniumCXXABI.cpp
   MicrosoftCXXABI.cpp
-  MicrosoftVBTables.cpp
   ModuleBuilder.cpp
   TargetInfo.cpp
-  )
 
-add_dependencies(clangCodeGen
-  ClangARMNeon
-  ClangAttrClasses
-  ClangAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticFrontend
-  ClangStmtNodes
-  )
+  DEPENDS
+  intrinsics_gen
 
-target_link_libraries(clangCodeGen
+  LINK_LIBS
   clangBasic
   clangAST
   clangFrontend
diff --git a/lib/CodeGen/CodeGenABITypes.cpp b/lib/CodeGen/CodeGenABITypes.cpp
index 18c836c..fba7184 100644
--- a/lib/CodeGen/CodeGenABITypes.cpp
+++ b/lib/CodeGen/CodeGenABITypes.cpp
@@ -17,23 +17,23 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/CodeGen/CodeGenABITypes.h"
-
-#include "clang/CodeGen/CGFunctionInfo.h"
 #include "CodeGenModule.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
 
 using namespace clang;
 using namespace CodeGen;
 
 CodeGenABITypes::CodeGenABITypes(ASTContext &C,
-                                 const CodeGenOptions &CodeGenOpts,
                                  llvm::Module &M,
-                                 const llvm::DataLayout &TD,
-                                 DiagnosticsEngine &Diags)
-  : CGM(new CodeGen::CodeGenModule(C, CodeGenOpts, M, TD, Diags)) {
+                                 const llvm::DataLayout &TD)
+  : CGO(new CodeGenOptions),
+    CGM(new CodeGen::CodeGenModule(C, *CGO, M, TD, C.getDiagnostics())) {
 }
 
 CodeGenABITypes::~CodeGenABITypes()
 {
+  delete CGO;
   delete CGM;
 }
 
@@ -60,10 +60,10 @@
 }
 
 const CGFunctionInfo &
-CodeGenABITypes::arrangeLLVMFunctionInfo(CanQualType returnType,
+CodeGenABITypes::arrangeFreeFunctionCall(CanQualType returnType,
                                          llvm::ArrayRef<CanQualType> argTypes,
                                          FunctionType::ExtInfo info,
                                          RequiredArgs args) {
-  return CGM->getTypes().arrangeLLVMFunctionInfo(returnType, argTypes,
-                                                info, args);
+  return CGM->getTypes().arrangeLLVMFunctionInfo(
+      returnType, /*IsInstanceMethod=*/false, argTypes, info, args);
 }
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 3072204..573f8e9 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -18,17 +18,19 @@
 #include "clang/CodeGen/ModuleBuilder.h"
 #include "clang/Frontend/CompilerInstance.h"
 #include "clang/Frontend/FrontendDiagnostic.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IRReader/IRReader.h"
-#include "llvm/Linker.h"
+#include "llvm/Linker/Linker.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Support/Timer.h"
+#include <memory>
 using namespace clang;
 using namespace llvm;
 
@@ -45,42 +47,33 @@
 
     Timer LLVMIRGeneration;
 
-    OwningPtr<CodeGenerator> Gen;
+    std::unique_ptr<CodeGenerator> Gen;
 
-    OwningPtr<llvm::Module> TheModule, LinkModule;
+    std::unique_ptr<llvm::Module> TheModule, LinkModule;
 
   public:
     BackendConsumer(BackendAction action, DiagnosticsEngine &_Diags,
                     const CodeGenOptions &compopts,
                     const TargetOptions &targetopts,
-                    const LangOptions &langopts,
-                    bool TimePasses,
-                    const std::string &infile,
-                    llvm::Module *LinkModule,
-                    raw_ostream *OS,
-                    LLVMContext &C) :
-      Diags(_Diags),
-      Action(action),
-      CodeGenOpts(compopts),
-      TargetOpts(targetopts),
-      LangOpts(langopts),
-      AsmOutStream(OS),
-      Context(), 
-      LLVMIRGeneration("LLVM IR Generation Time"),
-      Gen(CreateLLVMCodeGen(Diags, infile, compopts, targetopts, C)),
-      LinkModule(LinkModule)
-    {
+                    const LangOptions &langopts, bool TimePasses,
+                    const std::string &infile, llvm::Module *LinkModule,
+                    raw_ostream *OS, LLVMContext &C)
+        : Diags(_Diags), Action(action), CodeGenOpts(compopts),
+          TargetOpts(targetopts), LangOpts(langopts), AsmOutStream(OS),
+          Context(), LLVMIRGeneration("LLVM IR Generation Time"),
+          Gen(CreateLLVMCodeGen(Diags, infile, compopts, targetopts, C)),
+          LinkModule(LinkModule) {
       llvm::TimePassesIsEnabled = TimePasses;
     }
 
-    llvm::Module *takeModule() { return TheModule.take(); }
-    llvm::Module *takeLinkModule() { return LinkModule.take(); }
+    llvm::Module *takeModule() { return TheModule.release(); }
+    llvm::Module *takeLinkModule() { return LinkModule.release(); }
 
-    virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+    void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
       Gen->HandleCXXStaticMemberVarInstantiation(VD);
     }
 
-    virtual void Initialize(ASTContext &Ctx) {
+    void Initialize(ASTContext &Ctx) override {
       Context = &Ctx;
 
       if (llvm::TimePassesIsEnabled)
@@ -94,7 +87,7 @@
         LLVMIRGeneration.stopTimer();
     }
 
-    virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+    bool HandleTopLevelDecl(DeclGroupRef D) override {
       PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
                                      Context->getSourceManager(),
                                      "LLVM IR generation of declaration");
@@ -110,7 +103,7 @@
       return true;
     }
 
-    virtual void HandleTranslationUnit(ASTContext &C) {
+    void HandleTranslationUnit(ASTContext &C) override {
       {
         PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
         if (llvm::TimePassesIsEnabled)
@@ -132,7 +125,7 @@
       if (!M) {
         // The module has been released by IR gen on failures, do not double
         // free.
-        TheModule.take();
+        TheModule.release();
         return;
       }
 
@@ -158,41 +151,49 @@
       void *OldContext = Ctx.getInlineAsmDiagnosticContext();
       Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
 
+      LLVMContext::DiagnosticHandlerTy OldDiagnosticHandler =
+          Ctx.getDiagnosticHandler();
+      void *OldDiagnosticContext = Ctx.getDiagnosticContext();
+      Ctx.setDiagnosticHandler(DiagnosticHandler, this);
+
       EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
+                        C.getTargetInfo().getTargetDescription(),
                         TheModule.get(), Action, AsmOutStream);
-      
+
       Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
+
+      Ctx.setDiagnosticHandler(OldDiagnosticHandler, OldDiagnosticContext);
     }
 
-    virtual void HandleTagDeclDefinition(TagDecl *D) {
+    void HandleTagDeclDefinition(TagDecl *D) override {
       PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
                                      Context->getSourceManager(),
                                      "LLVM IR generation of declaration");
       Gen->HandleTagDeclDefinition(D);
     }
 
-    virtual void HandleTagDeclRequiredDefinition(const TagDecl *D) {
+    void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
       Gen->HandleTagDeclRequiredDefinition(D);
     }
 
-    virtual void CompleteTentativeDefinition(VarDecl *D) {
+    void CompleteTentativeDefinition(VarDecl *D) override {
       Gen->CompleteTentativeDefinition(D);
     }
 
-    virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+    void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) override {
       Gen->HandleVTable(RD, DefinitionRequired);
     }
 
-    virtual void HandleLinkerOptionPragma(llvm::StringRef Opts) {
+    void HandleLinkerOptionPragma(llvm::StringRef Opts) override {
       Gen->HandleLinkerOptionPragma(Opts);
     }
 
-    virtual void HandleDetectMismatch(llvm::StringRef Name,
-                                      llvm::StringRef Value) {
+    void HandleDetectMismatch(llvm::StringRef Name,
+                              llvm::StringRef Value) override {
       Gen->HandleDetectMismatch(Name, Value);
     }
 
-    virtual void HandleDependentLibrary(llvm::StringRef Opts) {
+    void HandleDependentLibrary(llvm::StringRef Opts) override {
       Gen->HandleDependentLibrary(Opts);
     }
 
@@ -202,8 +203,23 @@
       ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
     }
 
+    static void DiagnosticHandler(const llvm::DiagnosticInfo &DI,
+                                  void *Context) {
+      ((BackendConsumer *)Context)->DiagnosticHandlerImpl(DI);
+    }
+
     void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
                                SourceLocation LocCookie);
+
+    void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI);
+    /// \brief Specialized handler for InlineAsm diagnostic.
+    /// \return True if the diagnostic has been successfully reported, false
+    /// otherwise.
+    bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D);
+    /// \brief Specialized handler for StackSize diagnostic.
+    /// \return True if the diagnostic has been successfully reported, false
+    /// otherwise.
+    bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D);
   };
   
   void BackendConsumer::anchor() {}
@@ -282,7 +298,116 @@
   Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message);
 }
 
-//
+#define ComputeDiagID(Severity, GroupName, DiagID)                             \
+  do {                                                                         \
+    switch (Severity) {                                                        \
+    case llvm::DS_Error:                                                       \
+      DiagID = diag::err_fe_##GroupName;                                       \
+      break;                                                                   \
+    case llvm::DS_Warning:                                                     \
+      DiagID = diag::warn_fe_##GroupName;                                      \
+      break;                                                                   \
+    case llvm::DS_Remark:                                                      \
+      llvm_unreachable("'remark' severity not expected");                      \
+      break;                                                                   \
+    case llvm::DS_Note:                                                        \
+      DiagID = diag::note_fe_##GroupName;                                      \
+      break;                                                                   \
+    }                                                                          \
+  } while (false)
+
+#define ComputeDiagRemarkID(Severity, GroupName, DiagID)                       \
+  do {                                                                         \
+    switch (Severity) {                                                        \
+    case llvm::DS_Error:                                                       \
+      DiagID = diag::err_fe_##GroupName;                                       \
+      break;                                                                   \
+    case llvm::DS_Warning:                                                     \
+      DiagID = diag::warn_fe_##GroupName;                                      \
+      break;                                                                   \
+    case llvm::DS_Remark:                                                      \
+      DiagID = diag::remark_fe_##GroupName;                                    \
+      break;                                                                   \
+    case llvm::DS_Note:                                                        \
+      DiagID = diag::note_fe_##GroupName;                                      \
+      break;                                                                   \
+    }                                                                          \
+  } while (false)
+
+bool
+BackendConsumer::InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D) {
+  unsigned DiagID;
+  ComputeDiagID(D.getSeverity(), inline_asm, DiagID);
+  std::string Message = D.getMsgStr().str();
+
+  // If this problem has clang-level source location information, report the
+  // issue as being a problem in the source with a note showing the instantiated
+  // code.
+  SourceLocation LocCookie =
+      SourceLocation::getFromRawEncoding(D.getLocCookie());
+  if (LocCookie.isValid())
+    Diags.Report(LocCookie, DiagID).AddString(Message);
+  else {
+    // Otherwise, report the backend diagnostic as occurring in the generated
+    // .s file.
+    // If Loc is invalid, we still need to report the diagnostic; it just
+    // gets no location info.
+    FullSourceLoc Loc;
+    Diags.Report(Loc, DiagID).AddString(Message);
+  }
+  // We handled all the possible severities.
+  return true;
+}
+
+bool
+BackendConsumer::StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D) {
+  if (D.getSeverity() != llvm::DS_Warning)
+    // For now, warnings are the only severity we support for StackSize
+    // diagnostics; we do not know how to format other severities.
+    return false;
+
+  // FIXME: We should demangle the function name.
+  // FIXME: Is there a way to get a location for that function?
+  FullSourceLoc Loc;
+  Diags.Report(Loc, diag::warn_fe_backend_frame_larger_than)
+      << D.getStackSize() << D.getFunction().getName();
+  return true;
+}
+
+/// \brief This function is invoked when the backend needs
+/// to report something to the user.
+void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
+  unsigned DiagID = diag::err_fe_inline_asm;
+  llvm::DiagnosticSeverity Severity = DI.getSeverity();
+  // Get the diagnostic ID based on the kind of diagnostic.
+  switch (DI.getKind()) {
+  case llvm::DK_InlineAsm:
+    if (InlineAsmDiagHandler(cast<DiagnosticInfoInlineAsm>(DI)))
+      return;
+    ComputeDiagID(Severity, inline_asm, DiagID);
+    break;
+  case llvm::DK_StackSize:
+    if (StackSizeDiagHandler(cast<DiagnosticInfoStackSize>(DI)))
+      return;
+    ComputeDiagID(Severity, backend_frame_larger_than, DiagID);
+    break;
+  default:
+    // Plugin IDs are not bound to any value as they are set dynamically.
+    ComputeDiagRemarkID(Severity, backend_plugin, DiagID);
+    break;
+  }
+  std::string MsgStorage;
+  {
+    raw_string_ostream Stream(MsgStorage);
+    DiagnosticPrinterRawOStream DP(Stream);
+    DI.print(DP);
+  }
+
+  // Report the backend message using the usual diagnostic mechanism.
+  FullSourceLoc Loc;
+  Diags.Report(Loc, DiagID).AddString(MsgStorage);
+}
+#undef ComputeDiagID
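
The DiagnosticHandler/DiagnosticHandlerImpl pair added above uses the same
context-pointer trampoline as the existing inline-asm handler: a static
function receives an opaque void*, casts it back to the BackendConsumer, and
forwards to a member function. A minimal, self-contained sketch of that
pattern, with illustrative names (DiagInfo, MyConsumer) that are not part of
this patch:

    #include <iostream>
    #include <string>

    struct DiagInfo { int Kind; std::string Msg; };
    typedef void (*HandlerTy)(const DiagInfo &, void *Context);

    struct MyConsumer {
      static void Handler(const DiagInfo &DI, void *Context) {
        // Recover the consumer from the opaque context pointer and dispatch.
        static_cast<MyConsumer *>(Context)->handleImpl(DI);
      }
      void handleImpl(const DiagInfo &DI) {
        std::cout << "backend diag kind " << DI.Kind << ": " << DI.Msg << "\n";
      }
    };

    // Stand-in for LLVMContext::setDiagnosticHandler plus a later emission.
    void invokeHandler(HandlerTy H, void *Ctx, const DiagInfo &DI) { H(DI, Ctx); }

    int main() {
      MyConsumer C;
      invokeHandler(&MyConsumer::Handler, &C, DiagInfo{2, "frame too large"});
    }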
 
 CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
   : Act(_Act), LinkModule(0),
@@ -310,9 +435,7 @@
   TheModule.reset(BEConsumer->takeModule());
 }
 
-llvm::Module *CodeGenAction::takeModule() {
-  return TheModule.take();
-}
+llvm::Module *CodeGenAction::takeModule() { return TheModule.release(); }
 
 llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
   OwnsVMContext = false;
@@ -342,7 +465,7 @@
 ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
                                               StringRef InFile) {
   BackendAction BA = static_cast<BackendAction>(Act);
-  OwningPtr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+  std::unique_ptr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
   if (BA != Backend_EmitNothing && !OS)
     return 0;
 
@@ -362,20 +485,20 @@
       return 0;
     }
 
-    LinkModuleToUse = getLazyBitcodeModule(BCBuf, *VMContext, &ErrorStr);
-    if (!LinkModuleToUse) {
+    ErrorOr<llvm::Module *> ModuleOrErr =
+        getLazyBitcodeModule(BCBuf, *VMContext);
+    if (error_code EC = ModuleOrErr.getError()) {
       CI.getDiagnostics().Report(diag::err_cannot_open_file)
-        << LinkBCFile << ErrorStr;
+        << LinkBCFile << EC.message();
       return 0;
     }
+    LinkModuleToUse = ModuleOrErr.get();
   }
 
-  BEConsumer = 
-      new BackendConsumer(BA, CI.getDiagnostics(),
-                          CI.getCodeGenOpts(), CI.getTargetOpts(),
-                          CI.getLangOpts(),
-                          CI.getFrontendOpts().ShowTimers, InFile,
-                          LinkModuleToUse, OS.take(), *VMContext);
+  BEConsumer = new BackendConsumer(BA, CI.getDiagnostics(), CI.getCodeGenOpts(),
+                                   CI.getTargetOpts(), CI.getLangOpts(),
+                                   CI.getFrontendOpts().ShowTimers, InFile,
+                                   LinkModuleToUse, OS.release(), *VMContext);
   return BEConsumer;
 }
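
The getLazyBitcodeModule call above now returns an ErrorOr-style result
instead of filling in an error string, so the caller checks getError() first
and only calls get() on success. A simplified sketch of that consumption
pattern, using a stand-in Result type rather than llvm::ErrorOr itself:

    #include <iostream>
    #include <string>
    #include <system_error>

    template <typename T> struct Result {   // stand-in for llvm::ErrorOr<T>
      T Value;
      std::error_code EC;
      std::error_code getError() const { return EC; }
      const T &get() const { return Value; }
    };

    Result<std::string> loadModule(bool Fail) {
      if (Fail)
        return {"", std::make_error_code(std::errc::no_such_file_or_directory)};
      return {"linked.bc", std::error_code()};
    }

    int main() {
      Result<std::string> ModuleOrErr = loadModule(false);
      if (std::error_code EC = ModuleOrErr.getError()) {
        std::cerr << "cannot open file: " << EC.message() << "\n";
        return 1;
      }
      std::cout << "loaded " << ModuleOrErr.get() << "\n";
    }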
 
@@ -408,31 +531,30 @@
         SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(),
         Err.getColumnNo() + 1);
 
-      // Get a custom diagnostic for the error. We strip off a leading
-      // diagnostic code if there is one.
+      // Strip off a leading diagnostic code if there is one.
       StringRef Msg = Err.getMessage();
       if (Msg.startswith("error: "))
         Msg = Msg.substr(7);
 
-      // Escape '%', which is interpreted as a format character.
-      SmallString<128> EscapedMessage;
-      for (unsigned i = 0, e = Msg.size(); i != e; ++i) {
-        if (Msg[i] == '%')
-          EscapedMessage += '%';
-        EscapedMessage += Msg[i];
-      }
+      unsigned DiagID =
+          CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");
 
-      unsigned DiagID = CI.getDiagnostics().getCustomDiagID(
-          DiagnosticsEngine::Error, EscapedMessage);
-
-      CI.getDiagnostics().Report(Loc, DiagID);
+      CI.getDiagnostics().Report(Loc, DiagID) << Msg;
       return;
     }
+    const TargetOptions &TargetOpts = CI.getTargetOpts();
+    if (TheModule->getTargetTriple() != TargetOpts.Triple) {
+      unsigned DiagID = CI.getDiagnostics().getCustomDiagID(
+          DiagnosticsEngine::Warning,
+          "overriding the module target triple with %0");
 
-    EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(),
-                      CI.getTargetOpts(), CI.getLangOpts(),
-                      TheModule.get(),
-                      BA, OS);
+      CI.getDiagnostics().Report(SourceLocation(), DiagID) << TargetOpts.Triple;
+      TheModule->setTargetTriple(TargetOpts.Triple);
+    }
+
+    EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(), TargetOpts,
+                      CI.getLangOpts(), CI.getTarget().getTargetDescription(),
+                      TheModule.get(), BA, OS);
     return;
   }
 
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index ce1b445..806540a 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -16,12 +16,12 @@
 #include "CGCXXABI.h"
 #include "CGDebugInfo.h"
 #include "CodeGenModule.h"
+#include "CodeGenPGO.h"
 #include "TargetInfo.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/Decl.h"
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/StmtCXX.h"
-#include "clang/Basic/OpenCL.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
 #include "clang/Frontend/CodeGenOptions.h"
@@ -44,7 +44,8 @@
       NextCleanupDestIndex(1), FirstBlockInfo(0), EHResumeBlock(0),
       ExceptionSlot(0), EHSelectorSlot(0), DebugInfo(CGM.getModuleDebugInfo()),
       DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(0),
-      SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0), NumReturnExprs(0),
+      PGO(cgm), SwitchInsn(0), SwitchWeights(0),
+      CaseRangeBlock(0), UnreachableBlock(0), NumReturnExprs(0),
       NumSimpleReturnExprs(0), CXXABIThisDecl(0), CXXABIThisValue(0),
       CXXThisValue(0), CXXDefaultInitExprThis(0),
       CXXStructorImplicitParamDecl(0), CXXStructorImplicitParamValue(0),
@@ -157,7 +158,7 @@
   // cleans up functions which started with a unified return block.
   if (ReturnBlock.getBlock()->hasOneUse()) {
     llvm::BranchInst *BI =
-      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
+      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
     if (BI && BI->isUnconditional() &&
         BI->getSuccessor(0) == ReturnBlock.getBlock()) {
       // Reset insertion point, including debug location, and delete the
@@ -275,6 +276,14 @@
 
   if (CGM.getCodeGenOpts().EmitDeclMetadata)
     EmitDeclMetadata();
+
+  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
+           I = DeferredReplacements.begin(),
+           E = DeferredReplacements.end();
+       I != E; ++I) {
+    I->first->replaceAllUsesWith(I->second);
+    I->first->eraseFromParent();
+  }
 }
 
 /// ShouldInstrumentFunction - Return true if the current function should be
@@ -381,7 +390,12 @@
       if (pointeeTy.isVolatileQualified())
         typeQuals += typeQuals.empty() ? "volatile" : " volatile";
     } else {
-      addressQuals.push_back(Builder.getInt32(0));
+      uint32_t AddrSpc = 0;
+      if (ty->isImageType())
+        AddrSpc =
+          CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
+
+      addressQuals.push_back(Builder.getInt32(AddrSpc));
 
       // Get argument type name.
       std::string typeName = ty.getUnqualifiedType().getAsString();
@@ -399,16 +413,17 @@
       if (ty.isVolatileQualified())
         typeQuals += typeQuals.empty() ? "volatile" : " volatile";
     }
-    
+
     argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));
 
     // Get image access qualifier:
     if (ty->isImageType()) {
-      if (parm->hasAttr<OpenCLImageAccessAttr>() &&
-          parm->getAttr<OpenCLImageAccessAttr>()->getAccess() == CLIA_write_only)
+      const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
+      if (A && A->isWriteOnly())
         accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
       else
         accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
+      // FIXME: what about read_write?
     } else
       accessQuals.push_back(llvm::MDString::get(Context, "none"));
 
@@ -438,16 +453,15 @@
     GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs,
                          Builder, getContext());
 
-  if (FD->hasAttr<VecTypeHintAttr>()) {
-    VecTypeHintAttr *attr = FD->getAttr<VecTypeHintAttr>();
-    QualType hintQTy = attr->getTypeHint();
+  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
+    QualType hintQTy = A->getTypeHint();
     const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
     bool isSignedInteger =
         hintQTy->isSignedIntegerType() ||
         (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
     llvm::Value *attrMDArgs[] = {
       llvm::MDString::get(Context, "vec_type_hint"),
-      llvm::UndefValue::get(CGM.getTypes().ConvertType(attr->getTypeHint())),
+      llvm::UndefValue::get(CGM.getTypes().ConvertType(A->getTypeHint())),
       llvm::ConstantInt::get(
           llvm::IntegerType::get(Context, 32),
           llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0)))
@@ -455,24 +469,22 @@
     kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
   }
 
-  if (FD->hasAttr<WorkGroupSizeHintAttr>()) {
-    WorkGroupSizeHintAttr *attr = FD->getAttr<WorkGroupSizeHintAttr>();
+  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
     llvm::Value *attrMDArgs[] = {
       llvm::MDString::get(Context, "work_group_size_hint"),
-      Builder.getInt32(attr->getXDim()),
-      Builder.getInt32(attr->getYDim()),
-      Builder.getInt32(attr->getZDim())
+      Builder.getInt32(A->getXDim()),
+      Builder.getInt32(A->getYDim()),
+      Builder.getInt32(A->getZDim())
     };
     kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
   }
 
-  if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
-    ReqdWorkGroupSizeAttr *attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
+  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
     llvm::Value *attrMDArgs[] = {
       llvm::MDString::get(Context, "reqd_work_group_size"),
-      Builder.getInt32(attr->getXDim()),
-      Builder.getInt32(attr->getYDim()),
-      Builder.getInt32(attr->getZDim())
+      Builder.getInt32(A->getXDim()),
+      Builder.getInt32(A->getYDim()),
+      Builder.getInt32(A->getZDim())
     };
     kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
   }
@@ -505,15 +517,18 @@
   }
 
   // Pass inline keyword to optimizer if it appears explicitly on any
-  // declaration.
-  if (!CGM.getCodeGenOpts().NoInline)
-    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
-      for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
-             RE = FD->redecls_end(); RI != RE; ++RI)
+  // declaration. Also, in the case of -fno-inline, attach the NoInline
+  // attribute to all functions that are not marked AlwaysInline.
+  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+    if (!CGM.getCodeGenOpts().NoInline) {
+      for (auto RI : FD->redecls())
         if (RI->isInlineSpecified()) {
           Fn->addFnAttr(llvm::Attribute::InlineHint);
           break;
         }
+    } else if (!FD->hasAttr<AlwaysInlineAttr>())
+      Fn->addFnAttr(llvm::Attribute::NoInline);
+  }
 
   if (getLangOpts().OpenCL) {
     // Add metadata for a kernel function.
@@ -581,6 +596,14 @@
     // Indirect aggregate return; emit returned value directly into sret slot.
     // This reduces code size, and affects correctness in C++.
     ReturnValue = CurFn->arg_begin();
+  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
+             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
+    // Load the sret pointer from the argument struct and return into that.
+    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
+    llvm::Function::arg_iterator EI = CurFn->arg_end();
+    --EI;
+    llvm::Value *Addr = Builder.CreateStructGEP(EI, Idx);
+    ReturnValue = Builder.CreateLoad(Addr, "agg.result");
   } else {
     ReturnValue = CreateIRTemp(RetTy, "retval");
 
@@ -645,12 +668,34 @@
 
 void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                        const Stmt *Body) {
+  RegionCounter Cnt = getPGORegionCounter(Body);
+  Cnt.beginRegion(Builder);
   if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
     EmitCompoundStmtWithoutScope(*S);
   else
     EmitStmt(Body);
 }
 
+/// When instrumenting to collect profile data, the counts for some blocks
+/// such as switch cases must not include the fall-through counts, so
+/// emit a branch around the instrumentation code. When not instrumenting,
+/// this just calls EmitBlock().
+void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
+                                               RegionCounter &Cnt) {
+  llvm::BasicBlock *SkipCountBB = 0;
+  if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
+    // When instrumenting for profiling, the fallthrough to certain
+    // statements needs to skip over the instrumentation code so that we
+    // get an accurate count.
+    SkipCountBB = createBasicBlock("skipcount");
+    EmitBranch(SkipCountBB);
+  }
+  EmitBlock(BB);
+  Cnt.beginRegion(Builder, /*AddIncomingFallThrough=*/true);
+  if (SkipCountBB)
+    EmitBlock(SkipCountBB);
+}
+
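A toy model of the control flow EmitBlockWithFallThrough sets up: when
instrumenting, the fall-through edge branches around the counter increment so
only direct entries to the block are counted. Ordinary C++ control flow stands
in for the basic blocks here, and the names are illustrative only.

    #include <iostream>

    static int CaseCounter = 0;

    void enterCase(bool FallThrough, bool Instrumenting) {
      if (Instrumenting && FallThrough)
        goto skipcount;   // fall-through edge branches around the counter bump
      ++CaseCounter;      // Cnt.beginRegion(): counts only direct entries
    skipcount:
      std::cout << "case body, counter = " << CaseCounter << "\n";
    }

    int main() {
      enterCase(/*FallThrough=*/false, /*Instrumenting=*/true);  // counted
      enterCase(/*FallThrough=*/true, /*Instrumenting=*/true);   // not counted
    }
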
 /// Tries to mark the given function nounwind based on the
 /// non-existence of any throwing calls within it.  We believe this is
 /// lightweight enough to do at -O0.
@@ -691,19 +736,22 @@
     DebugInfo = NULL; // disable debug info indefinitely for this function
 
   FunctionArgList Args;
-  QualType ResTy = FD->getResultType();
+  QualType ResTy = FD->getReturnType();
 
   CurGD = GD;
-  const CXXMethodDecl *MD;
-  if ((MD = dyn_cast<CXXMethodDecl>(FD)) && MD->isInstance()) {
+  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+  if (MD && MD->isInstance()) {
     if (CGM.getCXXABI().HasThisReturn(GD))
       ResTy = MD->getThisType(getContext());
-    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);
+    CGM.getCXXABI().buildThisParam(*this, Args);
   }
 
   for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
     Args.push_back(FD->getParamDecl(i));
 
+  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
+    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
+
   SourceRange BodyRange;
   if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
   CurEHLocation = BodyRange.getEnd();
@@ -712,6 +760,7 @@
   StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());
 
   // Generate the body of the function.
+  PGO.assignRegionCounters(GD.getDecl(), CurFn);
   if (isa<CXXDestructorDecl>(FD))
     EmitDestructorBody(Args);
   else if (isa<CXXConstructorDecl>(FD))
@@ -753,7 +802,7 @@
   //   If the '}' that terminates a function is reached, and the value of the
   //   function call is used by the caller, the behavior is undefined.
   if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() &&
-      !FD->getResultType()->isVoidType() && Builder.GetInsertBlock()) {
+      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
     if (SanOpts->Return)
       EmitCheck(Builder.getFalse(), "missing_return",
                 EmitCheckSourceLocation(FD->getLocation()),
@@ -771,6 +820,9 @@
   // a quick pass now to see if we can.
   if (!CurFn->doesNotThrow())
     TryMarkNoThrow(CurFn);
+
+  PGO.emitInstrumentationData();
+  PGO.destroyRegionCounters();
 }
 
 /// ContainsLabel - Return true if the statement contains a label in it.  If
@@ -869,19 +921,25 @@
 ///
 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                            llvm::BasicBlock *TrueBlock,
-                                           llvm::BasicBlock *FalseBlock) {
+                                           llvm::BasicBlock *FalseBlock,
+                                           uint64_t TrueCount) {
   Cond = Cond->IgnoreParens();
 
   if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+
     // Handle X && Y in a condition.
     if (CondBOp->getOpcode() == BO_LAnd) {
+      RegionCounter Cnt = getPGORegionCounter(CondBOp);
+
       // If we have "1 && X", simplify the code.  "0 && X" would have constant
       // folded if the case was simple enough.
       bool ConstantBool = false;
       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
           ConstantBool) {
         // br(1 && X) -> br(X).
-        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+        Cnt.beginRegion(Builder);
+        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
+                                    TrueCount);
       }
 
       // If we have "X && 1", simplify the code to use an uncond branch.
@@ -889,33 +947,42 @@
       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
           ConstantBool) {
         // br(X && 1) -> br(X).
-        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
+                                    TrueCount);
       }
 
       // Emit the LHS as a conditional.  If the LHS conditional is false, we
       // want to jump to the FalseBlock.
       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+      // The counter tells us how often we evaluate the RHS, and all of
+      // TrueCount can be propagated to that branch.
+      uint64_t RHSCount = Cnt.getCount();
 
       ConditionalEvaluation eval(*this);
-      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
+      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
       EmitBlock(LHSTrue);
 
       // Any temporaries created here are conditional.
+      Cnt.beginRegion(Builder);
       eval.begin(*this);
-      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
       eval.end(*this);
 
       return;
     }
 
     if (CondBOp->getOpcode() == BO_LOr) {
+      RegionCounter Cnt = getPGORegionCounter(CondBOp);
+
       // If we have "0 || X", simplify the code.  "1 || X" would have constant
       // folded if the case was simple enough.
       bool ConstantBool = false;
       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
           !ConstantBool) {
         // br(0 || X) -> br(X).
-        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+        Cnt.beginRegion(Builder);
+        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
+                                    TrueCount);
       }
 
       // If we have "X || 0", simplify the code to use an uncond branch.
@@ -923,20 +990,28 @@
       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
           !ConstantBool) {
         // br(X || 0) -> br(X).
-        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
+                                    TrueCount);
       }
 
       // Emit the LHS as a conditional.  If the LHS conditional is true, we
       // want to jump to the TrueBlock.
       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+      // We have the count for entry to the RHS and for the whole expression
+      // being true, so we can divvy up the true count between the short
+      // circuit and the RHS.
+      uint64_t LHSCount = Cnt.getParentCount() - Cnt.getCount();
+      uint64_t RHSCount = TrueCount - LHSCount;
 
       ConditionalEvaluation eval(*this);
-      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
+      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
       EmitBlock(LHSFalse);
 
       // Any temporaries created here are conditional.
+      Cnt.beginRegion(Builder);
       eval.begin(*this);
-      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
+
       eval.end(*this);
 
       return;
@@ -945,8 +1020,13 @@
 
   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
     // br(!x, t, f) -> br(x, f, t)
-    if (CondUOp->getOpcode() == UO_LNot)
-      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
+    if (CondUOp->getOpcode() == UO_LNot) {
+      // Negate the count.
+      uint64_t FalseCount = PGO.getCurrentRegionCount() - TrueCount;
+      // Negate the condition and swap the destination blocks.
+      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
+                                  FalseCount);
+    }
   }
 
   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
@@ -954,17 +1034,32 @@
     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
 
+    RegionCounter Cnt = getPGORegionCounter(CondOp);
     ConditionalEvaluation cond(*this);
-    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, Cnt.getCount());
+
+    // When computing PGO branch weights, we only know the overall count for
+    // the true block. This code is essentially doing tail duplication of the
+    // naive code-gen, introducing new edges for which counts are not
+    // available. Divide the counts proportionally between the LHS and RHS of
+    // the conditional operator.
+    uint64_t LHSScaledTrueCount = 0;
+    if (TrueCount) {
+      double LHSRatio = Cnt.getCount() / (double) Cnt.getParentCount();
+      LHSScaledTrueCount = TrueCount * LHSRatio;
+    }
 
     cond.begin(*this);
     EmitBlock(LHSBlock);
-    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+    Cnt.beginRegion(Builder);
+    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
+                         LHSScaledTrueCount);
     cond.end(*this);
 
     cond.begin(*this);
     EmitBlock(RHSBlock);
-    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
+                         TrueCount - LHSScaledTrueCount);
     cond.end(*this);
 
     return;
@@ -980,9 +1075,15 @@
     return;
   }
 
+  // Create branch weights based on the number of times we get here and the
+  // number of times the condition should be true.
+  uint64_t CurrentCount = std::max(PGO.getCurrentRegionCount(), TrueCount);
+  llvm::MDNode *Weights = PGO.createBranchWeights(TrueCount,
+                                                  CurrentCount - TrueCount);
+
   // Emit the code with the fully general case.
   llvm::Value *CondV = EvaluateExprAsBool(Cond);
-  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
+  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights);
 }
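
The PGO changes above thread an expected true count through
EmitBranchOnBoolExpr. For the conditional operator, only the overall TrueCount
is known, so it is split between the two arms in proportion to how often each
arm was entered. A small worked example of that arithmetic (the counts are
made up for illustration):

    #include <cstdint>
    #include <iostream>

    int main() {
      uint64_t ParentCount = 100; // times the ?: expression was evaluated
      uint64_t LHSCount = 30;     // times the condition was true (LHS arm taken)
      uint64_t TrueCount = 40;    // times the whole branch condition was true

      double LHSRatio = LHSCount / (double)ParentCount;
      uint64_t LHSScaledTrueCount = (uint64_t)(TrueCount * LHSRatio); // 12
      uint64_t RHSScaledTrueCount = TrueCount - LHSScaledTrueCount;   // 28

      std::cout << LHSScaledTrueCount << " " << RHSScaledTrueCount << "\n";
    }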
 
 /// ErrorUnsupported - Print out an error that codegen doesn't support the
@@ -1075,7 +1176,7 @@
                                           getContext().getAsArrayType(Ty))) {
       QualType eltType;
       llvm::Value *numElts;
-      llvm::tie(numElts, eltType) = getVLASize(vlaType);
+      std::tie(numElts, eltType) = getVLASize(vlaType);
 
       SizeVal = numElts;
       CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
@@ -1264,7 +1365,7 @@
       numElements = vlaSize;
     } else {
       // It's undefined behavior if this wraps around, so mark it that way.
-      // FIXME: Teach -fcatch-undefined-behavior to trap this.
+      // FIXME: Teach -fsanitize=undefined to trap this.
       numElements = Builder.CreateNUWMul(numElements, vlaSize);
     }
   } while ((type = getContext().getAsVariableArrayType(elementType)));
@@ -1308,6 +1409,10 @@
     case Type::ObjCObjectPointer:
       llvm_unreachable("type class is never variably-modified!");
 
+    case Type::Adjusted:
+      type = cast<AdjustedType>(ty)->getAdjustedType();
+      break;
+
     case Type::Decayed:
       type = cast<DecayedType>(ty)->getPointeeType();
       break;
@@ -1375,7 +1480,7 @@
 
     case Type::FunctionProto:
     case Type::FunctionNoProto:
-      type = cast<FunctionType>(ty)->getResultType();
+      type = cast<FunctionType>(ty)->getReturnType();
       break;
 
     case Type::Paren:
@@ -1464,12 +1569,10 @@
   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
   // FIXME We create a new bitcast for every annotation because that's what
   // llvm-gcc was doing.
-  for (specific_attr_iterator<AnnotateAttr>
-       ai = D->specific_attr_begin<AnnotateAttr>(),
-       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
+  for (const auto *I : D->specific_attrs<AnnotateAttr>())
     EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                        Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
-                       (*ai)->getAnnotation(), D->getLocation());
+                       I->getAnnotation(), D->getLocation());
 }
 
 llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
@@ -1479,15 +1582,13 @@
   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                     CGM.Int8PtrTy);
 
-  for (specific_attr_iterator<AnnotateAttr>
-       ai = D->specific_attr_begin<AnnotateAttr>(),
-       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
+  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
     // FIXME Always emit the cast inst so we can differentiate between
     // annotation on the first field of a struct and annotation on the struct
     // itself.
     if (VTy != CGM.Int8PtrTy)
       V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
-    V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
+    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
     V = Builder.CreateBitCast(V, VTy);
   }
 
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index db291e3..5b0653a 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -17,8 +17,9 @@
 #include "CGBuilder.h"
 #include "CGDebugInfo.h"
 #include "CGValue.h"
-#include "EHScopeStack.h"
 #include "CodeGenModule.h"
+#include "CodeGenPGO.h"
+#include "EHScopeStack.h"
 #include "clang/AST/CharUnits.h"
 #include "clang/AST/ExprCXX.h"
 #include "clang/AST/ExprObjC.h"
@@ -30,8 +31,8 @@
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
 
 namespace llvm {
   class BasicBlock;
@@ -687,8 +688,8 @@
       // act exactly like l-values but are formally required to be
       // r-values in C.
       return expr->isGLValue() ||
-             expr->getType()->isRecordType() ||
-             expr->getType()->isFunctionType();
+             expr->getType()->isFunctionType() ||
+             hasAggregateEvaluationKind(expr->getType());
     }
 
     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
@@ -827,9 +828,21 @@
   };
   SmallVector<BreakContinue, 8> BreakContinueStack;
 
+  CodeGenPGO PGO;
+
+public:
+  /// Get a counter for instrumentation of the region associated with the given
+  /// statement.
+  RegionCounter getPGORegionCounter(const Stmt *S) {
+    return RegionCounter(PGO, S);
+  }
+private:
+
   /// SwitchInsn - This is nearest current switch instruction. It is null if
   /// current context is not in a switch.
   llvm::SwitchInst *SwitchInsn;
+  /// The branch weights of SwitchInsn when doing instrumentation-based PGO.
+  SmallVector<uint64_t, 16> *SwitchWeights;
 
   /// CaseRangeBlock - This block holds if condition check for last case
   /// statement range in current switch instruction.
@@ -1019,6 +1032,7 @@
   void pushLifetimeExtendedDestroy(CleanupKind kind, llvm::Value *addr,
                                    QualType type, Destroyer *destroyer,
                                    bool useEHCleanupForArray);
+  void pushStackRestore(CleanupKind kind, llvm::Value *SPMem);
   void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
                    bool useEHCleanupForArray);
   llvm::Function *generateDestroyHelper(llvm::Constant *addr, QualType type,
@@ -1137,6 +1151,7 @@
   void EmitDestructorBody(FunctionArgList &Args);
   void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
   void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
+  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, RegionCounter &Cnt);
 
   void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
                                   CallArgList &CallArgs);
@@ -1380,6 +1395,10 @@
                                  AggValueSlot::IsNotAliased);
   }
 
+  /// CreateInAllocaTmp - Create a temporary memory object for the given
+  /// aggregate type.
+  AggValueSlot CreateInAllocaTmp(QualType T, const Twine &Name = "inalloca");
+
   /// Emit a cast to void* in the appropriate address space.
   llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
 
@@ -1767,7 +1786,8 @@
                          llvm::GlobalValue::LinkageTypes Linkage);
 
   /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
-  void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
+  void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, bool ArgIsPointer,
+                    unsigned ArgNo);
 
   /// protectFromPeepholes - Protect a value that we're intending to
   /// store to the side, but which will probably be used later, from
@@ -2159,6 +2179,18 @@
   llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty);
   llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
   llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+  llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
+                                         unsigned LLVMIntrinsic,
+                                         unsigned AltLLVMIntrinsic,
+                                         const char *NameHint,
+                                         unsigned Modifier,
+                                         const CallExpr *E,
+                                         SmallVectorImpl<llvm::Value *> &Ops,
+                                         llvm::Value *Align = 0);
+  llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
+                                          unsigned Modifier, llvm::Type *ArgTy,
+                                          const CallExpr *E);
   llvm::Value *EmitNeonCall(llvm::Function *F,
                             SmallVectorImpl<llvm::Value*> &O,
                             const char *name,
@@ -2168,6 +2200,20 @@
                                    bool negateForRightShift);
   llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
                                  llvm::Type *Ty, bool usgn, const char *name);
+  llvm::Value *EmitConcatVectors(llvm::Value *Lo, llvm::Value *Hi,
+                                 llvm::Type *ArgTy);
+  llvm::Value *EmitExtractHigh(llvm::Value *In, llvm::Type *ResTy);
+  // Helper functions for EmitARM64BuiltinExpr.
+  llvm::Value *vectorWrapScalar8(llvm::Value *Op);
+  llvm::Value *vectorWrapScalar16(llvm::Value *Op);
+  llvm::Value *emitVectorWrappedScalar8Intrinsic(
+      unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name);
+  llvm::Value *emitVectorWrappedScalar16Intrinsic(
+      unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name);
+  llvm::Value *EmitARM64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  llvm::Value *EmitNeon64Call(llvm::Function *F,
+                              llvm::SmallVectorImpl<llvm::Value *> &O,
+                              const char *name);
 
   llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
   llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -2304,9 +2350,9 @@
 
   /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
   /// a static local variable.
-  llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
-                                            const char *Separator,
-                                       llvm::GlobalValue::LinkageTypes Linkage);
+  llvm::Constant *CreateStaticVarDecl(const VarDecl &D,
+                                      const char *Separator,
+                                      llvm::GlobalValue::LinkageTypes Linkage);
 
   /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
   /// global variable that has already been created for it.  If the initializer
@@ -2413,8 +2459,10 @@
   /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
   /// if statement) to the specified blocks.  Based on the condition, this might
   /// try to simplify the codegen of the conditional based on the branch.
+  /// TrueCount should be the number of times we expect the condition to
+  /// evaluate to true based on PGO data.
   void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
-                            llvm::BasicBlock *FalseBlock);
+                            llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
 
   /// \brief Emit a description of a type in a format suitable for passing to
   /// a runtime sanitizer handler.
@@ -2467,6 +2515,11 @@
   llvm::MDNode *getRangeForLoadFromType(QualType Ty);
   void EmitReturnOfRValue(RValue RV, QualType Ty);
 
+  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
+
+  llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
+  DeferredReplacements;
+
   /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
   /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
   ///
@@ -2492,69 +2545,81 @@
                                   std::string &ConstraintStr,
                                   SourceLocation Loc);
 
+public:
   /// EmitCallArgs - Emit call arguments for a function.
-  /// The CallArgTypeInfo parameter is used for iterating over the known
-  /// argument types of the function being called.
-  template<typename T>
-  void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
+  template <typename T>
+  void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
                     CallExpr::const_arg_iterator ArgBeg,
                     CallExpr::const_arg_iterator ArgEnd,
                     bool ForceColumnInfo = false) {
-    CGDebugInfo *DI = getDebugInfo();
-    SourceLocation CallLoc;
-    if (DI) CallLoc = DI->getLocation();
-
-    CallExpr::const_arg_iterator Arg = ArgBeg;
-
-    // First, use the argument types that the type info knows about
     if (CallArgTypeInfo) {
-      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
-           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
-        assert(Arg != ArgEnd && "Running over edge of argument list!");
-        QualType ArgType = *I;
-#ifndef NDEBUG
-        QualType ActualArgType = Arg->getType();
-        if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
-          QualType ActualBaseType =
-            ActualArgType->getAs<PointerType>()->getPointeeType();
-          QualType ArgBaseType =
-            ArgType->getAs<PointerType>()->getPointeeType();
-          if (ArgBaseType->isVariableArrayType()) {
-            if (const VariableArrayType *VAT =
-                getContext().getAsVariableArrayType(ActualBaseType)) {
-              if (!VAT->getSizeExpr())
-                ActualArgType = ArgType;
-            }
-          }
-        }
-        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
-               getTypePtr() ==
-               getContext().getCanonicalType(ActualArgType).getTypePtr() &&
-               "type mismatch in call argument!");
-#endif
-        EmitCallArg(Args, *Arg, ArgType);
-
-        // Each argument expression could modify the debug
-        // location. Restore it.
-        if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
-      }
-
-      // Either we've emitted all the call args, or we have a call to a
-      // variadic function.
-      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
-             "Extra arguments in non-variadic function!");
-
-    }
-
-    // If we still have any arguments, emit them using the type of the argument.
-    for (; Arg != ArgEnd; ++Arg) {
-      EmitCallArg(Args, *Arg, Arg->getType());
-
-      // Restore the debug location.
-      if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
+      EmitCallArgs(Args, CallArgTypeInfo->isVariadic(),
+                   CallArgTypeInfo->param_type_begin(),
+                   CallArgTypeInfo->param_type_end(), ArgBeg, ArgEnd,
+                   ForceColumnInfo);
+    } else {
+      // T::param_type_iterator might not have a default ctor.
+      const QualType *NoIter = 0;
+      EmitCallArgs(Args, /*AllowExtraArguments=*/true, NoIter, NoIter, ArgBeg,
+                   ArgEnd, ForceColumnInfo);
     }
   }
 
+  template<typename ArgTypeIterator>
+  void EmitCallArgs(CallArgList& Args,
+                    bool AllowExtraArguments,
+                    ArgTypeIterator ArgTypeBeg,
+                    ArgTypeIterator ArgTypeEnd,
+                    CallExpr::const_arg_iterator ArgBeg,
+                    CallExpr::const_arg_iterator ArgEnd,
+                    bool ForceColumnInfo = false) {
+    SmallVector<QualType, 16> ArgTypes;
+    CallExpr::const_arg_iterator Arg = ArgBeg;
+
+    // First, use the argument types that the type info knows about
+    for (ArgTypeIterator I = ArgTypeBeg, E = ArgTypeEnd; I != E; ++I, ++Arg) {
+      assert(Arg != ArgEnd && "Running over edge of argument list!");
+#ifndef NDEBUG
+      QualType ArgType = *I;
+      QualType ActualArgType = Arg->getType();
+      if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
+        QualType ActualBaseType =
+            ActualArgType->getAs<PointerType>()->getPointeeType();
+        QualType ArgBaseType =
+            ArgType->getAs<PointerType>()->getPointeeType();
+        if (ArgBaseType->isVariableArrayType()) {
+          if (const VariableArrayType *VAT =
+              getContext().getAsVariableArrayType(ActualBaseType)) {
+            if (!VAT->getSizeExpr())
+              ActualArgType = ArgType;
+          }
+        }
+      }
+      assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+             getTypePtr() ==
+             getContext().getCanonicalType(ActualArgType).getTypePtr() &&
+             "type mismatch in call argument!");
+#endif
+      ArgTypes.push_back(*I);
+    }
+
+    // Either we've emitted all the call args, or we have a call to a
+    // variadic function or some other call that allows extra arguments.
+    assert((Arg == ArgEnd || AllowExtraArguments) &&
+           "Extra arguments in non-variadic function!");
+
+    // If we still have any arguments, emit them using the type of the argument.
+    for (; Arg != ArgEnd; ++Arg)
+      ArgTypes.push_back(Arg->getType());
+
+    EmitCallArgs(Args, ArgTypes, ArgBeg, ArgEnd, ForceColumnInfo);
+  }
+
+  void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
+                    CallExpr::const_arg_iterator ArgBeg,
+                    CallExpr::const_arg_iterator ArgEnd, bool ForceColumnInfo);
+
+private:
   const TargetCodeGenInfo &getTargetHooks() const {
     return CGM.getTargetCodeGenInfo();
   }
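
The EmitCallArgs rewrite above works in two phases: first collect the
parameter types the prototype knows about, then fall back to each remaining
argument's own type for the variadic tail, asserting when extra arguments are
not allowed. A simplified sketch of that collection logic, using plain strings
in place of QualType:

    #include <cassert>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      // Prototype: int printf_like(int, const char *, ...)
      std::vector<std::string> ParamTypes = {"int", "const char *"};
      std::vector<std::string> ArgTypes = {"int", "const char *", "double"};
      bool AllowExtraArguments = true; // variadic call

      std::vector<std::string> Collected;
      size_t Arg = 0;
      for (const std::string &P : ParamTypes) {
        assert(Arg < ArgTypes.size() && "Running over edge of argument list!");
        Collected.push_back(P); // use the known parameter type
        ++Arg;
      }
      assert((Arg == ArgTypes.size() || AllowExtraArguments) &&
             "Extra arguments in non-variadic function!");
      for (; Arg < ArgTypes.size(); ++Arg)
        Collected.push_back(ArgTypes[Arg]); // use the argument's own type

      for (const std::string &T : Collected)
        std::cout << T << "\n"; // int, const char *, double
    }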
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 792fbfc..c26f769 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -19,6 +19,7 @@
 #include "CGObjCRuntime.h"
 #include "CGOpenCLRuntime.h"
 #include "CodeGenFunction.h"
+#include "CodeGenPGO.h"
 #include "CodeGenTBAA.h"
 #include "TargetInfo.h"
 #include "clang/AST/ASTContext.h"
@@ -40,30 +41,30 @@
 #include "clang/Sema/SemaDiagnostic.h"
 #include "llvm/ADT/APSInt.h"
 #include "llvm/ADT/Triple.h"
+#include "llvm/IR/CallSite.h"
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
 #include "llvm/Support/ConvertUTF.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/Mangler.h"
 
 using namespace clang;
 using namespace CodeGen;
 
 static const char AnnotationSection[] = "llvm.metadata";
 
-static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
+static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
   switch (CGM.getTarget().getCXXABI().getKind()) {
   case TargetCXXABI::GenericAArch64:
   case TargetCXXABI::GenericARM:
   case TargetCXXABI::iOS:
+  case TargetCXXABI::iOS64:
   case TargetCXXABI::GenericItanium:
-    return *CreateItaniumCXXABI(CGM);
+    return CreateItaniumCXXABI(CGM);
   case TargetCXXABI::Microsoft:
-    return *CreateMicrosoftCXXABI(CGM);
+    return CreateMicrosoftCXXABI(CGM);
   }
 
   llvm_unreachable("invalid C++ ABI kind");
@@ -77,7 +78,8 @@
       ABI(createCXXABI(*this)), VMContext(M.getContext()), TBAA(0),
       TheTargetCodeGenInfo(0), Types(*this), VTables(*this), ObjCRuntime(0),
       OpenCLRuntime(0), CUDARuntime(0), DebugInfo(0), ARCData(0),
-      NoObjCARCExceptionsMetadata(0), RRData(0), CFConstantStringClassRef(0),
+      NoObjCARCExceptionsMetadata(0), RRData(0), PGOData(0),
+      CFConstantStringClassRef(0),
       ConstantStringClassRef(0), NSConstantStringType(0),
       NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockObjectAssign(0),
       BlockObjectDispose(0), BlockDescriptorType(0), GenericBlockLiteralType(0),
@@ -117,7 +119,7 @@
   if (SanOpts.Thread ||
       (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
     TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(),
-                           ABI.getMangleContext());
+                           getCXXABI().getMangleContext());
 
   // If debug info or coverage generation is enabled, create the CGDebugInfo
   // object.
@@ -131,6 +133,9 @@
   if (C.getLangOpts().ObjCAutoRefCount)
     ARCData = new ARCEntrypoints();
   RRData = new RREntrypoints();
+
+  if (!CodeGenOpts.InstrProfileInput.empty())
+    PGOData = new PGOProfileData(*this, CodeGenOpts.InstrProfileInput);
 }
 
 CodeGenModule::~CodeGenModule() {
@@ -138,7 +143,6 @@
   delete OpenCLRuntime;
   delete CUDARuntime;
   delete TheTargetCodeGenInfo;
-  delete &ABI;
   delete TBAA;
   delete DebugInfo;
   delete ARCData;
@@ -184,10 +188,14 @@
     llvm::Function *OldF = cast<llvm::Function>(Entry);
     llvm::Function *NewF = dyn_cast<llvm::Function>(Replacement);
     if (!NewF) {
-      llvm::ConstantExpr *CE = cast<llvm::ConstantExpr>(Replacement);
-      assert(CE->getOpcode() == llvm::Instruction::BitCast ||
-             CE->getOpcode() == llvm::Instruction::GetElementPtr);
-      NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
+      if (llvm::GlobalAlias *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
+        NewF = dyn_cast<llvm::Function>(Alias->getAliasedGlobal());
+      } else {
+        llvm::ConstantExpr *CE = cast<llvm::ConstantExpr>(Replacement);
+        assert(CE->getOpcode() == llvm::Instruction::BitCast ||
+               CE->getOpcode() == llvm::Instruction::GetElementPtr);
+        NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
+      }
     }
 
     // Replace old with new, but keep the old order.
@@ -201,6 +209,9 @@
 }
 
 void CodeGenModule::checkAliases() {
+  // Check if the constructed aliases are well formed. It is really unfortunate
+  // that we have to do this in CodeGen, but we only construct mangled names
+  // and aliases during codegen.
   bool Error = false;
   for (std::vector<GlobalDecl>::iterator I = Aliases.begin(),
          E = Aliases.end(); I != E; ++I) {
@@ -211,12 +222,37 @@
     llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
     llvm::GlobalAlias *Alias = cast<llvm::GlobalAlias>(Entry);
     llvm::GlobalValue *GV = Alias->getAliasedGlobal();
-    if (GV->isDeclaration()) {
-      Error = true;
-      getDiags().Report(AA->getLocation(), diag::err_alias_to_undefined);
-    } else if (!Alias->resolveAliasedGlobal(/*stopOnWeak*/ false)) {
+    if (!GV) {
       Error = true;
       getDiags().Report(AA->getLocation(), diag::err_cyclic_alias);
+    } else if (GV->isDeclaration()) {
+      Error = true;
+      getDiags().Report(AA->getLocation(), diag::err_alias_to_undefined);
+    }
+
+    // We have to handle aliases to weak aliases here. LLVM itself disallows
+    // this since the object semantics would not match those of the IR. For
+    // compatibility with gcc we implement it by just pointing the alias
+    // to its aliasee's aliasee. We also warn, since the user is probably
+    // expecting the link to be weak.
+    llvm::Constant *Aliasee = Alias->getAliasee();
+    llvm::GlobalValue *AliaseeGV;
+    if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee)) {
+      assert((CE->getOpcode() == llvm::Instruction::BitCast ||
+              CE->getOpcode() == llvm::Instruction::AddrSpaceCast) &&
+             "Unsupported aliasee");
+      AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
+    } else {
+      AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
+    }
+    if (auto GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
+      if (GA->mayBeOverridden()) {
+        getDiags().Report(AA->getLocation(), diag::warn_alias_to_weak_alias)
+          << GA->getAliasedGlobal()->getName() << GA->getName();
+        Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+            GA->getAliasee(), Alias->getType());
+        Alias->setAliasee(Aliasee);
+      }
     }
   }
   if (!Error)
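
The weak-alias handling above rewrites an alias whose target is itself a weak
alias so that it points directly at the underlying definition, emitting a
warning along the way. An illustrative model of that fixup with plain structs
(these are stand-ins, not LLVM's GlobalAlias API):

    #include <iostream>
    #include <string>

    struct Global {
      std::string Name;
      Global *Aliasee; // non-null when this global is an alias
      bool Weak;       // true for weak definitions
    };

    void fixAliasToWeakAlias(Global &Alias) {
      Global *Target = Alias.Aliasee;
      if (Target && Target->Weak && Target->Aliasee) {
        std::cout << "warning: alias '" << Alias.Name
                  << "' refers to weak alias '" << Target->Name << "'\n";
        Alias.Aliasee = Target->Aliasee; // point at the aliasee's aliasee
      }
    }

    int main() {
      Global F = {"f", nullptr, false};
      Global WeakA = {"a", &F, true};
      Global B = {"b", &WeakA, false};
      fixAliasToWeakAlias(B);
      std::cout << B.Name << " -> " << B.Aliasee->Name << "\n"; // b -> f
    }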
@@ -233,6 +269,10 @@
   }
 }
 
+void CodeGenModule::clear() {
+  DeferredDeclsToEmit.clear();
+}
+
 void CodeGenModule::Release() {
   EmitDeferred();
   applyReplacements();
@@ -247,7 +287,7 @@
   EmitCtorList(GlobalDtors, "llvm.global_dtors");
   EmitGlobalAnnotations();
   EmitStaticExternCAliases();
-  EmitLLVMUsed();
+  emitLLVMUsed();
 
   if (CodeGenOpts.Autolink &&
       (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
@@ -332,9 +372,9 @@
     Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
 }
 
-void CodeGenModule::Error(SourceLocation loc, StringRef error) {
-  unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, error);
-  getDiags().Report(Context.getFullLoc(loc), diagID);
+void CodeGenModule::Error(SourceLocation loc, StringRef message) {
+  unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
+  getDiags().Report(Context.getFullLoc(loc), diagID) << message;
 }
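
Both this Error() change and the earlier CodeGenAction one switch custom
diagnostics to a fixed "%0" format and pass the message as an argument, which
removes the need to escape '%' characters in the message by hand. A sketch of
why that works, with a stand-in formatter rather than the real
DiagnosticsEngine:

    #include <iostream>
    #include <string>

    // Minimal "%0"-style substitution; only the argument is inserted verbatim.
    std::string formatDiag(const std::string &Format, const std::string &Arg0) {
      std::string Out;
      for (size_t i = 0; i < Format.size(); ++i) {
        if (Format[i] == '%' && i + 1 < Format.size() && Format[i + 1] == '0') {
          Out += Arg0; // the message is data, never re-parsed as a format
          ++i;
        } else {
          Out += Format[i];
        }
      }
      return Out;
    }

    int main() {
      std::string Msg = "invalid use of '%rax' in inline asm";
      std::cout << formatDiag("%0", Msg) << "\n";
    }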
 
 /// ErrorUnsupported - Print out an error that codegen doesn't support the
@@ -405,81 +445,13 @@
   TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
 
   // Override the TLS model if it is explicitly specified.
-  if (D.hasAttr<TLSModelAttr>()) {
-    const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>();
+  if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
     TLM = GetLLVMTLSModel(Attr->getModel());
   }
 
   GV->setThreadLocalMode(TLM);
 }
 
-/// Set the symbol visibility of type information (vtable and RTTI)
-/// associated with the given type.
-void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
-                                      const CXXRecordDecl *RD,
-                                      TypeVisibilityKind TVK) const {
-  setGlobalVisibility(GV, RD);
-
-  if (!CodeGenOpts.HiddenWeakVTables)
-    return;
-
-  // We never want to drop the visibility for RTTI names.
-  if (TVK == TVK_ForRTTIName)
-    return;
-
-  // We want to drop the visibility to hidden for weak type symbols.
-  // This isn't possible if there might be unresolved references
-  // elsewhere that rely on this symbol being visible.
-
-  // This should be kept roughly in sync with setThunkVisibility
-  // in CGVTables.cpp.
-
-  // Preconditions.
-  if (GV->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage ||
-      GV->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
-    return;
-
-  // Don't override an explicit visibility attribute.
-  if (RD->getExplicitVisibility(NamedDecl::VisibilityForType))
-    return;
-
-  switch (RD->getTemplateSpecializationKind()) {
-  // We have to disable the optimization if this is an EI definition
-  // because there might be EI declarations in other shared objects.
-  case TSK_ExplicitInstantiationDefinition:
-  case TSK_ExplicitInstantiationDeclaration:
-    return;
-
-  // Every use of a non-template class's type information has to emit it.
-  case TSK_Undeclared:
-    break;
-
-  // In theory, implicit instantiations can ignore the possibility of
-  // an explicit instantiation declaration because there necessarily
-  // must be an EI definition somewhere with default visibility.  In
-  // practice, it's possible to have an explicit instantiation for
-  // an arbitrary template class, and linkers aren't necessarily able
-  // to deal with mixed-visibility symbols.
-  case TSK_ExplicitSpecialization:
-  case TSK_ImplicitInstantiation:
-    return;
-  }
-
-  // If there's a key function, there may be translation units
-  // that don't have the key function's definition.  But ignore
-  // this if we're emitting RTTI under -fno-rtti.
-  if (!(TVK != TVK_ForRTTI) || LangOpts.RTTI) {
-    // FIXME: what should we do if we "lose" the key function during
-    // the emission of the file?
-    if (Context.getCurrentKeyFunction(RD))
-      return;
-  }
-
-  // Otherwise, drop the visibility to hidden.
-  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
-  GV->setUnnamedAddr(true);
-}
-
 StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
   const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
 
@@ -581,18 +553,13 @@
 CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
   const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
 
-  if (isa<CXXDestructorDecl>(D) &&
-      getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
-                                         GD.getDtorType()))
-    return llvm::Function::LinkOnceODRLinkage;
-
   GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
 
   if (Linkage == GVA_Internal)
     return llvm::Function::InternalLinkage;
   
   if (D->hasAttr<DLLExportAttr>())
-    return llvm::Function::DLLExportLinkage;
+    return llvm::Function::ExternalLinkage;
   
   if (D->hasAttr<WeakAttr>())
     return llvm::Function::WeakAnyLinkage;
@@ -622,11 +589,18 @@
   // explicit instantiations can occur in multiple translation units
   // and must all be equivalent. However, we are not allowed to
   // throw away these explicit instantiations.
-  if (Linkage == GVA_ExplicitTemplateInstantiation)
+  if (Linkage == GVA_StrongODR)
     return !Context.getLangOpts().AppleKext
              ? llvm::Function::WeakODRLinkage
              : llvm::Function::ExternalLinkage;
-  
+
+  // Destructor variants in the Microsoft C++ ABI are always linkonce_odr thunks
+  // emitted on an as-needed basis.
+  if (isa<CXXDestructorDecl>(D) &&
+      getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
+                                         GD.getDtorType()))
+    return llvm::Function::LinkOnceODRLinkage;
+
   // Otherwise, we have strong external linkage.
   assert(Linkage == GVA_StrongExternal);
   return llvm::Function::ExternalLinkage;
@@ -686,10 +660,15 @@
     // Naked implies noinline: we should not be inlining such functions.
     B.addAttribute(llvm::Attribute::Naked);
     B.addAttribute(llvm::Attribute::NoInline);
+  } else if (D->hasAttr<OptimizeNoneAttr>()) {
+    // OptimizeNone implies noinline; we should not be inlining such functions.
+    B.addAttribute(llvm::Attribute::OptimizeNone);
+    B.addAttribute(llvm::Attribute::NoInline);
+  } else if (D->hasAttr<NoDuplicateAttr>()) {
+    B.addAttribute(llvm::Attribute::NoDuplicate);
   } else if (D->hasAttr<NoInlineAttr>()) {
     B.addAttribute(llvm::Attribute::NoInline);
-  } else if ((D->hasAttr<AlwaysInlineAttr>() ||
-              D->hasAttr<ForceInlineAttr>()) &&
+  } else if (D->hasAttr<AlwaysInlineAttr>() &&
              !F->getAttributes().hasAttribute(llvm::AttributeSet::FunctionIndex,
                                               llvm::Attribute::NoInline)) {
     // (noinline wins over always_inline, and we can't specify both in IR)
@@ -704,8 +683,16 @@
   if (D->hasAttr<MinSizeAttr>())
     B.addAttribute(llvm::Attribute::MinSize);
 
+  if (D->hasAttr<OptimizeNoneAttr>()) {
+    // OptimizeNone wins over OptimizeForSize and MinSize.
+    B.removeAttribute(llvm::Attribute::OptimizeForSize);
+    B.removeAttribute(llvm::Attribute::MinSize);
+  }
+
   if (LangOpts.getStackProtector() == LangOptions::SSPOn)
     B.addAttribute(llvm::Attribute::StackProtect);
+  else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
+    B.addAttribute(llvm::Attribute::StackProtectStrong);
   else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
     B.addAttribute(llvm::Attribute::StackProtectReq);
 
@@ -751,7 +738,7 @@
     GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
 
   if (D->hasAttr<UsedAttr>())
-    AddUsedGlobal(GV);
+    addUsedGlobal(GV);
 
   if (const SectionAttr *SA = D->getAttr<SectionAttr>())
     GV->setSection(SA->getName());
@@ -788,7 +775,12 @@
   if (!IsIncompleteFunction)
     SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
 
-  if (getCXXABI().HasThisReturn(GD)) {
+  // Add the Returned attribute for "this", except for iOS 5 and earlier
+  // where substantial code, including the libstdc++ dylib, was compiled with
+  // GCC and does not actually return "this".
+  if (getCXXABI().HasThisReturn(GD) &&
+      !(getTarget().getTriple().isiOS() &&
+        getTarget().getTriple().isOSVersionLT(6))) {
     assert(!F->arg_empty() &&
            F->arg_begin()->getType()
              ->canLosslesslyBitCastTo(F->getReturnType()) &&
@@ -800,7 +792,8 @@
   // overridden by a definition.
 
   if (FD->hasAttr<DLLImportAttr>()) {
-    F->setLinkage(llvm::Function::DLLImportLinkage);
+    F->setLinkage(llvm::Function::ExternalLinkage);
+    F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
   } else if (FD->hasAttr<WeakAttr>() ||
              FD->isWeakImported()) {
     // "extern_weak" is overloaded in LLVM; we probably should have
@@ -808,6 +801,8 @@
     F->setLinkage(llvm::Function::ExternalWeakLinkage);
   } else {
     F->setLinkage(llvm::Function::ExternalLinkage);
+    if (FD->hasAttr<DLLExportAttr>())
+      F->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
 
     LinkageInfo LV = FD->getLinkageAndVisibility();
     if (LV.getLinkage() == ExternalLinkage && LV.isVisibilityExplicit()) {
@@ -825,39 +820,51 @@
                     llvm::Attribute::NoBuiltin);
 }
 
-void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
+void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
   assert(!GV->isDeclaration() &&
          "Only globals with definition can force usage.");
   LLVMUsed.push_back(GV);
 }
 
-void CodeGenModule::EmitLLVMUsed() {
+void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
+  assert(!GV->isDeclaration() &&
+         "Only globals with definition can force usage.");
+  LLVMCompilerUsed.push_back(GV);
+}
+
+static void emitUsed(CodeGenModule &CGM, StringRef Name,
+                     std::vector<llvm::WeakVH> &List) {
   // Don't create llvm.used if there is no need.
-  if (LLVMUsed.empty())
+  if (List.empty())
     return;
 
-  // Convert LLVMUsed to what ConstantArray needs.
+  // Convert List to what ConstantArray needs.
   SmallVector<llvm::Constant*, 8> UsedArray;
-  UsedArray.resize(LLVMUsed.size());
-  for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
+  UsedArray.resize(List.size());
+  for (unsigned i = 0, e = List.size(); i != e; ++i) {
     UsedArray[i] =
-     llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
-                                    Int8PtrTy);
+     llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*List[i]),
+                                    CGM.Int8PtrTy);
   }
 
   if (UsedArray.empty())
     return;
-  llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
+  llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
 
   llvm::GlobalVariable *GV =
-    new llvm::GlobalVariable(getModule(), ATy, false,
+    new llvm::GlobalVariable(CGM.getModule(), ATy, false,
                              llvm::GlobalValue::AppendingLinkage,
                              llvm::ConstantArray::get(ATy, UsedArray),
-                             "llvm.used");
+                             Name);
 
   GV->setSection("llvm.metadata");
 }
 
+void CodeGenModule::emitLLVMUsed() {
+  emitUsed(*this, "llvm.used", LLVMUsed);
+  emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
+}
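For context, a hedged sketch of how the two lists are meant to be used by callers (the globals named here are illustrative, not from this patch): llvm.used is intended to keep a symbol even through linker dead-stripping (e.g. via no_dead_strip on Mach-O), while llvm.compiler.used only stops LLVM itself from dropping it.

    // A global the user marked __attribute__((used)) must survive dead
    // stripping by the linker as well, so it belongs on llvm.used ...
    CGM.addUsedGlobal(UserMarkedGlobal);
    // ... while compiler-emitted bookkeeping (e.g. profiling counters) only
    // has to survive LLVM's optimizer and may still be dropped by the linker.
    CGM.addCompilerUsedGlobal(InstrumentationCounters);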
+
 void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
   llvm::Value *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
   LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
@@ -1002,31 +1009,23 @@
     // Stop if we're out of both deferred v-tables and deferred declarations.
     if (DeferredDeclsToEmit.empty()) break;
 
-    GlobalDecl D = DeferredDeclsToEmit.back();
+    DeferredGlobal &G = DeferredDeclsToEmit.back();
+    GlobalDecl D = G.GD;
+    llvm::GlobalValue *GV = G.GV;
     DeferredDeclsToEmit.pop_back();
 
+    assert(GV == GetGlobalValue(getMangledName(D)));
     // Check to see if we've already emitted this.  This is necessary
     // for a couple of reasons: first, decls can end up in the
     // deferred-decls queue multiple times, and second, decls can end
     // up with definitions in unusual ways (e.g. by an extern inline
     // function acquiring a strong function redefinition).  Just
     // ignore these cases.
-    //
-    // TODO: That said, looking this up multiple times is very wasteful.
-    StringRef Name = getMangledName(D);
-    llvm::GlobalValue *CGRef = GetGlobalValue(Name);
-    assert(CGRef && "Deferred decl wasn't referenced?");
-
-    if (!CGRef->isDeclaration())
-      continue;
-
-    // GlobalAlias::isDeclaration() defers to the aliasee, but for our
-    // purposes an alias counts as a definition.
-    if (isa<llvm::GlobalAlias>(CGRef))
+    if (!GV->isDeclaration())
       continue;
 
     // Otherwise, emit the definition and move on to the next one.
-    EmitGlobalDefinition(D);
+    EmitGlobalDefinition(D, GV);
   }
 }
 
@@ -1096,10 +1095,8 @@
                                          llvm::GlobalValue *GV) {
   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
   // Get the struct elements for these annotations.
-  for (specific_attr_iterator<AnnotateAttr>
-       ai = D->specific_attr_begin<AnnotateAttr>(),
-       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
-    Annotations.push_back(EmitAnnotateAttr(GV, *ai, D->getLocation()));
+  for (const auto *I : D->specific_attrs<AnnotateAttr>())
+    Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
 }
 
 bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
@@ -1196,12 +1193,14 @@
       if (!FD->doesDeclarationForceExternallyVisibleDefinition())
         return;
 
-      const FunctionDecl *InlineDefinition = 0;
-      FD->getBody(InlineDefinition);
-
       StringRef MangledName = getMangledName(GD);
-      DeferredDecls.erase(MangledName);
-      EmitGlobalDefinition(InlineDefinition);
+
+      // Compute the function info and LLVM type.
+      const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+      llvm::Type *Ty = getTypes().GetFunctionType(FI);
+
+      GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
+                              /*DontDefer=*/false);
       return;
     }
   } else {
@@ -1231,8 +1230,8 @@
   // If the value has already been used, add it directly to the
   // DeferredDeclsToEmit list.
   StringRef MangledName = getMangledName(GD);
-  if (GetGlobalValue(MangledName))
-    DeferredDeclsToEmit.push_back(GD);
+  if (llvm::GlobalValue *GV = GetGlobalValue(MangledName))
+    addDeferredDeclToEmit(GV, GD);
   else {
     // Otherwise, remember that we saw a deferred decl with this name.  The
     // first use of the mangled name will cause it to move into
@@ -1301,8 +1300,7 @@
   if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
     return true;
   const FunctionDecl *F = cast<FunctionDecl>(GD.getDecl());
-  if (CodeGenOpts.OptimizationLevel == 0 &&
-      !F->hasAttr<AlwaysInlineAttr>() && !F->hasAttr<ForceInlineAttr>())
+  if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
     return false;
   // PR9614. Avoid cases where the source code is lying to us. An available
   // externally function should have an equivalent function somewhere else,
@@ -1329,7 +1327,7 @@
     }
 }
 
-void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
   const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
 
   PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(), 
@@ -1351,7 +1349,7 @@
       else if (const CXXDestructorDecl *DD =dyn_cast<CXXDestructorDecl>(Method))
         EmitCXXDestructor(DD, GD.getDtorType());
       else
-        EmitGlobalFunctionDefinition(GD);
+        EmitGlobalFunctionDefinition(GD, GV);
 
       if (Method->isVirtual())
         getVTables().EmitThunks(GD);
@@ -1359,7 +1357,7 @@
       return;
     }
 
-    return EmitGlobalFunctionDefinition(GD);
+    return EmitGlobalFunctionDefinition(GD, GV);
   }
   
   if (const VarDecl *VD = dyn_cast<VarDecl>(D))
@@ -1379,6 +1377,7 @@
 CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
                                        llvm::Type *Ty,
                                        GlobalDecl GD, bool ForVTable,
+                                       bool DontDefer,
                                        llvm::AttributeSet ExtraAttrs) {
   const Decl *D = GD.getDecl();
 
@@ -1398,14 +1397,6 @@
     return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
   }
 
-  // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
-  // each other bottoming out with the base dtor.  Therefore we emit non-base
-  // dtors on usage, even if there is no dtor definition in the TU.
-  if (D && isa<CXXDestructorDecl>(D) &&
-      getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
-                                         GD.getDtorType()))
-    DeferredDeclsToEmit.push_back(GD);
-
   // This function doesn't have a complete type (for example, the return
   // type is an incomplete struct). Use a fake type instead, and make
   // sure not to try to set attributes.
@@ -1433,50 +1424,64 @@
                                              B));
   }
 
-  // This is the first use or definition of a mangled name.  If there is a
-  // deferred decl with this name, remember that we need to emit it at the end
-  // of the file.
-  llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
-  if (DDI != DeferredDecls.end()) {
-    // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
-    // list, and remove it from DeferredDecls (since we don't need it anymore).
-    DeferredDeclsToEmit.push_back(DDI->second);
-    DeferredDecls.erase(DDI);
+  if (!DontDefer) {
+    // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
+    // each other bottoming out with the base dtor.  Therefore we emit non-base
+    // dtors on usage, even if there is no dtor definition in the TU.
+    if (D && isa<CXXDestructorDecl>(D) &&
+        getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
+                                           GD.getDtorType()))
+      addDeferredDeclToEmit(F, GD);
 
-  // Otherwise, if this is a sized deallocation function, emit a weak definition
-  // for it at the end of the translation unit.
-  } else if (D && cast<FunctionDecl>(D)
-                      ->getCorrespondingUnsizedGlobalDeallocationFunction()) {
-    DeferredDeclsToEmit.push_back(GD);
+    // This is the first use or definition of a mangled name.  If there is a
+    // deferred decl with this name, remember that we need to emit it at the end
+    // of the file.
+    llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+    if (DDI != DeferredDecls.end()) {
+      // Move the potentially referenced deferred decl to the
+      // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
+      // don't need it anymore).
+      addDeferredDeclToEmit(F, DDI->second);
+      DeferredDecls.erase(DDI);
 
-  // Otherwise, there are cases we have to worry about where we're
-  // using a declaration for which we must emit a definition but where
-  // we might not find a top-level definition:
-  //   - member functions defined inline in their classes
-  //   - friend functions defined inline in some class
-  //   - special member functions with implicit definitions
-  // If we ever change our AST traversal to walk into class methods,
-  // this will be unnecessary.
-  //
-  // We also don't emit a definition for a function if it's going to be an entry
-  // in a vtable, unless it's already marked as used.
-  } else if (getLangOpts().CPlusPlus && D) {
-    // Look for a declaration that's lexically in a record.
-    const FunctionDecl *FD = cast<FunctionDecl>(D);
-    FD = FD->getMostRecentDecl();
-    do {
-      if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
-        if (FD->isImplicit() && !ForVTable) {
-          assert(FD->isUsed() && "Sema didn't mark implicit function as used!");
-          DeferredDeclsToEmit.push_back(GD.getWithDecl(FD));
-          break;
-        } else if (FD->doesThisDeclarationHaveABody()) {
-          DeferredDeclsToEmit.push_back(GD.getWithDecl(FD));
-          break;
+      // Otherwise, if this is a sized deallocation function, emit a weak
+      // definition for it at the end of the translation unit.
+    } else if (D && cast<FunctionDecl>(D)
+                        ->getCorrespondingUnsizedGlobalDeallocationFunction()) {
+      addDeferredDeclToEmit(F, GD);
+
+      // Otherwise, there are cases we have to worry about where we're
+      // using a declaration for which we must emit a definition but where
+      // we might not find a top-level definition:
+      //   - member functions defined inline in their classes
+      //   - friend functions defined inline in some class
+      //   - special member functions with implicit definitions
+      // If we ever change our AST traversal to walk into class methods,
+      // this will be unnecessary.
+      //
+      // We also don't emit a definition for a function if it's going to be an
+      // entry in a vtable, unless it's already marked as used.
+    } else if (getLangOpts().CPlusPlus && D) {
+      // Look for a declaration that's lexically in a record.
+      const FunctionDecl *FD = cast<FunctionDecl>(D);
+      FD = FD->getMostRecentDecl();
+      do {
+        if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+          if (FD->isImplicit() && !ForVTable) {
+            assert(FD->isUsed() &&
+                   "Sema didn't mark implicit function as used!");
+            addDeferredDeclToEmit(F, GD.getWithDecl(FD));
+            break;
+          } else if (FD->doesThisDeclarationHaveABody()) {
+            addDeferredDeclToEmit(F, GD.getWithDecl(FD));
+            break;
+          }
         }
-      }
-      FD = FD->getPreviousDecl();
-    } while (FD);
+        FD = FD->getPreviousDecl();
+      } while (FD);
+    }
   }
 
   // Make sure the result is of the requested type.
@@ -1494,13 +1499,14 @@
 /// create it (this occurs when we see a definition of the function).
 llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
                                                  llvm::Type *Ty,
-                                                 bool ForVTable) {
+                                                 bool ForVTable,
+                                                 bool DontDefer) {
   // If there was no specific requested type, just convert it now.
   if (!Ty)
     Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
   
   StringRef MangledName = getMangledName(GD);
-  return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable);
+  return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer);
 }
 
 /// CreateRuntimeFunction - Create a new runtime function with the specified
@@ -1509,9 +1515,9 @@
 CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
                                      StringRef Name,
                                      llvm::AttributeSet ExtraAttrs) {
-  llvm::Constant *C
-    = GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
-                              ExtraAttrs);
+  llvm::Constant *C =
+      GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+                              /*DontDefer=*/false, ExtraAttrs);
   if (llvm::Function *F = dyn_cast<llvm::Function>(C))
     if (F->empty())
       F->setCallingConv(getRuntimeCC());
@@ -1571,6 +1577,13 @@
     return llvm::ConstantExpr::getBitCast(Entry, Ty);
   }
 
+  unsigned AddrSpace = GetGlobalVarAddressSpace(D, Ty->getAddressSpace());
+  llvm::GlobalVariable *GV =
+    new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
+                             llvm::GlobalValue::ExternalLinkage,
+                             0, MangledName, 0,
+                             llvm::GlobalVariable::NotThreadLocal, AddrSpace);
+
   // This is the first use or definition of a mangled name.  If there is a
   // deferred decl with this name, remember that we need to emit it at the end
   // of the file.
@@ -1578,17 +1591,10 @@
   if (DDI != DeferredDecls.end()) {
     // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
     // list, and remove it from DeferredDecls (since we don't need it anymore).
-    DeferredDeclsToEmit.push_back(DDI->second);
+    addDeferredDeclToEmit(GV, DDI->second);
     DeferredDecls.erase(DDI);
   }
 
-  unsigned AddrSpace = GetGlobalVarAddressSpace(D, Ty->getAddressSpace());
-  llvm::GlobalVariable *GV =
-    new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
-                             llvm::GlobalValue::ExternalLinkage,
-                             0, MangledName, 0,
-                             llvm::GlobalVariable::NotThreadLocal, AddrSpace);
-
   // Handle things which are present even on external declarations.
   if (D) {
     // FIXME: This code is overly simple and should be merged with other global
@@ -1600,9 +1606,10 @@
     if (LV.getLinkage() != ExternalLinkage) {
       // Don't set internal linkage on declarations.
     } else {
-      if (D->hasAttr<DLLImportAttr>())
-        GV->setLinkage(llvm::GlobalValue::DLLImportLinkage);
-      else if (D->hasAttr<WeakAttr>() || D->isWeakImported())
+      if (D->hasAttr<DLLImportAttr>()) {
+        GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+        GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+      } else if (D->hasAttr<WeakAttr>() || D->isWeakImported())
         GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
 
       // Set visibility on a declaration only if it's explicit.
@@ -1627,6 +1634,12 @@
   if (AddrSpace != Ty->getAddressSpace())
     return llvm::ConstantExpr::getAddrSpaceCast(GV, Ty);
 
+  if (getTarget().getTriple().getArch() == llvm::Triple::xcore &&
+      D->getLanguageLinkage() == CLanguageLinkage &&
+      D->getType().isConstant(Context) &&
+      isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
+    GV->setSection(".cp.rodata");
+
   return GV;
 }
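A hedged illustration of the XCore special case added above (the declaration is invented): a constant with C language linkage that is externally visible is routed to the constant-pool data section.

    // Placed in ".cp.rodata" when targeting xcore; a non-const or
    // internal-linkage global would keep the default section.
    extern "C" const int lookup_table[4] = {1, 2, 3, 5};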
 
@@ -1875,6 +1888,10 @@
   llvm::GlobalValue::LinkageTypes Linkage = 
     GetLLVMLinkageVarDefinition(D, GV->isConstant());
   GV->setLinkage(Linkage);
+  if (D->hasAttr<DLLImportAttr>())
+    GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+  else if (D->hasAttr<DLLExportAttr>())
+    GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
 
   // If required by the ABI, give definitions of static data members with inline
   // initializers linkonce_odr linkage.
@@ -1917,9 +1934,9 @@
   if (Linkage == GVA_Internal)
     return llvm::Function::InternalLinkage;
   else if (D->hasAttr<DLLImportAttr>())
-    return llvm::Function::DLLImportLinkage;
+    return llvm::Function::ExternalLinkage;
   else if (D->hasAttr<DLLExportAttr>())
-    return llvm::Function::DLLExportLinkage;
+    return llvm::Function::ExternalLinkage;
   else if (D->hasAttr<SelectAnyAttr>()) {
     // selectany symbols are externally visible, so use weak instead of
     // linkonce.  MSVC optimizes away references to const selectany globals, so
@@ -1931,15 +1948,14 @@
       return llvm::GlobalVariable::WeakODRLinkage;
     else
       return llvm::GlobalVariable::WeakAnyLinkage;
-  } else if (Linkage == GVA_TemplateInstantiation ||
-             Linkage == GVA_ExplicitTemplateInstantiation)
+  } else if (Linkage == GVA_TemplateInstantiation || Linkage == GVA_StrongODR)
     return llvm::GlobalVariable::WeakODRLinkage;
   else if (!getLangOpts().CPlusPlus && 
-           ((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
-             D->getAttr<CommonAttr>()) &&
+           ((!CodeGenOpts.NoCommon && !D->hasAttr<NoCommonAttr>()) ||
+             D->hasAttr<CommonAttr>()) &&
            !D->hasExternalStorage() && !D->getInit() &&
-           !D->getAttr<SectionAttr>() && !D->getTLSKind() &&
-           !D->getAttr<WeakImportAttr>()) {
+           !D->hasAttr<SectionAttr>() && !D->getTLSKind() &&
+           !D->hasAttr<WeakImportAttr>()) {
     // Thread local vars aren't considered common linkage.
     return llvm::GlobalVariable::CommonLinkage;
   } else if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
@@ -1965,7 +1981,7 @@
   for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
          ui != ue; ) {
     llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
-    llvm::User *user = *use;
+    llvm::User *user = use->getUser();
 
     // Recognize and replace uses of bitcasts.  Most calls to
     // unprototyped functions will use bitcasts.
@@ -1978,7 +1994,7 @@
     // Recognize calls to the function.
     llvm::CallSite callSite(user);
     if (!callSite) continue;
-    if (!callSite.isCallee(use)) continue;
+    if (!callSite.isCallee(&*use)) continue;
 
     // If the return types don't match exactly, then we can't
     // transform this call unless it's dead.
@@ -2087,7 +2103,8 @@
   EmitTopLevelDecl(VD);
 }
 
-void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
+void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
+                                                 llvm::GlobalValue *GV) {
   const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
 
   // Compute the function info and LLVM type.
@@ -2095,7 +2112,9 @@
   llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
 
   // Get or create the prototype for the function.
-  llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
+  llvm::Constant *Entry =
+      GV ? GV
+         : GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer*/ true);
 
   // Strip off a bitcast if we got one back.
   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
@@ -2174,6 +2193,10 @@
     AddGlobalDtor(Fn, DA->getPriority());
   if (D->hasAttr<AnnotateAttr>())
     AddGlobalAnnotations(D, Fn);
+
+  llvm::Function *PGOInit = CodeGenPGO::emitInitialization(*this);
+  if (PGOInit)
+    AddGlobalCtor(PGOInit, 0);
 }
 
 void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
@@ -2235,9 +2258,9 @@
     if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
       // The dllexport attribute is ignored for undefined symbols.
       if (FD->hasBody())
-        GA->setLinkage(llvm::Function::DLLExportLinkage);
+        GA->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
     } else {
-      GA->setLinkage(llvm::Function::DLLExportLinkage);
+      GA->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
     }
   } else if (D->hasAttr<WeakAttr>() ||
              D->hasAttr<WeakRefAttr>() ||
@@ -2355,30 +2378,25 @@
     C = llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
   }
 
-  llvm::GlobalValue::LinkageTypes Linkage;
-  if (isUTF16)
-    // FIXME: why do utf strings get "_" labels instead of "L" labels?
-    Linkage = llvm::GlobalValue::InternalLinkage;
-  else
-    // FIXME: With OS X ld 123.2 (xcode 4) and LTO we would get a linker error
-    // when using private linkage. It is not clear if this is a bug in ld
-    // or a reasonable new restriction.
-    Linkage = llvm::GlobalValue::LinkerPrivateLinkage;
-  
   // Note: -fwritable-strings doesn't make the backing store strings of
   // CFStrings writable. (See <rdar://problem/10657500>)
   llvm::GlobalVariable *GV =
-    new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
-                             Linkage, C, ".str");
+      new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
+                               llvm::GlobalValue::PrivateLinkage, C, ".str");
   GV->setUnnamedAddr(true);
   // Don't enforce the target's minimum global alignment, since the only use
   // of the string is via this class initializer.
+  // FIXME: We set the section explicitly to work around a bug in ld64 224.1.
+  // Without it, LLVM can merge the string with a non-unnamed_addr one during
+  // LTO, which changes the section it ends up in and surprises ld64.
   if (isUTF16) {
     CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
     GV->setAlignment(Align.getQuantity());
+    GV->setSection("__TEXT,__ustring");
   } else {
     CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
     GV->setAlignment(Align.getQuantity());
+    GV->setSection("__TEXT,__cstring,cstring_literals");
   }
 
   // String.
@@ -2397,23 +2415,12 @@
   GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
                                 llvm::GlobalVariable::PrivateLinkage, C,
                                 "_unnamed_cfstring_");
-  if (const char *Sect = getTarget().getCFStringSection())
-    GV->setSection(Sect);
+  GV->setSection("__DATA,__cfstring");
   Entry.setValue(GV);
 
   return GV;
 }
 
-static RecordDecl *
-CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
-                 DeclContext *DC, IdentifierInfo *Id) {
-  SourceLocation Loc;
-  if (Ctx.getLangOpts().CPlusPlus)
-    return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
-  else
-    return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
-}
-
 llvm::Constant *
 CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
   unsigned StringLength = 0;
@@ -2456,9 +2463,7 @@
 
   if (!NSConstantStringType) {
     // Construct the type for a constant NSString.
-    RecordDecl *D = CreateRecordDecl(Context, TTK_Struct, 
-                                     Context.getTranslationUnitDecl(),
-                                   &Context.Idents.get("__builtin_NSString"));
+    RecordDecl *D = Context.buildImplicitRecord("__builtin_NSString");
     D->startDefinition();
       
     QualType FieldTypes[3];
@@ -2521,12 +2526,13 @@
   GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
                                 llvm::GlobalVariable::PrivateLinkage, C,
                                 "_unnamed_nsstring_");
+  const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip";
+  const char *NSStringNonFragileABISection =
+      "__DATA,__objc_stringobj,regular,no_dead_strip";
   // FIXME. Fix section.
-  if (const char *Sect = 
-        LangOpts.ObjCRuntime.isNonFragile() 
-          ? getTarget().getNSStringNonFragileABISection() 
-          : getTarget().getNSStringSection())
-    GV->setSection(Sect);
+  GV->setSection(LangOpts.ObjCRuntime.isNonFragile()
+                     ? NSStringNonFragileABISection
+                     : NSStringSection);
   Entry.setValue(GV);
   
   return GV;
@@ -2534,9 +2540,7 @@
 
 QualType CodeGenModule::getObjCFastEnumerationStateType() {
   if (ObjCFastEnumerationStateType.isNull()) {
-    RecordDecl *D = CreateRecordDecl(Context, TTK_Struct, 
-                                     Context.getTranslationUnitDecl(),
-                      &Context.Idents.get("__objcFastEnumerationState"));
+    RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
     D->startDefinition();
     
     QualType FieldTypes[] = {
@@ -2613,25 +2617,67 @@
 llvm::Constant *
 CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
   CharUnits Align = getContext().getAlignOfGlobalVarInChars(S->getType());
-  if (S->isAscii() || S->isUTF8()) {
-    SmallString<64> Str(S->getString());
-    
-    // Resize the string to the right size, which is indicated by its type.
-    const ConstantArrayType *CAT = Context.getAsConstantArrayType(S->getType());
-    Str.resize(CAT->getSize().getZExtValue());
-    return GetAddrOfConstantString(Str, /*GlobalName*/ 0, Align.getQuantity());
+
+  llvm::StringMapEntry<llvm::GlobalVariable *> *Entry = nullptr;
+  llvm::GlobalVariable *GV = nullptr;
+  if (!LangOpts.WritableStrings) {
+    llvm::StringMap<llvm::GlobalVariable *> *ConstantStringMap = nullptr;
+    switch (S->getCharByteWidth()) {
+    case 1:
+      ConstantStringMap = &Constant1ByteStringMap;
+      break;
+    case 2:
+      ConstantStringMap = &Constant2ByteStringMap;
+      break;
+    case 4:
+      ConstantStringMap = &Constant4ByteStringMap;
+      break;
+    default:
+      llvm_unreachable("unhandled byte width!");
+    }
+    Entry = &ConstantStringMap->GetOrCreateValue(S->getBytes());
+    GV = Entry->getValue();
   }
 
-  // FIXME: the following does not memoize wide strings.
-  llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
-  llvm::GlobalVariable *GV =
-    new llvm::GlobalVariable(getModule(),C->getType(),
-                             !LangOpts.WritableStrings,
-                             llvm::GlobalValue::PrivateLinkage,
-                             C,".str");
+  if (!GV) {
+    SmallString<256> MangledNameBuffer;
+    StringRef GlobalVariableName;
+    llvm::GlobalValue::LinkageTypes LT;
 
-  GV->setAlignment(Align.getQuantity());
-  GV->setUnnamedAddr(true);
+    // Mangle the string literal if the ABI allows for it.  However, we cannot
+    // do this if we are compiling with ASan or -fwritable-strings because they
+    // rely on strings having normal linkage.
+    if (!LangOpts.WritableStrings && !SanOpts.Address &&
+        getCXXABI().getMangleContext().shouldMangleStringLiteral(S)) {
+      llvm::raw_svector_ostream Out(MangledNameBuffer);
+      getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
+      Out.flush();
+
+      LT = llvm::GlobalValue::LinkOnceODRLinkage;
+      GlobalVariableName = MangledNameBuffer;
+    } else {
+      LT = llvm::GlobalValue::PrivateLinkage;
+      GlobalVariableName = ".str";
+    }
+
+    // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
+    unsigned AddrSpace = 0;
+    if (getLangOpts().OpenCL)
+      AddrSpace = getContext().getTargetAddressSpace(LangAS::opencl_constant);
+
+    llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
+    GV = new llvm::GlobalVariable(
+        getModule(), C->getType(), !LangOpts.WritableStrings, LT, C,
+        GlobalVariableName, /*InsertBefore=*/nullptr,
+        llvm::GlobalVariable::NotThreadLocal, AddrSpace);
+    GV->setUnnamedAddr(true);
+    if (Entry)
+      Entry->setValue(GV);
+  }
+
+  if (Align.getQuantity() > GV->getAlignment())
+    GV->setAlignment(Align.getQuantity());
+
   return GV;
 }
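A sketch of what the mangling path above buys, under the assumption that the target C++ ABI supports string-literal mangling (names and contents invented): identical literals in different translation units become one linkonce_odr global instead of per-TU private ".str" globals.

    // foo.cpp and bar.cpp both contain:
    const char *greeting() { return "hello"; }
    // With shouldMangleStringLiteral() returning true, the backing array is
    // emitted in both objects as a linkonce_odr unnamed_addr global under a
    // mangled name, so the linker keeps a single copy; with ASan or
    // -fwritable-strings it stays a private ".str" global per TU.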
 
@@ -2656,7 +2702,7 @@
   llvm::Constant *C =
       llvm::ConstantDataArray::getString(CGM.getLLVMContext(), str, false);
 
-  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
+  // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
   unsigned AddrSpace = 0;
   if (CGM.getLangOpts().OpenCL)
     AddrSpace = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);
@@ -2695,7 +2741,7 @@
     return GenerateStringLiteral(Str, false, *this, GlobalName, Alignment);
 
   llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
-    ConstantStringMap.GetOrCreateValue(Str);
+    Constant1ByteStringMap.GetOrCreateValue(Str);
 
   if (llvm::GlobalVariable *GV = Entry.getValue()) {
     if (Alignment > GV->getAlignment()) {
@@ -2793,10 +2839,7 @@
 /// properties for an implementation.
 void CodeGenModule::EmitObjCPropertyImplementations(const
                                                     ObjCImplementationDecl *D) {
-  for (ObjCImplementationDecl::propimpl_iterator
-         i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
-    ObjCPropertyImplDecl *PID = *i;
-
+  for (const auto *PID : D->property_impls()) {
     // Dynamic is just for type-checking.
     if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
       ObjCPropertyDecl *PD = PID->getPropertyDecl();
@@ -2871,13 +2914,12 @@
 
 /// EmitNamespace - Emit all declarations in a namespace.
 void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
-  for (RecordDecl::decl_iterator I = ND->decls_begin(), E = ND->decls_end();
-       I != E; ++I) {
-    if (const VarDecl *VD = dyn_cast<VarDecl>(*I))
+  for (auto *I : ND->decls()) {
+    if (const auto *VD = dyn_cast<VarDecl>(I))
       if (VD->getTemplateSpecializationKind() != TSK_ExplicitSpecialization &&
           VD->getTemplateSpecializationKind() != TSK_Undeclared)
         continue;
-    EmitTopLevelDecl(*I);
+    EmitTopLevelDecl(I);
   }
 }
 
@@ -2889,17 +2931,14 @@
     return;
   }
 
-  for (RecordDecl::decl_iterator I = LSD->decls_begin(), E = LSD->decls_end();
-       I != E; ++I) {
+  for (auto *I : LSD->decls()) {
     // Meta-data for ObjC class includes references to implemented methods.
     // Generate class's method definitions first.
-    if (ObjCImplDecl *OID = dyn_cast<ObjCImplDecl>(*I)) {
-      for (ObjCContainerDecl::method_iterator M = OID->meth_begin(),
-           MEnd = OID->meth_end();
-           M != MEnd; ++M)
-        EmitTopLevelDecl(*M);
+    if (auto *OID = dyn_cast<ObjCImplDecl>(I)) {
+      for (auto *M : OID->methods())
+        EmitTopLevelDecl(M);
     }
-    EmitTopLevelDecl(*I);
+    EmitTopLevelDecl(I);
   }
 }
 
@@ -2940,7 +2979,6 @@
     break;
     // No code generation needed.
   case Decl::UsingShadow:
-  case Decl::Using:
   case Decl::ClassTemplate:
   case Decl::VarTemplate:
   case Decl::VarTemplatePartialSpecialization:
@@ -2949,6 +2987,10 @@
   case Decl::Block:
   case Decl::Empty:
     break;
+  case Decl::Using:          // using X; [C++]
+    if (CGDebugInfo *DI = getModuleDebugInfo())
+        DI->EmitUsingDecl(cast<UsingDecl>(*D));
+    return;
   case Decl::NamespaceAlias:
     if (CGDebugInfo *DI = getModuleDebugInfo())
         DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
@@ -3048,7 +3090,15 @@
 
     ImportedModules.insert(Import->getImportedModule());
     break;
- }
+  }
+
+  case Decl::ClassTemplateSpecialization: {
+    const ClassTemplateSpecializationDecl *Spec =
+        cast<ClassTemplateSpecializationDecl>(D);
+    if (DebugInfo &&
+        Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition)
+      DebugInfo->completeTemplateDefinition(*Spec);
+  }
 
   default:
     // Make sure we handled everything we should, every other kind is a
@@ -3094,7 +3144,7 @@
     IdentifierInfo *Name = I->first;
     llvm::GlobalValue *Val = I->second;
     if (Val && !getModule().getNamedValue(Name->getName()))
-      AddUsedGlobal(new llvm::GlobalAlias(Val->getType(), Val->getLinkage(),
+      addUsedGlobal(new llvm::GlobalAlias(Val->getType(), Val->getLinkage(),
                                           Name->getName(), Val, &getModule()));
   }
 }
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index c161224..0d13bdc 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -30,7 +30,7 @@
 #include "llvm/ADT/StringMap.h"
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/Module.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/ValueHandle.h"
 #include "llvm/Transforms/Utils/SpecialCaseList.h"
 
 namespace llvm {
@@ -85,7 +85,8 @@
   class CGCUDARuntime;
   class BlockFieldFlags;
   class FunctionArgList;
-  
+  class PGOProfileData;
+
   struct OrderGlobalInits {
     unsigned int priority;
     unsigned int lex_order;
@@ -98,10 +99,8 @@
     }
     
     bool operator<(const OrderGlobalInits &RHS) const {
-      if (priority < RHS.priority)
-        return true;
-      
-      return priority == RHS.priority && lex_order < RHS.lex_order;
+      return std::tie(priority, lex_order) <
+             std::tie(RHS.priority, RHS.lex_order);
     }
   };
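The std::tie form above is the usual idiom for lexicographic comparison and is equivalent to the hand-written version it replaces; a standalone sketch:

    #include <tuple>

    struct Key { unsigned priority, lex_order; };

    // Lower priority wins; ties fall back to lexical order.
    bool less(const Key &L, const Key &R) {
      return std::tie(L.priority, L.lex_order) <
             std::tie(R.priority, R.lex_order);
    }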
 
@@ -236,7 +235,7 @@
   DiagnosticsEngine &Diags;
   const llvm::DataLayout &TheDataLayout;
   const TargetInfo &Target;
-  CGCXXABI &ABI;
+  std::unique_ptr<CGCXXABI> ABI;
   llvm::LLVMContext &VMContext;
 
   CodeGenTBAA *TBAA;
@@ -258,6 +257,7 @@
   ARCEntrypoints *ARCData;
   llvm::MDNode *NoObjCARCExceptionsMetadata;
   RREntrypoints *RRData;
+  PGOProfileData *PGOData;
 
   // WeakRefReferences - A set of references that have only been seen via
   // a weakref so far. This is used to remove the weak of the reference if we
@@ -273,7 +273,15 @@
   /// DeferredDeclsToEmit - This is a list of deferred decls which we have seen
   /// that *are* actually referenced.  These get code generated when the module
   /// is done.
-  std::vector<GlobalDecl> DeferredDeclsToEmit;
+  struct DeferredGlobal {
+    DeferredGlobal(llvm::GlobalValue *GV, GlobalDecl GD) : GV(GV), GD(GD) {}
+    llvm::AssertingVH<llvm::GlobalValue> GV;
+    GlobalDecl GD;
+  };
+  std::vector<DeferredGlobal> DeferredDeclsToEmit;
+  void addDeferredDeclToEmit(llvm::GlobalValue *GV, GlobalDecl GD) {
+    DeferredDeclsToEmit.push_back(DeferredGlobal(GV, GD));
+  }
 
   /// List of aliases we have emitted. Used to make sure that what they point
   /// to is defined once we get to the end of the translation unit.
@@ -290,6 +298,7 @@
   /// forcing visibility of symbols which may otherwise be optimized
   /// out.
   std::vector<llvm::WeakVH> LLVMUsed;
+  std::vector<llvm::WeakVH> LLVMCompilerUsed;
 
   /// GlobalCtors - Store the list of global constructors and their respective
   /// priorities to be emitted when the translation unit is complete.
@@ -310,7 +319,10 @@
   llvm::StringMap<llvm::Constant*> AnnotationStrings;
 
   llvm::StringMap<llvm::Constant*> CFConstantStringMap;
-  llvm::StringMap<llvm::GlobalVariable*> ConstantStringMap;
+
+  llvm::StringMap<llvm::GlobalVariable *> Constant1ByteStringMap;
+  llvm::StringMap<llvm::GlobalVariable *> Constant2ByteStringMap;
+  llvm::StringMap<llvm::GlobalVariable *> Constant4ByteStringMap;
   llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
   llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap;
   llvm::DenseMap<const Expr*, llvm::Constant *> MaterializedGlobalTemporaryMap;
@@ -421,7 +433,7 @@
 
   GlobalDecl initializedGlobalDecl;
 
-  llvm::OwningPtr<llvm::SpecialCaseList> SanitizerBlacklist;
+  std::unique_ptr<llvm::SpecialCaseList> SanitizerBlacklist;
 
   const SanitizerOptions &SanOpts;
 
@@ -433,6 +445,8 @@
 
   ~CodeGenModule();
 
+  void clear();
+
   /// Release - Finalize LLVM code generation.
   void Release();
 
@@ -469,6 +483,10 @@
     return *RRData;
   }
 
+  PGOProfileData *getPGOData() const {
+    return PGOData;
+  }
+
   llvm::Constant *getStaticLocalDeclAddress(const VarDecl *D) {
     return StaticLocalDeclMap[D];
   }
@@ -525,7 +543,7 @@
   DiagnosticsEngine &getDiags() const { return Diags; }
   const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
   const TargetInfo &getTarget() const { return Target; }
-  CGCXXABI &getCXXABI() { return ABI; }
+  CGCXXABI &getCXXABI() const { return *ABI; }
   llvm::LLVMContext &getLLVMContext() { return VMContext; }
   
   bool shouldUseTBAA() const { return TBAA != 0; }
@@ -577,21 +595,6 @@
   /// for the thread-local variable declaration D.
   void setTLSMode(llvm::GlobalVariable *GV, const VarDecl &D) const;
 
-  /// TypeVisibilityKind - The kind of global variable that is passed to 
-  /// setTypeVisibility
-  enum TypeVisibilityKind {
-    TVK_ForVTT,
-    TVK_ForVTable,
-    TVK_ForConstructionVTable,
-    TVK_ForRTTI,
-    TVK_ForRTTIName
-  };
-
-  /// setTypeVisibility - Set the visibility for the given global
-  /// value which holds information about a type.
-  void setTypeVisibility(llvm::GlobalValue *GV, const CXXRecordDecl *D,
-                         TypeVisibilityKind TVK) const;
-
   static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
     switch (V) {
     case DefaultVisibility:   return llvm::GlobalValue::DefaultVisibility;
@@ -639,9 +642,9 @@
   /// GetAddrOfFunction - Return the address of the given function.  If Ty is
   /// non-null, then this function will use the specified type if it has to
   /// create it.
-  llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
-                                    llvm::Type *Ty = 0,
-                                    bool ForVTable = false);
+  llvm::Constant *GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty = 0,
+                                    bool ForVTable = false,
+                                    bool DontDefer = false);
 
   /// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor 
   /// for the given type.
@@ -769,14 +772,16 @@
   /// given type.
   llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
                                              CXXCtorType ctorType,
-                                             const CGFunctionInfo *fnInfo = 0);
+                                             const CGFunctionInfo *fnInfo = 0,
+                                             bool DontDefer = false);
 
   /// GetAddrOfCXXDestructor - Return the address of the destructor of the
   /// given type.
   llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
                                             CXXDtorType dtorType,
                                             const CGFunctionInfo *fnInfo = 0,
-                                            llvm::FunctionType *fnType = 0);
+                                            llvm::FunctionType *fnType = 0,
+                                            bool DontDefer = false);
 
   /// getBuiltinLibFunction - Given a builtin id for a function like
   /// "__builtin_fabsf", return a Function* for "fabsf".
@@ -798,10 +803,11 @@
   template<typename SomeDecl>
   void MaybeHandleStaticInExternC(const SomeDecl *D, llvm::GlobalValue *GV);
 
-  /// AddUsedGlobal - Add a global which should be forced to be
-  /// present in the object file; these are emitted to the llvm.used
-  /// metadata global.
-  void AddUsedGlobal(llvm::GlobalValue *GV);
+  /// Add a global to a list to be added to the llvm.used metadata.
+  void addUsedGlobal(llvm::GlobalValue *GV);
+
+  /// Add a global to a list to be added to the llvm.compiler.used metadata.
+  void addCompilerUsedGlobal(llvm::GlobalValue *GV);
 
   /// AddCXXDtorEntry - Add a destructor and object to add to the C++ global
   /// destructor function.
@@ -902,6 +908,10 @@
   /// as a return type.
   bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
 
+  /// ReturnSlotInterferesWithArgs - Return true iff the given type uses an
+  /// argument slot when 'sret' is used as a return type.
+  bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI);
+
   /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when
   /// used as a return type.
   bool ReturnTypeUsesFPRet(QualType ResultType);
@@ -1009,12 +1019,11 @@
 private:
   llvm::GlobalValue *GetGlobalValue(StringRef Ref);
 
-  llvm::Constant *GetOrCreateLLVMFunction(StringRef MangledName,
-                                          llvm::Type *Ty,
-                                          GlobalDecl D,
-                                          bool ForVTable,
-                                          llvm::AttributeSet ExtraAttrs =
-                                            llvm::AttributeSet());
+  llvm::Constant *
+  GetOrCreateLLVMFunction(StringRef MangledName, llvm::Type *Ty, GlobalDecl D,
+                          bool ForVTable, bool DontDefer = false,
+                          llvm::AttributeSet ExtraAttrs = llvm::AttributeSet());
+
   llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
                                         llvm::PointerType *PTy,
                                         const VarDecl *D,
@@ -1037,9 +1046,9 @@
                              llvm::Function *F,
                              bool IsIncompleteFunction);
 
-  void EmitGlobalDefinition(GlobalDecl D);
+  void EmitGlobalDefinition(GlobalDecl D, llvm::GlobalValue *GV = 0);
 
-  void EmitGlobalFunctionDefinition(GlobalDecl GD);
+  void EmitGlobalFunctionDefinition(GlobalDecl GD, llvm::GlobalValue *GV);
   void EmitGlobalVarDefinition(const VarDecl *D);
   void EmitAliasDefinition(GlobalDecl GD);
   void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
@@ -1104,9 +1113,8 @@
   /// still have a use for.
   void EmitDeferredVTables();
 
-  /// EmitLLVMUsed - Emit the llvm.used metadata used to force
-  /// references to global which may otherwise be optimized out.
-  void EmitLLVMUsed();
+  /// Emit the llvm.used and llvm.compiler.used metadata.
+  void emitLLVMUsed();
 
   /// \brief Emit the link options introduced by imported modules.
   void EmitModuleLinkOptions();
diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp
new file mode 100644
index 0000000..ac6b7bc
--- /dev/null
+++ b/lib/CodeGen/CodeGenPGO.cpp
@@ -0,0 +1,1033 @@
+//===--- CodeGenPGO.cpp - PGO Instrumentation for LLVM CodeGen --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based profile-guided optimization
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenPGO.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Config/config.h" // for strtoull()/strtoll() define
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/Support/FileSystem.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static void ReportBadPGOData(CodeGenModule &CGM, const char *Message) {
+  DiagnosticsEngine &Diags = CGM.getDiags();
+  unsigned diagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0");
+  Diags.Report(diagID) << Message;
+}
+
+PGOProfileData::PGOProfileData(CodeGenModule &CGM, std::string Path)
+  : CGM(CGM) {
+  if (llvm::MemoryBuffer::getFile(Path, DataBuffer)) {
+    ReportBadPGOData(CGM, "failed to open pgo data file");
+    return;
+  }
+
+  if (DataBuffer->getBufferSize() > std::numeric_limits<unsigned>::max()) {
+    ReportBadPGOData(CGM, "pgo data file too big");
+    return;
+  }
+
+  // Scan through the data file and map each function to the corresponding
+  // file offset where its counts are stored.
+  const char *BufferStart = DataBuffer->getBufferStart();
+  const char *BufferEnd = DataBuffer->getBufferEnd();
+  const char *CurPtr = BufferStart;
+  uint64_t MaxCount = 0;
+  while (CurPtr < BufferEnd) {
+    // Read the function name.
+    const char *FuncStart = CurPtr;
+    // For Objective-C methods, the name may include whitespace, so take the
+    // entire line, up to the newline, as the name. (This is a temporary hack
+    // since we are going to completely replace this file format in the near
+    // future.)
+    CurPtr = strchr(CurPtr, '\n');
+    if (!CurPtr) {
+      ReportBadPGOData(CGM, "pgo data file has malformed function entry");
+      return;
+    }
+    StringRef FuncName(FuncStart, CurPtr - FuncStart);
+
+    // Skip over the function hash.
+    CurPtr = strchr(++CurPtr, '\n');
+    if (!CurPtr) {
+      ReportBadPGOData(CGM, "pgo data file is missing the function hash");
+      return;
+    }
+
+    // Read the number of counters.
+    char *EndPtr;
+    unsigned NumCounters = strtol(++CurPtr, &EndPtr, 10);
+    if (EndPtr == CurPtr || *EndPtr != '\n' || NumCounters <= 0) {
+      ReportBadPGOData(CGM, "pgo data file has unexpected number of counters");
+      return;
+    }
+    CurPtr = EndPtr;
+
+    // Read function count.
+    uint64_t Count = strtoll(CurPtr, &EndPtr, 10);
+    if (EndPtr == CurPtr || *EndPtr != '\n') {
+      ReportBadPGOData(CGM, "pgo-data file has bad count value");
+      return;
+    }
+    CurPtr = EndPtr; // Point to '\n'.
+    FunctionCounts[FuncName] = Count;
+    MaxCount = Count > MaxCount ? Count : MaxCount;
+
+    // There is one line for each counter; skip over those lines.
+    // Since function count is already read, we start the loop from 1.
+    for (unsigned N = 1; N < NumCounters; ++N) {
+      CurPtr = strchr(++CurPtr, '\n');
+      if (!CurPtr) {
+        ReportBadPGOData(CGM, "pgo data file is missing some counter info");
+        return;
+      }
+    }
+
+    // Skip over the blank line separating functions.
+    CurPtr += 2;
+
+    DataOffsets[FuncName] = FuncStart - BufferStart;
+  }
+  MaxFunctionCount = MaxCount;
+}
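Reading the parser above, the interim text format appears to be one block per function: a name line, a hash line, a counter-count line, one line per counter (the first counter being the function entry count), and a blank separator line. A hedged sample with invented values:

    // Hypothetical contents of a profile data file in the format parsed above.
    static const char SamplePGOData[] =
        "main\n"  // function name (may contain spaces for ObjC methods)
        "24\n"    // function hash
        "3\n"     // number of counters
        "100\n"   // counter 0: function entry count
        "60\n"    // counter 1
        "40\n"    // counter 2
        "\n";     // blank line separating functions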
+
+bool PGOProfileData::getFunctionCounts(StringRef FuncName, uint64_t &FuncHash,
+                                       std::vector<uint64_t> &Counts) {
+  // Find the relevant section of the pgo-data file.
+  llvm::StringMap<unsigned>::const_iterator OffsetIter =
+    DataOffsets.find(FuncName);
+  if (OffsetIter == DataOffsets.end())
+    return true;
+  const char *CurPtr = DataBuffer->getBufferStart() + OffsetIter->getValue();
+
+  // Skip over the function name.
+  CurPtr = strchr(CurPtr, '\n');
+  assert(CurPtr && "pgo-data has corrupted function entry");
+
+  char *EndPtr;
+  // Read the function hash.
+  FuncHash = strtoll(++CurPtr, &EndPtr, 10);
+  assert(EndPtr != CurPtr && *EndPtr == '\n' &&
+         "pgo-data file has corrupted function hash");
+  CurPtr = EndPtr;
+
+  // Read the number of counters.
+  unsigned NumCounters = strtol(++CurPtr, &EndPtr, 10);
+  assert(EndPtr != CurPtr && *EndPtr == '\n' && NumCounters > 0 &&
+         "pgo-data file has corrupted number of counters");
+  CurPtr = EndPtr;
+
+  Counts.reserve(NumCounters);
+
+  for (unsigned N = 0; N < NumCounters; ++N) {
+    // Read the count value.
+    uint64_t Count = strtoll(CurPtr, &EndPtr, 10);
+    if (EndPtr == CurPtr || *EndPtr != '\n') {
+      ReportBadPGOData(CGM, "pgo-data file has bad count value");
+      return true;
+    }
+    Counts.push_back(Count);
+    CurPtr = EndPtr + 1;
+  }
+
+  // Make sure the number of counters matches up.
+  if (Counts.size() != NumCounters) {
+    ReportBadPGOData(CGM, "pgo-data file has inconsistent counters");
+    return true;
+  }
+
+  return false;
+}
+
+void CodeGenPGO::setFuncName(llvm::Function *Fn) {
+  RawFuncName = Fn->getName();
+
+  // Function names may be prefixed with a '\1' byte to indicate that the
+  // backend should not modify the symbol for any platform naming convention.
+  // Do not include that prefix in the PGO profile name.
+  if (RawFuncName[0] == '\1')
+    RawFuncName = RawFuncName.substr(1);
+
+  if (!Fn->hasLocalLinkage()) {
+    PrefixedFuncName.reset(new std::string(RawFuncName));
+    return;
+  }
+
+  // For local symbols, prepend the main file name to distinguish them.
+  // Do not include the full path in the file name since there's no guarantee
+  // that it will stay the same, e.g., if the files are checked out from
+  // version control in different locations.
+  PrefixedFuncName.reset(new std::string(CGM.getCodeGenOpts().MainFileName));
+  if (PrefixedFuncName->empty())
+    PrefixedFuncName->assign("<unknown>");
+  PrefixedFuncName->append(":");
+  PrefixedFuncName->append(RawFuncName);
+}
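So, under the scheme above, profile names come out roughly as follows (file and function names invented):

    // extern function 'foo'                           -> "foo"
    // static function 'bar', main file name "t.c"     -> "t.c:bar"
    // static function 'baz', main file name unknown   -> "<unknown>:baz"
    // symbol literally named "\1_foo"                 -> "_foo"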
+
+static llvm::Function *getRegisterFunc(CodeGenModule &CGM) {
+  return CGM.getModule().getFunction("__llvm_profile_register_functions");
+}
+
+static llvm::BasicBlock *getOrInsertRegisterBB(CodeGenModule &CGM) {
+  // Don't do this for Darwin.  compiler-rt uses linker magic.
+  if (CGM.getTarget().getTriple().isOSDarwin())
+    return nullptr;
+
+  // Only need to insert this once per module.
+  if (llvm::Function *RegisterF = getRegisterFunc(CGM))
+    return &RegisterF->getEntryBlock();
+
+  // Construct the function.
+  auto *VoidTy = llvm::Type::getVoidTy(CGM.getLLVMContext());
+  auto *RegisterFTy = llvm::FunctionType::get(VoidTy, false);
+  auto *RegisterF = llvm::Function::Create(RegisterFTy,
+                                           llvm::GlobalValue::InternalLinkage,
+                                           "__llvm_profile_register_functions",
+                                           &CGM.getModule());
+  RegisterF->setUnnamedAddr(true);
+  if (CGM.getCodeGenOpts().DisableRedZone)
+    RegisterF->addFnAttr(llvm::Attribute::NoRedZone);
+
+  // Construct and return the entry block.
+  auto *BB = llvm::BasicBlock::Create(CGM.getLLVMContext(), "", RegisterF);
+  CGBuilderTy Builder(BB);
+  Builder.CreateRetVoid();
+  return BB;
+}
+
+static llvm::Constant *getOrInsertRuntimeRegister(CodeGenModule &CGM) {
+  auto *VoidTy = llvm::Type::getVoidTy(CGM.getLLVMContext());
+  auto *VoidPtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+  auto *RuntimeRegisterTy = llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
+  return CGM.getModule().getOrInsertFunction("__llvm_profile_register_function",
+                                             RuntimeRegisterTy);
+}
+
+static bool isMachO(const CodeGenModule &CGM) {
+  return CGM.getTarget().getTriple().isOSBinFormatMachO();
+}
+
+static StringRef getCountersSection(const CodeGenModule &CGM) {
+  return isMachO(CGM) ? "__DATA,__llvm_prf_cnts" : "__llvm_prf_cnts";
+}
+
+static StringRef getNameSection(const CodeGenModule &CGM) {
+  return isMachO(CGM) ? "__DATA,__llvm_prf_names" : "__llvm_prf_names";
+}
+
+static StringRef getDataSection(const CodeGenModule &CGM) {
+  return isMachO(CGM) ? "__DATA,__llvm_prf_data" : "__llvm_prf_data";
+}
+
+llvm::GlobalVariable *CodeGenPGO::buildDataVar() {
+  // Create name variable.
+  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+  auto *VarName = llvm::ConstantDataArray::getString(Ctx, getFuncName(),
+                                                     false);
+  auto *Name = new llvm::GlobalVariable(CGM.getModule(), VarName->getType(),
+                                        true, VarLinkage, VarName,
+                                        getFuncVarName("name"));
+  Name->setSection(getNameSection(CGM));
+  Name->setAlignment(1);
+
+  // Create data variable.
+  auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
+  auto *Int64Ty = llvm::Type::getInt64Ty(Ctx);
+  auto *Int8PtrTy = llvm::Type::getInt8PtrTy(Ctx);
+  auto *Int64PtrTy = llvm::Type::getInt64PtrTy(Ctx);
+  llvm::Type *DataTypes[] = {
+    Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int64PtrTy
+  };
+  auto *DataTy = llvm::StructType::get(Ctx, makeArrayRef(DataTypes));
+  llvm::Constant *DataVals[] = {
+    llvm::ConstantInt::get(Int32Ty, getFuncName().size()),
+    llvm::ConstantInt::get(Int32Ty, NumRegionCounters),
+    llvm::ConstantInt::get(Int64Ty, FunctionHash),
+    llvm::ConstantExpr::getBitCast(Name, Int8PtrTy),
+    llvm::ConstantExpr::getBitCast(RegionCounters, Int64PtrTy)
+  };
+  auto *Data =
+    new llvm::GlobalVariable(CGM.getModule(), DataTy, true, VarLinkage,
+                             llvm::ConstantStruct::get(DataTy, DataVals),
+                             getFuncVarName("data"));
+
+  // All the data should be packed into an array in its own section.
+  Data->setSection(getDataSection(CGM));
+  Data->setAlignment(8);
+
+  // Make sure the data doesn't get deleted.
+  CGM.addUsedGlobal(Data);
+  return Data;
+}
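+
+// Conceptually, the variable built above is a per-function record of the form
+// (sketch; fields correspond to DataTypes/DataVals):
+//
+//   { i32 <name length>, i32 <number of counters>, i64 <function hash>,
+//     i8* <name>, i64* <counters> }
+//
+// Every record is placed in the __llvm_prf_data section so the profiling
+// runtime can treat the section as one packed array of records.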
+
+void CodeGenPGO::emitInstrumentationData() {
+  if (!CGM.getCodeGenOpts().ProfileInstrGenerate)
+    return;
+
+  // Build the data.
+  auto *Data = buildDataVar();
+
+  // Register the data.
+  auto *RegisterBB = getOrInsertRegisterBB(CGM);
+  if (!RegisterBB)
+    return;
+  CGBuilderTy Builder(RegisterBB->getTerminator());
+  auto *VoidPtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+  Builder.CreateCall(getOrInsertRuntimeRegister(CGM),
+                     Builder.CreateBitCast(Data, VoidPtrTy));
+}
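+
+// On targets that do not rely on linker magic, the cumulative effect of the
+// calls above is a module-local registration function along these lines
+// (IR sketch, one call per instrumented function):
+//
+//   define internal void @__llvm_profile_register_functions() {
+//     call void @__llvm_profile_register_function(i8* bitcast (... @data to i8*))
+//     ...
+//     ret void
+//   }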
+
+llvm::Function *CodeGenPGO::emitInitialization(CodeGenModule &CGM) {
+  if (!CGM.getCodeGenOpts().ProfileInstrGenerate)
+    return nullptr;
+
+  // Only need to create this once per module.
+  if (CGM.getModule().getFunction("__llvm_profile_init"))
+    return nullptr;
+
+  // Get the function to call at initialization.
+  llvm::Constant *RegisterF = getRegisterFunc(CGM);
+  if (!RegisterF)
+    return nullptr;
+
+  // Create the initialization function.
+  auto *VoidTy = llvm::Type::getVoidTy(CGM.getLLVMContext());
+  auto *F = llvm::Function::Create(llvm::FunctionType::get(VoidTy, false),
+                                   llvm::GlobalValue::InternalLinkage,
+                                   "__llvm_profile_init", &CGM.getModule());
+  F->setUnnamedAddr(true);
+  F->addFnAttr(llvm::Attribute::NoInline);
+  if (CGM.getCodeGenOpts().DisableRedZone)
+    F->addFnAttr(llvm::Attribute::NoRedZone);
+
+  // Add the basic block and the necessary calls.
+  CGBuilderTy Builder(llvm::BasicBlock::Create(CGM.getLLVMContext(), "", F));
+  Builder.CreateCall(RegisterF);
+  Builder.CreateRetVoid();
+
+  return F;
+}
+
+namespace {
+  /// A StmtVisitor that fills a map of statements to PGO counters.
+  struct MapRegionCounters : public ConstStmtVisitor<MapRegionCounters> {
+    /// The next counter value to assign.
+    unsigned NextCounter;
+    /// The map of statements to counters.
+    llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
+
+    MapRegionCounters(llvm::DenseMap<const Stmt *, unsigned> &CounterMap)
+        : NextCounter(0), CounterMap(CounterMap) {}
+
+    void VisitChildren(const Stmt *S) {
+      for (Stmt::const_child_range I = S->children(); I; ++I)
+        if (*I)
+          this->Visit(*I);
+    }
+    void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+    /// Assign a counter to track entry to the function body.
+    void VisitFunctionDecl(const FunctionDecl *S) {
+      CounterMap[S->getBody()] = NextCounter++;
+      Visit(S->getBody());
+    }
+    void VisitObjCMethodDecl(const ObjCMethodDecl *S) {
+      CounterMap[S->getBody()] = NextCounter++;
+      Visit(S->getBody());
+    }
+    void VisitBlockDecl(const BlockDecl *S) {
+      CounterMap[S->getBody()] = NextCounter++;
+      Visit(S->getBody());
+    }
+    /// Assign a counter to track the block following a label.
+    void VisitLabelStmt(const LabelStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getSubStmt());
+    }
+    /// Assign a counter for the body of a while loop.
+    void VisitWhileStmt(const WhileStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getCond());
+      Visit(S->getBody());
+    }
+    /// Assign a counter for the body of a do-while loop.
+    void VisitDoStmt(const DoStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getBody());
+      Visit(S->getCond());
+    }
+    /// Assign a counter for the body of a for loop.
+    void VisitForStmt(const ForStmt *S) {
+      CounterMap[S] = NextCounter++;
+      if (S->getInit())
+        Visit(S->getInit());
+      const Expr *E;
+      if ((E = S->getCond()))
+        Visit(E);
+      if ((E = S->getInc()))
+        Visit(E);
+      Visit(S->getBody());
+    }
+    /// Assign a counter for the body of a for-range loop.
+    void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getRangeStmt());
+      Visit(S->getBeginEndStmt());
+      Visit(S->getCond());
+      Visit(S->getLoopVarStmt());
+      Visit(S->getBody());
+      Visit(S->getInc());
+    }
+    /// Assign a counter for the body of a for-collection loop.
+    void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getElement());
+      Visit(S->getBody());
+    }
+    /// Assign a counter for the exit block of the switch statement.
+    void VisitSwitchStmt(const SwitchStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getCond());
+      Visit(S->getBody());
+    }
+    /// Assign a counter for a particular case in a switch. This counts jumps
+    /// from the switch header as well as fallthrough from the case before this
+    /// one.
+    void VisitCaseStmt(const CaseStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getSubStmt());
+    }
+    /// Assign a counter for the default case of a switch statement. The count
+    /// is the number of branches from the switch header to the default, and does
+    /// not include fallthrough from previous cases. If we have multiple
+    /// conditional branch blocks from the switch instruction to the default
+    /// block, as with large GNU case ranges, this is the counter for the last
+    /// edge in that series, rather than the first.
+    void VisitDefaultStmt(const DefaultStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getSubStmt());
+    }
+    /// Assign a counter for the "then" part of an if statement. The count for
+    /// the "else" part, if it exists, will be calculated from this counter.
+    void VisitIfStmt(const IfStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getCond());
+      Visit(S->getThen());
+      if (S->getElse())
+        Visit(S->getElse());
+    }
+    /// Assign a counter for the continuation block of a C++ try statement.
+    void VisitCXXTryStmt(const CXXTryStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getTryBlock());
+      for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I)
+        Visit(S->getHandler(I));
+    }
+    /// Assign a counter for a catch statement's handler block.
+    void VisitCXXCatchStmt(const CXXCatchStmt *S) {
+      CounterMap[S] = NextCounter++;
+      Visit(S->getHandlerBlock());
+    }
+    /// Assign a counter for the "true" part of a conditional operator. The
+    /// count in the "false" part will be calculated from this counter.
+    void VisitConditionalOperator(const ConditionalOperator *E) {
+      CounterMap[E] = NextCounter++;
+      Visit(E->getCond());
+      Visit(E->getTrueExpr());
+      Visit(E->getFalseExpr());
+    }
+    /// Assign a counter for the right hand side of a logical and operator.
+    void VisitBinLAnd(const BinaryOperator *E) {
+      CounterMap[E] = NextCounter++;
+      Visit(E->getLHS());
+      Visit(E->getRHS());
+    }
+    /// Assign a counter for the right hand side of a logical or operator.
+    void VisitBinLOr(const BinaryOperator *E) {
+      CounterMap[E] = NextCounter++;
+      Visit(E->getLHS());
+      Visit(E->getRHS());
+    }
+  };
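+
+  // For example (counter numbering is illustrative), given
+  //
+  //   void f(int x) {        // counter 0: function body
+  //     if (x > 0)           // counter 1: "then" block
+  //       g();
+  //     while (x--)          // counter 2: loop body
+  //       h();
+  //   }
+  //
+  // the MapRegionCounters visitor above assigns counters in visitation order,
+  // so NumRegionCounters is 3 for f.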
+
+  /// A StmtVisitor that propagates the raw counts through the AST and
+  /// records the count at statements where the value may change.
+  struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
+    /// PGO state.
+    CodeGenPGO &PGO;
+
+    /// A flag that is set when the current count should be recorded on the
+    /// next statement, such as at the exit of a loop.
+    bool RecordNextStmtCount;
+
+    /// The map of statements to count values.
+    llvm::DenseMap<const Stmt *, uint64_t> &CountMap;
+
+    /// BreakContinueStack - Keep counts of breaks and continues inside loops.
+    struct BreakContinue {
+      uint64_t BreakCount;
+      uint64_t ContinueCount;
+      BreakContinue() : BreakCount(0), ContinueCount(0) {}
+    };
+    SmallVector<BreakContinue, 8> BreakContinueStack;
+
+    ComputeRegionCounts(llvm::DenseMap<const Stmt *, uint64_t> &CountMap,
+                        CodeGenPGO &PGO)
+        : PGO(PGO), RecordNextStmtCount(false), CountMap(CountMap) {}
+
+    void RecordStmtCount(const Stmt *S) {
+      if (RecordNextStmtCount) {
+        CountMap[S] = PGO.getCurrentRegionCount();
+        RecordNextStmtCount = false;
+      }
+    }
+
+    void VisitStmt(const Stmt *S) {
+      RecordStmtCount(S);
+      for (Stmt::const_child_range I = S->children(); I; ++I) {
+        if (*I)
+          this->Visit(*I);
+      }
+    }
+
+    void VisitFunctionDecl(const FunctionDecl *S) {
+      RegionCounter Cnt(PGO, S->getBody());
+      Cnt.beginRegion();
+      CountMap[S->getBody()] = PGO.getCurrentRegionCount();
+      Visit(S->getBody());
+    }
+
+    void VisitObjCMethodDecl(const ObjCMethodDecl *S) {
+      RegionCounter Cnt(PGO, S->getBody());
+      Cnt.beginRegion();
+      CountMap[S->getBody()] = PGO.getCurrentRegionCount();
+      Visit(S->getBody());
+    }
+
+    void VisitBlockDecl(const BlockDecl *S) {
+      RegionCounter Cnt(PGO, S->getBody());
+      Cnt.beginRegion();
+      CountMap[S->getBody()] = PGO.getCurrentRegionCount();
+      Visit(S->getBody());
+    }
+
+    void VisitReturnStmt(const ReturnStmt *S) {
+      RecordStmtCount(S);
+      if (S->getRetValue())
+        Visit(S->getRetValue());
+      PGO.setCurrentRegionUnreachable();
+      RecordNextStmtCount = true;
+    }
+
+    void VisitGotoStmt(const GotoStmt *S) {
+      RecordStmtCount(S);
+      PGO.setCurrentRegionUnreachable();
+      RecordNextStmtCount = true;
+    }
+
+    void VisitLabelStmt(const LabelStmt *S) {
+      RecordNextStmtCount = false;
+      RegionCounter Cnt(PGO, S);
+      Cnt.beginRegion();
+      CountMap[S] = PGO.getCurrentRegionCount();
+      Visit(S->getSubStmt());
+    }
+
+    void VisitBreakStmt(const BreakStmt *S) {
+      RecordStmtCount(S);
+      assert(!BreakContinueStack.empty() && "break not in a loop or switch!");
+      BreakContinueStack.back().BreakCount += PGO.getCurrentRegionCount();
+      PGO.setCurrentRegionUnreachable();
+      RecordNextStmtCount = true;
+    }
+
+    void VisitContinueStmt(const ContinueStmt *S) {
+      RecordStmtCount(S);
+      assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+      BreakContinueStack.back().ContinueCount += PGO.getCurrentRegionCount();
+      PGO.setCurrentRegionUnreachable();
+      RecordNextStmtCount = true;
+    }
+
+    void VisitWhileStmt(const WhileStmt *S) {
+      RecordStmtCount(S);
+      RegionCounter Cnt(PGO, S);
+      BreakContinueStack.push_back(BreakContinue());
+      // Visit the body region first so the break/continue adjustments can be
+      // included when visiting the condition.
+      Cnt.beginRegion();
+      CountMap[S->getBody()] = PGO.getCurrentRegionCount();
+      Visit(S->getBody());
+      Cnt.adjustForControlFlow();
+
+      // ...then go back and propagate counts through the condition. The count
+      // at the start of the condition is the sum of the incoming edges,
+      // the backedge from the end of the loop body, and the edges from
+      // continue statements.
+      BreakContinue BC = BreakContinueStack.pop_back_val();
+      Cnt.setCurrentRegionCount(Cnt.getParentCount() +
+                                Cnt.getAdjustedCount() + BC.ContinueCount);
+      CountMap[S->getCond()] = PGO.getCurrentRegionCount();
+      Visit(S->getCond());
+      Cnt.adjustForControlFlow();
+      Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount);
+      RecordNextStmtCount = true;
+    }
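+
+    // Worked example for the while loop above (counts are illustrative): if
+    // the loop is entered 10 times and its body runs 100 times with no breaks
+    // or continues, the body count is the counter value 100, the condition
+    // count becomes parent (10) + backedge (100) = 110, and
+    // applyAdjustmentsToRegion restores the count after the loop to 10.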
+
+    void VisitDoStmt(const DoStmt *S) {
+      RecordStmtCount(S);
+      RegionCounter Cnt(PGO, S);
+      BreakContinueStack.push_back(BreakContinue());
+      Cnt.beginRegion(/*AddIncomingFallThrough=*/true);
+      CountMap[S->getBody()] = PGO.getCurrentRegionCount();
+      Visit(S->getBody());
+      Cnt.adjustForControlFlow();
+
+      BreakContinue BC = BreakContinueStack.pop_back_val();
+      // The count at the start of the condition is equal to the count at the
+      // end of the body. The adjusted count does not include either the
+      // fall-through count coming into the loop or the continue count, so add
+      // both of those separately. This is coincidentally the same equation as
+      // with while loops but for different reasons.
+      Cnt.setCurrentRegionCount(Cnt.getParentCount() +
+                                Cnt.getAdjustedCount() + BC.ContinueCount);
+      CountMap[S->getCond()] = PGO.getCurrentRegionCount();
+      Visit(S->getCond());
+      Cnt.adjustForControlFlow();
+      Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount);
+      RecordNextStmtCount = true;
+    }
+
+    void VisitForStmt(const ForStmt *S) {
+      RecordStmtCount(S);
+      if (S->getInit())
+        Visit(S->getInit());
+      RegionCounter Cnt(PGO, S);
+      BreakContinueStack.push_back(BreakContinue());
+      // Visit the body region first. (This is basically the same as a while
+      // loop; see further comments in VisitWhileStmt.)
+      Cnt.beginRegion();
+      CountMap[S->getBody()] = PGO.getCurrentRegionCount();
+      Visit(S->getBody());
+      Cnt.adjustForControlFlow();
+
+      // The increment is essentially part of the body but it needs to include
+      // the count for all the continue statements.
+      if (S->getInc()) {
+        Cnt.setCurrentRegionCount(PGO.getCurrentRegionCount() +
+                                  BreakContinueStack.back().ContinueCount);
+        CountMap[S->getInc()] = PGO.getCurrentRegionCount();
+        Visit(S->getInc());
+        Cnt.adjustForControlFlow();
+      }
+
+      BreakContinue BC = BreakContinueStack.pop_back_val();
+
+      // ...then go back and propagate counts through the condition.
+      if (S->getCond()) {
+        Cnt.setCurrentRegionCount(Cnt.getParentCount() +
+                                  Cnt.getAdjustedCount() +
+                                  BC.ContinueCount);
+        CountMap[S->getCond()] = PGO.getCurrentRegionCount();
+        Visit(S->getCond());
+        Cnt.adjustForControlFlow();
+      }
+      Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount);
+      RecordNextStmtCount = true;
+    }
+
+    void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
+      RecordStmtCount(S);
+      Visit(S->getRangeStmt());
+      Visit(S->getBeginEndStmt());
+      RegionCounter Cnt(PGO, S);
+      BreakContinueStack.push_back(BreakContinue());
+      // Visit the body region first. (This is basically the same as a while
+      // loop; see further comments in VisitWhileStmt.)
+      Cnt.beginRegion();
+      CountMap[S->getLoopVarStmt()] = PGO.getCurrentRegionCount();
+      Visit(S->getLoopVarStmt());
+      Visit(S->getBody());
+      Cnt.adjustForControlFlow();
+
+      // The increment is essentially part of the body but it needs to include
+      // the count for all the continue statements.
+      Cnt.setCurrentRegionCount(PGO.getCurrentRegionCount() +
+                                BreakContinueStack.back().ContinueCount);
+      CountMap[S->getInc()] = PGO.getCurrentRegionCount();
+      Visit(S->getInc());
+      Cnt.adjustForControlFlow();
+
+      BreakContinue BC = BreakContinueStack.pop_back_val();
+
+      // ...then go back and propagate counts through the condition.
+      Cnt.setCurrentRegionCount(Cnt.getParentCount() +
+                                Cnt.getAdjustedCount() +
+                                BC.ContinueCount);
+      CountMap[S->getCond()] = PGO.getCurrentRegionCount();
+      Visit(S->getCond());
+      Cnt.adjustForControlFlow();
+      Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount);
+      RecordNextStmtCount = true;
+    }
+
+    void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
+      RecordStmtCount(S);
+      Visit(S->getElement());
+      RegionCounter Cnt(PGO, S);
+      BreakContinueStack.push_back(BreakContinue());
+      Cnt.beginRegion();
+      CountMap[S->getBody()] = PGO.getCurrentRegionCount();
+      Visit(S->getBody());
+      BreakContinue BC = BreakContinueStack.pop_back_val();
+      Cnt.adjustForControlFlow();
+      Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount);
+      RecordNextStmtCount = true;
+    }
+
+    void VisitSwitchStmt(const SwitchStmt *S) {
+      RecordStmtCount(S);
+      Visit(S->getCond());
+      PGO.setCurrentRegionUnreachable();
+      BreakContinueStack.push_back(BreakContinue());
+      Visit(S->getBody());
+      // If the switch is inside a loop, add the continue counts.
+      BreakContinue BC = BreakContinueStack.pop_back_val();
+      if (!BreakContinueStack.empty())
+        BreakContinueStack.back().ContinueCount += BC.ContinueCount;
+      RegionCounter ExitCnt(PGO, S);
+      ExitCnt.beginRegion();
+      RecordNextStmtCount = true;
+    }
+
+    void VisitCaseStmt(const CaseStmt *S) {
+      RecordNextStmtCount = false;
+      RegionCounter Cnt(PGO, S);
+      Cnt.beginRegion(/*AddIncomingFallThrough=*/true);
+      CountMap[S] = Cnt.getCount();
+      RecordNextStmtCount = true;
+      Visit(S->getSubStmt());
+    }
+
+    void VisitDefaultStmt(const DefaultStmt *S) {
+      RecordNextStmtCount = false;
+      RegionCounter Cnt(PGO, S);
+      Cnt.beginRegion(/*AddIncomingFallThrough=*/true);
+      CountMap[S] = Cnt.getCount();
+      RecordNextStmtCount = true;
+      Visit(S->getSubStmt());
+    }
+
+    void VisitIfStmt(const IfStmt *S) {
+      RecordStmtCount(S);
+      RegionCounter Cnt(PGO, S);
+      Visit(S->getCond());
+
+      Cnt.beginRegion();
+      CountMap[S->getThen()] = PGO.getCurrentRegionCount();
+      Visit(S->getThen());
+      Cnt.adjustForControlFlow();
+
+      if (S->getElse()) {
+        Cnt.beginElseRegion();
+        CountMap[S->getElse()] = PGO.getCurrentRegionCount();
+        Visit(S->getElse());
+        Cnt.adjustForControlFlow();
+      }
+      Cnt.applyAdjustmentsToRegion(0);
+      RecordNextStmtCount = true;
+    }
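+
+    // Worked example for the if statement above (counts are illustrative): if
+    // the statement is reached 100 times and the "then" counter is 60, the
+    // "else" count is derived as parent - then = 40, and with no early exits
+    // the region count after the if returns to 100.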
+
+    void VisitCXXTryStmt(const CXXTryStmt *S) {
+      RecordStmtCount(S);
+      Visit(S->getTryBlock());
+      for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I)
+        Visit(S->getHandler(I));
+      RegionCounter Cnt(PGO, S);
+      Cnt.beginRegion();
+      RecordNextStmtCount = true;
+    }
+
+    void VisitCXXCatchStmt(const CXXCatchStmt *S) {
+      RecordNextStmtCount = false;
+      RegionCounter Cnt(PGO, S);
+      Cnt.beginRegion();
+      CountMap[S] = PGO.getCurrentRegionCount();
+      Visit(S->getHandlerBlock());
+    }
+
+    void VisitConditionalOperator(const ConditionalOperator *E) {
+      RecordStmtCount(E);
+      RegionCounter Cnt(PGO, E);
+      Visit(E->getCond());
+
+      Cnt.beginRegion();
+      CountMap[E->getTrueExpr()] = PGO.getCurrentRegionCount();
+      Visit(E->getTrueExpr());
+      Cnt.adjustForControlFlow();
+
+      Cnt.beginElseRegion();
+      CountMap[E->getFalseExpr()] = PGO.getCurrentRegionCount();
+      Visit(E->getFalseExpr());
+      Cnt.adjustForControlFlow();
+
+      Cnt.applyAdjustmentsToRegion(0);
+      RecordNextStmtCount = true;
+    }
+
+    void VisitBinLAnd(const BinaryOperator *E) {
+      RecordStmtCount(E);
+      RegionCounter Cnt(PGO, E);
+      Visit(E->getLHS());
+      Cnt.beginRegion();
+      CountMap[E->getRHS()] = PGO.getCurrentRegionCount();
+      Visit(E->getRHS());
+      Cnt.adjustForControlFlow();
+      Cnt.applyAdjustmentsToRegion(0);
+      RecordNextStmtCount = true;
+    }
+
+    void VisitBinLOr(const BinaryOperator *E) {
+      RecordStmtCount(E);
+      RegionCounter Cnt(PGO, E);
+      Visit(E->getLHS());
+      Cnt.beginRegion();
+      CountMap[E->getRHS()] = PGO.getCurrentRegionCount();
+      Visit(E->getRHS());
+      Cnt.adjustForControlFlow();
+      Cnt.applyAdjustmentsToRegion(0);
+      RecordNextStmtCount = true;
+    }
+  };
+}
+
+static void emitRuntimeHook(CodeGenModule &CGM) {
+  LLVM_CONSTEXPR const char *RuntimeVarName = "__llvm_profile_runtime";
+  LLVM_CONSTEXPR const char *RuntimeUserName = "__llvm_profile_runtime_user";
+  if (CGM.getModule().getGlobalVariable(RuntimeVarName))
+    return;
+
+  // Declare the runtime hook.
+  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+  auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
+  auto *Var = new llvm::GlobalVariable(CGM.getModule(), Int32Ty, false,
+                                       llvm::GlobalValue::ExternalLinkage,
+                                       nullptr, RuntimeVarName);
+
+  // Make a function that uses it.
+  auto *User = llvm::Function::Create(llvm::FunctionType::get(Int32Ty, false),
+                                      llvm::GlobalValue::LinkOnceODRLinkage,
+                                      RuntimeUserName, &CGM.getModule());
+  User->addFnAttr(llvm::Attribute::NoInline);
+  if (CGM.getCodeGenOpts().DisableRedZone)
+    User->addFnAttr(llvm::Attribute::NoRedZone);
+  CGBuilderTy Builder(llvm::BasicBlock::Create(CGM.getLLVMContext(), "", User));
+  auto *Load = Builder.CreateLoad(Var);
+  Builder.CreateRet(Load);
+
+  // Create a use of the function.  Now the definition of the runtime variable
+  // should get pulled in, along with any static initializers.
+  CGM.addUsedGlobal(User);
+}
+
+void CodeGenPGO::assignRegionCounters(const Decl *D, llvm::Function *Fn) {
+  bool InstrumentRegions = CGM.getCodeGenOpts().ProfileInstrGenerate;
+  PGOProfileData *PGOData = CGM.getPGOData();
+  if (!InstrumentRegions && !PGOData)
+    return;
+  if (!D)
+    return;
+  setFuncName(Fn);
+
+  // Set the linkage for variables based on the function linkage.  Usually, we
+  // want to match it, but available_externally and extern_weak both have the
+  // wrong semantics.
+  VarLinkage = Fn->getLinkage();
+  switch (VarLinkage) {
+  case llvm::GlobalValue::ExternalWeakLinkage:
+    VarLinkage = llvm::GlobalValue::LinkOnceAnyLinkage;
+    break;
+  case llvm::GlobalValue::AvailableExternallyLinkage:
+    VarLinkage = llvm::GlobalValue::LinkOnceODRLinkage;
+    break;
+  default:
+    break;
+  }
+
+  mapRegionCounters(D);
+  if (InstrumentRegions) {
+    emitRuntimeHook(CGM);
+    emitCounterVariables();
+  }
+  if (PGOData) {
+    loadRegionCounts(PGOData);
+    computeRegionCounts(D);
+    applyFunctionAttributes(PGOData, Fn);
+  }
+}
+
+void CodeGenPGO::mapRegionCounters(const Decl *D) {
+  RegionCounterMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
+  MapRegionCounters Walker(*RegionCounterMap);
+  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+    Walker.VisitFunctionDecl(FD);
+  else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
+    Walker.VisitObjCMethodDecl(MD);
+  else if (const BlockDecl *BD = dyn_cast_or_null<BlockDecl>(D))
+    Walker.VisitBlockDecl(BD);
+  NumRegionCounters = Walker.NextCounter;
+  // FIXME: The number of counters isn't sufficient for the hash
+  FunctionHash = NumRegionCounters;
+}
+
+void CodeGenPGO::computeRegionCounts(const Decl *D) {
+  StmtCountMap.reset(new llvm::DenseMap<const Stmt *, uint64_t>);
+  ComputeRegionCounts Walker(*StmtCountMap, *this);
+  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+    Walker.VisitFunctionDecl(FD);
+  else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
+    Walker.VisitObjCMethodDecl(MD);
+  else if (const BlockDecl *BD = dyn_cast_or_null<BlockDecl>(D))
+    Walker.VisitBlockDecl(BD);
+}
+
+void CodeGenPGO::applyFunctionAttributes(PGOProfileData *PGOData,
+                                         llvm::Function *Fn) {
+  if (!haveRegionCounts())
+    return;
+
+  uint64_t MaxFunctionCount = PGOData->getMaximumFunctionCount();
+  uint64_t FunctionCount = getRegionCount(0);
+  if (FunctionCount >= (uint64_t)(0.3 * (double)MaxFunctionCount))
+    // Turn on InlineHint attribute for hot functions.
+    // FIXME: 30% is from preliminary tuning on SPEC, it may not be optimal.
+    Fn->addFnAttr(llvm::Attribute::InlineHint);
+  else if (FunctionCount <= (uint64_t)(0.01 * (double)MaxFunctionCount))
+    // Turn on Cold attribute for cold functions.
+    // FIXME: 1% is from preliminary tuning on SPEC, it may not be optimal.
+    Fn->addFnAttr(llvm::Attribute::Cold);
+}
+
+void CodeGenPGO::emitCounterVariables() {
+  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+  llvm::ArrayType *CounterTy = llvm::ArrayType::get(llvm::Type::getInt64Ty(Ctx),
+                                                    NumRegionCounters);
+  RegionCounters =
+    new llvm::GlobalVariable(CGM.getModule(), CounterTy, false, VarLinkage,
+                             llvm::Constant::getNullValue(CounterTy),
+                             getFuncVarName("counters"));
+  RegionCounters->setAlignment(8);
+  RegionCounters->setSection(getCountersSection(CGM));
+}
+
+void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, unsigned Counter) {
+  if (!RegionCounters)
+    return;
+  llvm::Value *Addr =
+    Builder.CreateConstInBoundsGEP2_64(RegionCounters, 0, Counter);
+  llvm::Value *Count = Builder.CreateLoad(Addr, "pgocount");
+  Count = Builder.CreateAdd(Count, Builder.getInt64(1));
+  Builder.CreateStore(Count, Addr);
+}
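+
+// The increment emitted above is a plain load/add/store of the counter slot,
+// roughly (IR sketch for counter index 2 of a hypothetical function "foo"):
+//
+//   %pgocount = load i64* getelementptr inbounds
+//       ([3 x i64]* @__llvm_profile_counters_foo, i64 0, i64 2)
+//   %1 = add i64 %pgocount, 1
+//   store i64 %1, i64* getelementptr inbounds (...)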
+
+void CodeGenPGO::loadRegionCounts(PGOProfileData *PGOData) {
+  // For now, ignore the counts from the PGO data file only if the number of
+  // counters does not match. This could be tightened down in the future to
+  // ignore counts when the input changes in various ways, e.g., by comparing a
+  // hash value based on some characteristics of the input.
+  RegionCounts.reset(new std::vector<uint64_t>);
+  uint64_t Hash;
+  if (PGOData->getFunctionCounts(getFuncName(), Hash, *RegionCounts) ||
+      Hash != FunctionHash || RegionCounts->size() != NumRegionCounters)
+    RegionCounts.reset();
+}
+
+void CodeGenPGO::destroyRegionCounters() {
+  RegionCounterMap.reset();
+  StmtCountMap.reset();
+  RegionCounts.reset();
+}
+
+/// \brief Calculate what to divide by to scale weights.
+///
+/// Given the maximum weight, calculate a divisor that will scale all the
+/// weights to strictly less than UINT32_MAX.
+static uint64_t calculateWeightScale(uint64_t MaxWeight) {
+  return MaxWeight < UINT32_MAX ? 1 : MaxWeight / UINT32_MAX + 1;
+}
+
+/// \brief Scale an individual branch weight (and add 1).
+///
+/// Scale a 64-bit weight down to 32-bits using \c Scale.
+///
+/// According to Laplace's Rule of Succession, it is better to compute the
+/// weight based on the count plus 1, so universally add 1 to the value.
+///
+/// \pre \c Scale was calculated by \a calculateWeightScale() with a weight no
+/// greater than \c Weight.
+static uint32_t scaleBranchWeight(uint64_t Weight, uint64_t Scale) {
+  assert(Scale && "scale by 0?");
+  uint64_t Scaled = Weight / Scale + 1;
+  assert(Scaled <= UINT32_MAX && "overflow 32-bits");
+  return Scaled;
+}
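+
+// For example, counts of {3,000,000,000, 9,000,000,000} give a scale of
+// 9,000,000,000 / UINT32_MAX + 1 == 3, producing 32-bit weights of
+// 1,000,000,001 and 3,000,000,001. Counts whose maximum already fits in
+// 32 bits are left untouched apart from the +1.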
+
+llvm::MDNode *CodeGenPGO::createBranchWeights(uint64_t TrueCount,
+                                              uint64_t FalseCount) {
+  // Check for empty weights.
+  if (!TrueCount && !FalseCount)
+    return nullptr;
+
+  // Calculate how to scale down to 32-bits.
+  uint64_t Scale = calculateWeightScale(std::max(TrueCount, FalseCount));
+
+  llvm::MDBuilder MDHelper(CGM.getLLVMContext());
+  return MDHelper.createBranchWeights(scaleBranchWeight(TrueCount, Scale),
+                                      scaleBranchWeight(FalseCount, Scale));
+}
+
+llvm::MDNode *CodeGenPGO::createBranchWeights(ArrayRef<uint64_t> Weights) {
+  // We need at least two elements to create meaningful weights.
+  if (Weights.size() < 2)
+    return nullptr;
+
+  // Calculate how to scale down to 32-bits.
+  uint64_t Scale = calculateWeightScale(*std::max_element(Weights.begin(),
+                                                          Weights.end()));
+
+  SmallVector<uint32_t, 16> ScaledWeights;
+  ScaledWeights.reserve(Weights.size());
+  for (uint64_t W : Weights)
+    ScaledWeights.push_back(scaleBranchWeight(W, Scale));
+
+  llvm::MDBuilder MDHelper(CGM.getLLVMContext());
+  return MDHelper.createBranchWeights(ScaledWeights);
+}
+
+llvm::MDNode *CodeGenPGO::createLoopWeights(const Stmt *Cond,
+                                            RegionCounter &Cnt) {
+  if (!haveRegionCounts())
+    return nullptr;
+  uint64_t LoopCount = Cnt.getCount();
+  uint64_t CondCount = 0;
+  bool Found = getStmtCount(Cond, CondCount);
+  assert(Found && "missing expected loop condition count");
+  (void)Found;
+  if (CondCount == 0)
+    return nullptr;
+  return createBranchWeights(LoopCount,
+                             std::max(CondCount, LoopCount) - LoopCount);
+}
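+
+// Continuing the while-loop example from ComputeRegionCounts: LoopCount = 100
+// and CondCount = 110 yield branch weights proportional to 100 (take the
+// backedge) versus 10 (exit the loop), each nudged by the +1 from
+// scaleBranchWeight.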
diff --git a/lib/CodeGen/CodeGenPGO.h b/lib/CodeGen/CodeGenPGO.h
new file mode 100644
index 0000000..c59a58e
--- /dev/null
+++ b/lib/CodeGen/CodeGenPGO.h
@@ -0,0 +1,256 @@
+//===--- CodeGenPGO.h - PGO Instrumentation for LLVM CodeGen ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based profile-guided optimization
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENPGO_H
+#define CLANG_CODEGEN_CODEGENPGO_H
+
+#include "CGBuilder.h"
+#include "CodeGenModule.h"
+#include "CodeGenTypes.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+
+namespace clang {
+namespace CodeGen {
+class RegionCounter;
+
+/// The raw counter data from an instrumented PGO binary
+class PGOProfileData {
+private:
+  /// The PGO data
+  std::unique_ptr<llvm::MemoryBuffer> DataBuffer;
+  /// Offsets into DataBuffer for each function's counters
+  llvm::StringMap<unsigned> DataOffsets;
+  /// Execution counts for each function.
+  llvm::StringMap<uint64_t> FunctionCounts;
+  /// The maximal execution count among all functions.
+  uint64_t MaxFunctionCount;
+  CodeGenModule &CGM;
+public:
+  PGOProfileData(CodeGenModule &CGM, std::string Path);
+  /// Fill Counts with the profile data for the given function name. Returns
+  /// false on success.
+  bool getFunctionCounts(StringRef FuncName, uint64_t &FuncHash,
+                         std::vector<uint64_t> &Counts);
+  /// Return the maximum of all known function counts.
+  uint64_t getMaximumFunctionCount() { return MaxFunctionCount; }
+};
+
+/// Per-function PGO state. This class should generally not be used directly,
+/// but instead through the CodeGenFunction and RegionCounter types.
+class CodeGenPGO {
+private:
+  CodeGenModule &CGM;
+  std::unique_ptr<std::string> PrefixedFuncName;
+  StringRef RawFuncName;
+  llvm::GlobalValue::LinkageTypes VarLinkage;
+
+  unsigned NumRegionCounters;
+  uint64_t FunctionHash;
+  llvm::GlobalVariable *RegionCounters;
+  std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCounterMap;
+  std::unique_ptr<llvm::DenseMap<const Stmt *, uint64_t>> StmtCountMap;
+  std::unique_ptr<std::vector<uint64_t>> RegionCounts;
+  uint64_t CurrentRegionCount;
+
+public:
+  CodeGenPGO(CodeGenModule &CGM)
+      : CGM(CGM), NumRegionCounters(0), FunctionHash(0), RegionCounters(0),
+        CurrentRegionCount(0) {}
+
+  /// Whether or not we have PGO region data for the current function. This is
+  /// false both when we have no data at all and when our data has been
+  /// discarded.
+  bool haveRegionCounts() const { return RegionCounts != 0; }
+
+  /// Get the string used to identify this function in the profile data.
+  /// For functions with local linkage, this includes the main file name.
+  StringRef getFuncName() const { return StringRef(*PrefixedFuncName); }
+  std::string getFuncVarName(StringRef VarName) const {
+    return ("__llvm_profile_" + VarName + "_" + RawFuncName).str();
+  }
+
+  /// Return the counter value of the current region.
+  uint64_t getCurrentRegionCount() const { return CurrentRegionCount; }
+
+  /// Set the counter value for the current region. This is used to keep track
+  /// of changes to the most recent counter from control flow and non-local
+  /// exits.
+  void setCurrentRegionCount(uint64_t Count) { CurrentRegionCount = Count; }
+
+  /// Indicate that the current region is never reached, and thus should have a
+  /// counter value of zero. This is important so that subsequent regions can
+  /// correctly track their parent counts.
+  void setCurrentRegionUnreachable() { setCurrentRegionCount(0); }
+
+  /// Check if an execution count is known for a given statement. If so, return
+  /// true and put the value in Count; else return false.
+  bool getStmtCount(const Stmt *S, uint64_t &Count) {
+    if (!StmtCountMap)
+      return false;
+    llvm::DenseMap<const Stmt*, uint64_t>::const_iterator
+      I = StmtCountMap->find(S);
+    if (I == StmtCountMap->end())
+      return false;
+    Count = I->second;
+    return true;
+  }
+
+  /// If the execution count for the current statement is known, record that
+  /// as the current count.
+  void setCurrentStmt(const Stmt *S) {
+    uint64_t Count;
+    if (getStmtCount(S, Count))
+      setCurrentRegionCount(Count);
+  }
+
+  /// Calculate branch weights appropriate for PGO data
+  llvm::MDNode *createBranchWeights(uint64_t TrueCount, uint64_t FalseCount);
+  llvm::MDNode *createBranchWeights(ArrayRef<uint64_t> Weights);
+  llvm::MDNode *createLoopWeights(const Stmt *Cond, RegionCounter &Cnt);
+
+  /// Assign counters to regions and configure them for PGO of a given
+  /// function. Does nothing if neither instrumentation nor profile use is
+  /// enabled; otherwise it either generates global variables or associates
+  /// PGO data with each of the counters, depending on whether we are
+  /// generating or using instrumentation.
+  void assignRegionCounters(const Decl *D, llvm::Function *Fn);
+  /// Emit static data structures for instrumentation data.
+  void emitInstrumentationData();
+  /// Clean up region counter state. Must be called if assignRegionCounters is
+  /// used.
+  void destroyRegionCounters();
+  /// Emit static initialization code, if any.
+  static llvm::Function *emitInitialization(CodeGenModule &CGM);
+
+private:
+  void setFuncName(llvm::Function *Fn);
+  void mapRegionCounters(const Decl *D);
+  void computeRegionCounts(const Decl *D);
+  void applyFunctionAttributes(PGOProfileData *PGOData, llvm::Function *Fn);
+  void loadRegionCounts(PGOProfileData *PGOData);
+  void emitCounterVariables();
+  llvm::GlobalVariable *buildDataVar();
+
+  /// Emit code to increment the counter at the given index
+  void emitCounterIncrement(CGBuilderTy &Builder, unsigned Counter);
+
+  /// Return the region counter for the given statement. This should only be
+  /// called on statements that have a dedicated counter.
+  unsigned getRegionCounter(const Stmt *S) {
+    if (RegionCounterMap == 0)
+      return 0;
+    return (*RegionCounterMap)[S];
+  }
+
+  /// Return the region count for the counter at the given index.
+  uint64_t getRegionCount(unsigned Counter) {
+    if (!haveRegionCounts())
+      return 0;
+    return (*RegionCounts)[Counter];
+  }
+
+  friend class RegionCounter;
+};
+
+/// A counter for a particular region. This is the primary interface through
+/// which clients manage PGO counters and their values.
+class RegionCounter {
+  CodeGenPGO *PGO;
+  unsigned Counter;
+  uint64_t Count;
+  uint64_t ParentCount;
+  uint64_t RegionCount;
+  int64_t Adjust;
+
+  RegionCounter(CodeGenPGO &PGO, unsigned CounterIndex)
+    : PGO(&PGO), Counter(CounterIndex), Count(PGO.getRegionCount(Counter)),
+      ParentCount(PGO.getCurrentRegionCount()), Adjust(0) {}
+
+public:
+  RegionCounter(CodeGenPGO &PGO, const Stmt *S)
+    : PGO(&PGO), Counter(PGO.getRegionCounter(S)),
+      Count(PGO.getRegionCount(Counter)),
+      ParentCount(PGO.getCurrentRegionCount()), Adjust(0) {}
+
+  /// Get the value of the counter. In most cases this is the number of times
+  /// the region of the counter was entered, but for switch labels it's the
+  /// number of direct jumps to that label.
+  uint64_t getCount() const { return Count; }
+
+  /// Get the value of the counter with adjustments applied. Adjustments occur
+  /// when control enters or leaves the region abnormally; i.e., if there is a
+  /// jump to a label within the region, or if the function can return from
+  /// within the region. The adjusted count, then, is the value of the counter
+  /// at the end of the region.
+  uint64_t getAdjustedCount() const {
+    return Count + Adjust;
+  }
+
+  /// Get the value of the counter in this region's parent, i.e., the region
+  /// that was active when this region began. This is useful for deriving
+  /// counts in implicitly counted regions, like the false case of a condition
+  /// or the normal exits of a loop.
+  uint64_t getParentCount() const { return ParentCount; }
+
+  /// Activate the counter by emitting an increment and starting to track
+  /// adjustments. If AddIncomingFallThrough is true, the current region count
+  /// will be added to the counter for the purposes of tracking the region.
+  void beginRegion(CGBuilderTy &Builder, bool AddIncomingFallThrough=false) {
+    beginRegion(AddIncomingFallThrough);
+    PGO->emitCounterIncrement(Builder, Counter);
+  }
+  void beginRegion(bool AddIncomingFallThrough=false) {
+    RegionCount = Count;
+    if (AddIncomingFallThrough)
+      RegionCount += PGO->getCurrentRegionCount();
+    PGO->setCurrentRegionCount(RegionCount);
+  }
+
+  /// For counters on boolean branches, begins tracking adjustments for the
+  /// uncounted path.
+  void beginElseRegion() {
+    RegionCount = ParentCount - Count;
+    PGO->setCurrentRegionCount(RegionCount);
+  }
+
+  /// Reset the current region count.
+  void setCurrentRegionCount(uint64_t CurrentCount) {
+    RegionCount = CurrentCount;
+    PGO->setCurrentRegionCount(RegionCount);
+  }
+
+  /// Adjust for non-local control flow after emitting a subexpression or
+  /// substatement. This must be called to account for constructs such as gotos,
+  /// labels, and returns, so that we can ensure that our region's count is
+  /// correct in the code that follows.
+  void adjustForControlFlow() {
+    Adjust += PGO->getCurrentRegionCount() - RegionCount;
+    // Reset the region count in case this is called again later.
+    RegionCount = PGO->getCurrentRegionCount();
+  }
+
+  /// Commit all adjustments to the current region. If the region is a loop,
+  /// the LoopAdjust value should be the count of all the breaks and continues
+  /// from the loop, to compensate for those counts being deducted from the
+  /// adjustments for the body of the loop.
+  void applyAdjustmentsToRegion(uint64_t LoopAdjust) {
+    PGO->setCurrentRegionCount(ParentCount + Adjust + LoopAdjust);
+  }
+};
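+
+// A typical emission pattern built on RegionCounter looks roughly like this
+// (sketch; see ComputeRegionCounts and the CodeGenFunction emitters):
+//
+//   RegionCounter Cnt(PGO, S);       // find the counter mapped to S
+//   Cnt.beginRegion(Builder);        // bump the counter, start tracking
+//   ... emit or walk the counted region ...
+//   Cnt.adjustForControlFlow();      // account for breaks, returns, gotos
+//   Cnt.applyAdjustmentsToRegion(0); // fall back to the parent count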
+
+}  // end namespace CodeGen
+}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 5f3c59c..b7f3746 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -38,10 +38,7 @@
 }
 
 CodeGenTypes::~CodeGenTypes() {
-  for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
-         I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
-      I != E; ++I)
-    delete I->second;
+  llvm::DeleteContainerSeconds(CGRecordLayouts);
 
   for (llvm::FoldingSet<CGFunctionInfo>::iterator
        I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
@@ -134,17 +131,15 @@
   // when a class is translated, even though they aren't embedded by-value into
   // the class.
   if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
-    for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(),
-         E = CRD->bases_end(); I != E; ++I)
-      if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(),
+    for (const auto &I : CRD->bases())
+      if (!isSafeToConvert(I.getType()->getAs<RecordType>()->getDecl(),
                            CGT, AlreadyChecked))
         return false;
   }
   
   // If this type would require laying out members that are currently being laid
   // out, don't do it.
-  for (RecordDecl::field_iterator I = RD->field_begin(),
-       E = RD->field_end(); I != E; ++I)
+  for (const auto *I : RD->fields())
     if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
       return false;
   
@@ -186,13 +181,12 @@
   return isSafeToConvert(RD, CGT, AlreadyChecked);
 }
 
-
-/// isFuncTypeArgumentConvertible - Return true if the specified type in a 
-/// function argument or result position can be converted to an IR type at this
+/// isFuncParamTypeConvertible - Return true if the specified type in a
+/// function parameter or result position can be converted to an IR type at this
 /// point.  This boils down to being whether it is complete, as well as whether
 /// we've temporarily deferred expanding the type because we're in a recursive
 /// context.
-bool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty) {
+bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
   // If this isn't a tagged type, we can convert it!
   const TagType *TT = Ty->getAs<TagType>();
   if (TT == 0) return true;
@@ -217,17 +211,17 @@
 
 
 /// Code to verify a given function type is complete, i.e. the return type
-/// and all of the argument types are complete.  Also check to see if we are in
+/// and all of the parameter types are complete.  Also check to see if we are in
 /// a RS_StructPointer context, and if so whether any struct types have been
 /// pended.  If so, we don't want to ask the ABI lowering code to handle a type
 /// that cannot be converted to an IR type.
 bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
-  if (!isFuncTypeArgumentConvertible(FT->getResultType()))
+  if (!isFuncParamTypeConvertible(FT->getReturnType()))
     return false;
   
   if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
-    for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
-      if (!isFuncTypeArgumentConvertible(FPT->getArgType(i)))
+    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
+      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
         return false;
 
   return true;
@@ -479,11 +473,11 @@
 
       // Force conversion of all the relevant record types, to make sure
       // we re-convert the FunctionType when appropriate.
-      if (const RecordType *RT = FT->getResultType()->getAs<RecordType>())
+      if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
         ConvertRecordDeclType(RT->getDecl());
       if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
-        for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
-          if (const RecordType *RT = FPT->getArgType(i)->getAs<RecordType>())
+        for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
+          if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
             ConvertRecordDeclType(RT->getDecl());
 
       // Return a placeholder type.
@@ -493,7 +487,7 @@
       break;
     }
 
-    // While we're converting the argument types for a function, we don't want
+    // While we're converting the parameter types for a function, we don't want
     // to recursively convert any pointed-to structs.  Converting directly-used
     // structs is ok though.
     if (!RecordsBeingLaidOut.insert(Ty)) {
@@ -655,11 +649,10 @@
   
   // Force conversion of non-virtual base classes recursively.
   if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
-    for (CXXRecordDecl::base_class_const_iterator i = CRD->bases_begin(),
-         e = CRD->bases_end(); i != e; ++i) {
-      if (i->isVirtual()) continue;
+    for (const auto &I : CRD->bases()) {
+      if (I.isVirtual()) continue;
       
-      ConvertRecordDeclType(i->getType()->getAs<RecordType>()->getDecl());
+      ConvertRecordDeclType(I.getType()->getAs<RecordType>()->getDecl());
     }
   }
 
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 94ca9e2..59e3089 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -136,8 +136,8 @@
   /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
   /// type).
   bool isFuncTypeConvertible(const FunctionType *FT);
-  bool isFuncTypeArgumentConvertible(QualType Ty);
-  
+  bool isFuncParamTypeConvertible(QualType Ty);
+
   /// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
   /// given a CXXMethodDecl. If the method to has an incomplete return type,
   /// and/or incomplete argument types, this will return the opaque type.
@@ -175,10 +175,10 @@
 
   const CGFunctionInfo &arrangeGlobalDeclaration(GlobalDecl GD);
   const CGFunctionInfo &arrangeFunctionDeclaration(const FunctionDecl *FD);
-  const CGFunctionInfo &arrangeFunctionDeclaration(QualType ResTy,
-                                                   const FunctionArgList &Args,
-                                             const FunctionType::ExtInfo &Info,
-                                                   bool isVariadic);
+  const CGFunctionInfo &
+  arrangeFreeFunctionDeclaration(QualType ResTy, const FunctionArgList &Args,
+                                 const FunctionType::ExtInfo &Info,
+                                 bool isVariadic);
 
   const CGFunctionInfo &arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD);
   const CGFunctionInfo &arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
@@ -188,6 +188,10 @@
   const CGFunctionInfo &arrangeCXXConstructorDeclaration(
                                                     const CXXConstructorDecl *D,
                                                     CXXCtorType Type);
+  const CGFunctionInfo &arrangeCXXConstructorCall(const CallArgList &Args,
+                                                  const CXXConstructorDecl *D,
+                                                  CXXCtorType CtorKind,
+                                                  unsigned ExtraArgs);
   const CGFunctionInfo &arrangeCXXDestructor(const CXXDestructorDecl *D,
                                              CXXDtorType Type);
 
@@ -216,6 +220,7 @@
   ///
   /// \param argTypes - must all actually be canonical as params
   const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType,
+                                                bool IsInstanceMethod,
                                                 ArrayRef<CanQualType> argTypes,
                                                 FunctionType::ExtInfo info,
                                                 RequiredArgs args);
diff --git a/lib/CodeGen/EHScopeStack.h b/lib/CodeGen/EHScopeStack.h
index e9d9a33..166d420 100644
--- a/lib/CodeGen/EHScopeStack.h
+++ b/lib/CodeGen/EHScopeStack.h
@@ -19,8 +19,8 @@
 #include "clang/Basic/LLVM.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/Value.h"
 #include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
 
 namespace clang {
 namespace CodeGen {
@@ -65,9 +65,9 @@
 template <class T> struct DominatingValue : InvariantValue<T> {};
 
 template <class T, bool mightBeInstruction =
-            llvm::is_base_of<llvm::Value, T>::value &&
-            !llvm::is_base_of<llvm::Constant, T>::value &&
-            !llvm::is_base_of<llvm::BasicBlock, T>::value>
+            std::is_base_of<llvm::Value, T>::value &&
+            !std::is_base_of<llvm::Constant, T>::value &&
+            !std::is_base_of<llvm::BasicBlock, T>::value>
 struct DominatingPointer;
 template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
 // template <class T> struct DominatingPointer<T,true> at end of file
@@ -182,7 +182,7 @@
     typedef typename DominatingValue<A0>::saved_type A0_saved;
     A0_saved a0_saved;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
       T(a0).Emit(CGF, flags);
     }
@@ -199,7 +199,7 @@
     A0_saved a0_saved;
     A1_saved a1_saved;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
       T(a0, a1).Emit(CGF, flags);
@@ -219,7 +219,7 @@
     A1_saved a1_saved;
     A2_saved a2_saved;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
       A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
@@ -242,7 +242,7 @@
     A2_saved a2_saved;
     A3_saved a3_saved;
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
       A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 0e8f31a..27825ab 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -52,13 +52,13 @@
     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
     UseARMGuardVarABI(UseARMGuardVarABI) { }
 
-  bool isReturnTypeIndirect(const CXXRecordDecl *RD) const {
+  bool isReturnTypeIndirect(const CXXRecordDecl *RD) const override {
     // Structures with either a non-trivial destructor or a non-trivial
     // copy constructor are always indirect.
     return !RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor();
   }
 
-  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const {
+  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
     // Structures with either a non-trivial destructor or a non-trivial
     // copy constructor are always indirect.
     if (!RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
@@ -66,114 +66,117 @@
     return RAA_Default;
   }
 
-  bool isZeroInitializable(const MemberPointerType *MPT);
+  bool isZeroInitializable(const MemberPointerType *MPT) override;
 
-  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
 
-  llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
-                                               llvm::Value *&This,
-                                               llvm::Value *MemFnPtr,
-                                               const MemberPointerType *MPT);
+  llvm::Value *
+    EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+                                    const Expr *E,
+                                    llvm::Value *&This,
+                                    llvm::Value *MemFnPtr,
+                                    const MemberPointerType *MPT) override;
 
-  llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
-                                            llvm::Value *Base,
-                                            llvm::Value *MemPtr,
-                                            const MemberPointerType *MPT);
+  llvm::Value *
+    EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
+                                 llvm::Value *Base,
+                                 llvm::Value *MemPtr,
+                                 const MemberPointerType *MPT) override;
 
   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                            const CastExpr *E,
-                                           llvm::Value *Src);
+                                           llvm::Value *Src) override;
   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
-                                              llvm::Constant *Src);
+                                              llvm::Constant *Src) override;
 
-  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
 
-  llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+  llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD) override;
   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
-                                        CharUnits offset);
-  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+                                        CharUnits offset) override;
+  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                      CharUnits ThisAdjustment);
 
   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
-                                           llvm::Value *L,
-                                           llvm::Value *R,
+                                           llvm::Value *L, llvm::Value *R,
                                            const MemberPointerType *MPT,
-                                           bool Inequality);
+                                           bool Inequality) override;
 
   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
-                                          llvm::Value *Addr,
-                                          const MemberPointerType *MPT);
+                                         llvm::Value *Addr,
+                                         const MemberPointerType *MPT) override;
 
-  llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
-                                      llvm::Value *ptr,
-                                      QualType type);
+  llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF, llvm::Value *ptr,
+                                      QualType type) override;
 
-  llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
-                                         llvm::Value *This,
-                                         const CXXRecordDecl *ClassDecl,
-                                         const CXXRecordDecl *BaseClassDecl);
+  llvm::Value *
+    GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This,
+                              const CXXRecordDecl *ClassDecl,
+                              const CXXRecordDecl *BaseClassDecl) override;
 
   void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
-                                 CXXCtorType T,
-                                 CanQualType &ResTy,
-                                 SmallVectorImpl<CanQualType> &ArgTys);
+                                 CXXCtorType T, CanQualType &ResTy,
+                                 SmallVectorImpl<CanQualType> &ArgTys) override;
 
-  void EmitCXXConstructors(const CXXConstructorDecl *D);
+  void EmitCXXConstructors(const CXXConstructorDecl *D) override;
 
   void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
-                                CXXDtorType T,
-                                CanQualType &ResTy,
-                                SmallVectorImpl<CanQualType> &ArgTys);
+                                CXXDtorType T, CanQualType &ResTy,
+                                SmallVectorImpl<CanQualType> &ArgTys) override;
 
   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
-                              CXXDtorType DT) const {
+                              CXXDtorType DT) const override {
     // Itanium does not emit any destructor variant as an inline thunk.
     // Delegating may occur as an optimization, but all variants are either
     // emitted with external linkage or as linkonce if they are inline and used.
     return false;
   }
 
-  void EmitCXXDestructors(const CXXDestructorDecl *D);
+  void EmitCXXDestructors(const CXXDestructorDecl *D) override;
 
-  void BuildInstanceFunctionParams(CodeGenFunction &CGF,
-                                   QualType &ResTy,
-                                   FunctionArgList &Params);
+  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
+                                 FunctionArgList &Params) override;
 
-  void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
 
-  void EmitConstructorCall(CodeGenFunction &CGF,
-                           const CXXConstructorDecl *D, CXXCtorType Type,
-                           bool ForVirtualBase, bool Delegating,
-                           llvm::Value *This,
-                           CallExpr::const_arg_iterator ArgBeg,
-                           CallExpr::const_arg_iterator ArgEnd);
+  unsigned addImplicitConstructorArgs(CodeGenFunction &CGF,
+                                      const CXXConstructorDecl *D,
+                                      CXXCtorType Type, bool ForVirtualBase,
+                                      bool Delegating,
+                                      CallArgList &Args) override;
 
-  void emitVTableDefinitions(CodeGenVTables &CGVT, const CXXRecordDecl *RD);
+  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
+                          CXXDtorType Type, bool ForVirtualBase,
+                          bool Delegating, llvm::Value *This) override;
+
+  void emitVTableDefinitions(CodeGenVTables &CGVT,
+                             const CXXRecordDecl *RD) override;
 
   llvm::Value *getVTableAddressPointInStructor(
       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
       BaseSubobject Base, const CXXRecordDecl *NearestVBase,
-      bool &NeedsVirtualOffset);
+      bool &NeedsVirtualOffset) override;
 
   llvm::Constant *
   getVTableAddressPointForConstExpr(BaseSubobject Base,
-                                    const CXXRecordDecl *VTableClass);
+                                    const CXXRecordDecl *VTableClass) override;
 
   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
-                                        CharUnits VPtrOffset);
+                                        CharUnits VPtrOffset) override;
 
   llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
-                                         llvm::Value *This, llvm::Type *Ty);
+                                         llvm::Value *This,
+                                         llvm::Type *Ty) override;
 
   void EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                  const CXXDestructorDecl *Dtor,
                                  CXXDtorType DtorType, SourceLocation CallLoc,
-                                 llvm::Value *This);
+                                 llvm::Value *This) override;
 
-  void emitVirtualInheritanceTables(const CXXRecordDecl *RD);
+  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
 
-  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable) {
+  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable) override {
     // Allow inlining of thunks by emitting them with available_externally
     // linkage together with vtables when needed.
     if (ForVTable)
@@ -181,38 +184,40 @@
   }
 
   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This,
-                                     const ThisAdjustment &TA);
+                                     const ThisAdjustment &TA) override;
 
   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
-                                       const ReturnAdjustment &RA);
+                                       const ReturnAdjustment &RA) override;
 
-  StringRef GetPureVirtualCallName() { return "__cxa_pure_virtual"; }
-  StringRef GetDeletedVirtualCallName() { return "__cxa_deleted_virtual"; }
+  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
+  StringRef GetDeletedVirtualCallName() override
+    { return "__cxa_deleted_virtual"; }
 
-  CharUnits getArrayCookieSizeImpl(QualType elementType);
+  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
   llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
                                      llvm::Value *NewPtr,
                                      llvm::Value *NumElements,
                                      const CXXNewExpr *expr,
-                                     QualType ElementType);
+                                     QualType ElementType) override;
   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                    llvm::Value *allocPtr,
-                                   CharUnits cookieSize);
+                                   CharUnits cookieSize) override;
 
   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
-                       llvm::GlobalVariable *DeclPtr, bool PerformInit);
+                       llvm::GlobalVariable *DeclPtr,
+                       bool PerformInit) override;
   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
-                          llvm::Constant *dtor, llvm::Constant *addr);
+                          llvm::Constant *dtor, llvm::Constant *addr) override;
 
   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                 llvm::GlobalVariable *Var);
   void EmitThreadLocalInitFuncs(
       llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
-      llvm::Function *InitFunc);
-  LValue EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
-                                    const DeclRefExpr *DRE);
+      llvm::Function *InitFunc) override;
+  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
+                                      QualType LValType) override;
 
-  bool NeedsVTTParameter(GlobalDecl GD);
+  bool NeedsVTTParameter(GlobalDecl GD) override;
 };
 
 class ARMCXXABI : public ItaniumCXXABI {
@@ -221,22 +226,31 @@
     ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
                   /* UseARMGuardVarABI = */ true) {}
 
-  bool HasThisReturn(GlobalDecl GD) const {
+  bool HasThisReturn(GlobalDecl GD) const override {
     return (isa<CXXConstructorDecl>(GD.getDecl()) || (
               isa<CXXDestructorDecl>(GD.getDecl()) &&
               GD.getDtorType() != Dtor_Deleting));
   }
 
-  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
+  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
+                           QualType ResTy) override;
 
-  CharUnits getArrayCookieSizeImpl(QualType elementType);
+  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
   llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
                                      llvm::Value *NewPtr,
                                      llvm::Value *NumElements,
                                      const CXXNewExpr *expr,
-                                     QualType ElementType);
+                                     QualType ElementType) override;
   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
-                                   CharUnits cookieSize);
+                                   CharUnits cookieSize) override;
+};
+
+class iOS64CXXABI : public ARMCXXABI {
+public:
+  iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {}
+
+  // ARM64 libraries are prepared for non-unique RTTI.
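+  // Roughly: type_info objects for a type may then be duplicated across
+  // images, so the runtime may compare type names instead of relying on
+  // pointer identity.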
+  bool shouldRTTIBeUnique() override { return false; }
 };
 }
 
@@ -248,6 +262,9 @@
   case TargetCXXABI::iOS:
     return new ARMCXXABI(CGM);
 
+  case TargetCXXABI::iOS64:
+    return new iOS64CXXABI(CGM);
+
   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
   // include the other 32-bit ARM oddities: constructor/destructor return values
   // and array cookies.
@@ -299,11 +316,9 @@
 ///
 /// If the member is non-virtual, memptr.ptr is the address of
 /// the function to call.
-llvm::Value *
-ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
-                                               llvm::Value *&This,
-                                               llvm::Value *MemFnPtr,
-                                               const MemberPointerType *MPT) {
+llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
+    CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
+    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
   CGBuilderTy &Builder = CGF.Builder;
 
   const FunctionProtoType *FPT = 
@@ -355,8 +370,7 @@
 
   // Cast the adjusted this to a pointer to vtable pointer and load.
   llvm::Type *VTableTy = Builder.getInt8PtrTy();
-  llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
-  VTable = Builder.CreateLoad(VTable, "memptr.vtable");
+  llvm::Value *VTable = CGF.GetVTablePtr(This, VTableTy);
 
   // Apply the offset.
   llvm::Value *VTableOffset = FnAsInt;
@@ -385,10 +399,9 @@
 
 /// Compute an l-value by applying the given pointer-to-member to a
 /// base object.
-llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
-                                                         llvm::Value *Base,
-                                                         llvm::Value *MemPtr,
-                                           const MemberPointerType *MPT) {
+llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
+    CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr,
+    const MemberPointerType *MPT) {
   assert(MemPtr->getType() == CGM.PtrDiffTy);
 
   CGBuilderTy &Builder = CGF.Builder;
@@ -797,34 +810,35 @@
 
 /// The generic ABI passes 'this', plus a VTT if it's initializing a
 /// base subobject.
-void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
-                                              CXXCtorType Type,
-                                              CanQualType &ResTy,
-                                SmallVectorImpl<CanQualType> &ArgTys) {
+void
+ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+                                         CXXCtorType Type, CanQualType &ResTy,
+                                         SmallVectorImpl<CanQualType> &ArgTys) {
   ASTContext &Context = getContext();
 
-  // 'this' parameter is already there, as well as 'this' return if
-  // HasThisReturn(GlobalDecl(Ctor, Type)) is true
+  // All parameters are already in place except VTT, which goes after 'this'.
+  // These are Clang types, so we don't need to worry about sret yet.
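+  // Roughly, a base-object constructor of a class with virtual bases then
+  // gets the parameter list (this, void **vtt, <declared parameters>...).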
 
   // Check if we need to add a VTT parameter (which has type void **).
   if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
-    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+    ArgTys.insert(ArgTys.begin() + 1,
+                  Context.getPointerType(Context.VoidPtrTy));
 }
 
 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
   // Just make sure we're in sync with TargetCXXABI.
   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
 
+  // The constructor used for constructing this as a base class;
+  // ignores virtual bases.
+  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
+
   // The constructor used for constructing this as a complete class;
   // constructs the virtual bases, then calls the base constructor.
   if (!D->getParent()->isAbstract()) {
     // We don't need to emit the complete ctor if the class is abstract.
     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
   }
-
-  // The constructor used for constructing this as a base class;
-  // ignores virtual bases.
-  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
 }
 
 /// The generic ABI passes 'this', plus a VTT if it's destroying a
@@ -844,29 +858,26 @@
 }
 
 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
-  // The destructor in a virtual table is always a 'deleting'
-  // destructor, which calls the complete destructor and then uses the
-  // appropriate operator delete.
-  if (D->isVirtual())
-    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
+  // The destructor used for destructing this as a base class; ignores
+  // virtual bases.
+  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
 
   // The destructor used for destructing this as a most-derived class;
   // call the base destructor and then destructs any virtual bases.
   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
 
-  // The destructor used for destructing this as a base class; ignores
-  // virtual bases.
-  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
+  // The destructor in a virtual table is always a 'deleting'
+  // destructor, which calls the complete destructor and then uses the
+  // appropriate operator delete.
+  if (D->isVirtual())
+    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
 }
 
-void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
-                                                QualType &ResTy,
-                                                FunctionArgList &Params) {
-  /// Create the 'this' variable.
-  BuildThisParam(CGF, Params);
-
+void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
+                                              QualType &ResTy,
+                                              FunctionArgList &Params) {
   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
-  assert(MD->isInstance());
+  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
 
   // Check if we need a VTT parameter as well.
   if (NeedsVTTParameter(CGF.CurGD)) {
@@ -877,8 +888,8 @@
     ImplicitParamDecl *VTTDecl
       = ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
                                   &Context.Idents.get("vtt"), T);
-    Params.push_back(VTTDecl);
-    getVTTDecl(CGF) = VTTDecl;
+    Params.insert(Params.begin() + 1, VTTDecl);
+    getStructorImplicitParamDecl(CGF) = VTTDecl;
   }
 }
 
@@ -887,10 +898,9 @@
   EmitThisParam(CGF);
 
   /// Initialize the 'vtt' slot if needed.
-  if (getVTTDecl(CGF)) {
-    getVTTValue(CGF)
-      = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)),
-                               "vtt");
+  if (getStructorImplicitParamDecl(CGF)) {
+    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
+        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
   }
 
   /// If this is a function that the ABI specifies returns 'this', initialize
@@ -905,21 +915,39 @@
     CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
 }
 
-void ItaniumCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
-                                        const CXXConstructorDecl *D,
-                                        CXXCtorType Type,
-                                        bool ForVirtualBase, bool Delegating,
-                                        llvm::Value *This,
-                                        CallExpr::const_arg_iterator ArgBeg,
-                                        CallExpr::const_arg_iterator ArgEnd) {
-  llvm::Value *VTT = CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase,
-                                         Delegating);
+unsigned ItaniumCXXABI::addImplicitConstructorArgs(
+    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
+    bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
+    return 0;
+
+  // Insert the implicit 'vtt' argument as the second argument.
+  llvm::Value *VTT =
+      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
-  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+  Args.insert(Args.begin() + 1,
+              CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
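+  // Args[0] holds the 'this' argument at this point, so the VTT becomes the
+  // second argument, matching BuildConstructorSignature above.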
+  return 1;  // Added one arg.
+}
+
+void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
+                                       const CXXDestructorDecl *DD,
+                                       CXXDtorType Type, bool ForVirtualBase,
+                                       bool Delegating, llvm::Value *This) {
+  GlobalDecl GD(DD, Type);
+  llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
+  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
+
+  llvm::Value *Callee = 0;
+  if (getContext().getLangOpts().AppleKext)
+    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
+
+  if (!Callee)
+    Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
 
   // FIXME: Provide a source location here.
-  CGF.EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(),
-                        This, VTT, VTTTy, ArgBeg, ArgEnd);
+  CGF.EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
+                        VTT, VTTTy, /*ArgBeg=*/0, /*ArgEnd=*/0);
 }
 
 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -942,7 +970,7 @@
   VTable->setLinkage(Linkage);
 
   // Set the right visibility.
-  CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable);
+  CGM.setGlobalVisibility(VTable, RD);
 
   // If this is the magic class __cxxabiv1::__fundamental_type_info,
   // we will emit the typeinfo for the fundamental types. This is the
@@ -1305,7 +1333,7 @@
     llvm::GlobalVariable *Guard;
     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) {
+    void Emit(CodeGenFunction &CGF, Flags flags) override {
       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                   Guard);
     }
@@ -1398,6 +1426,13 @@
   //         __cxa_guard_release (&obj_guard);
   //       }
   //     }
+
+    // ARM64 C++ ABI 3.2.2:
+    // This ABI instead only specifies the value bit 0 of the static guard
+    // variable; all other bits are platform defined. Bit 0 shall be 0 when the
+    // variable is not initialized and 1 when it is.
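+    // Sketched in C, the test this ABI specifies is roughly:
+    //   if ((*(const char *)&obj_guard & 1) == 0) { /* not initialized */ }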
+    // FIXME: Reading one bit is no more efficient than reading one byte, so
+    // the codegen is the same as for the generic Itanium ABI.
   } else {
     // Load the first byte of the guard variable.
     llvm::LoadInst *LI = 
@@ -1523,8 +1558,6 @@
 /// the wrapper emits a copy, and we want the linker to merge them.
 static llvm::GlobalValue::LinkageTypes getThreadLocalWrapperLinkage(
     llvm::GlobalValue::LinkageTypes VarLinkage) {
-  if (llvm::GlobalValue::isLinkerPrivateLinkage(VarLinkage))
-    return llvm::GlobalValue::LinkerPrivateWeakLinkage;
   // For internal linkage variables, we don't need an external or weak wrapper.
   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
     return VarLinkage;
@@ -1632,9 +1665,9 @@
   }
 }
 
-LValue ItaniumCXXABI::EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
-                                                 const DeclRefExpr *DRE) {
-  const VarDecl *VD = cast<VarDecl>(DRE->getDecl());
+LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
+                                                   const VarDecl *VD,
+                                                   QualType LValType) {
   QualType T = VD->getType();
   llvm::Type *Ty = CGF.getTypes().ConvertTypeForMem(T);
   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty);
@@ -1645,10 +1678,9 @@
 
   LValue LV;
   if (VD->getType()->isReferenceType())
-    LV = CGF.MakeNaturalAlignAddrLValue(Val, T);
+    LV = CGF.MakeNaturalAlignAddrLValue(Val, LValType);
   else
-    LV = CGF.MakeAddrLValue(Val, DRE->getType(),
-                            CGF.getContext().getDeclAlign(VD));
+    LV = CGF.MakeAddrLValue(Val, LValType, CGF.getContext().getDeclAlign(VD));
   // FIXME: need setObjCGCLValueClass?
   return LV;
 }
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 7318fe7..9832969 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -15,9 +15,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "CGCXXABI.h"
-#include "CodeGenModule.h"
 #include "CGVTables.h"
-#include "MicrosoftVBTables.h"
+#include "CodeGenModule.h"
 #include "clang/AST/Decl.h"
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/VTableBuilder.h"
@@ -28,51 +27,63 @@
 
 namespace {
 
+/// Holds all the vbtable globals for a given class.
+struct VBTableGlobals {
+  const VPtrInfoVector *VBTables;
+  SmallVector<llvm::GlobalVariable *, 2> Globals;
+};
+
 class MicrosoftCXXABI : public CGCXXABI {
 public:
   MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
 
-  bool HasThisReturn(GlobalDecl GD) const;
+  bool HasThisReturn(GlobalDecl GD) const override;
 
-  bool isReturnTypeIndirect(const CXXRecordDecl *RD) const {
+  bool isReturnTypeIndirect(const CXXRecordDecl *RD) const override {
     // Structures that are not C++03 PODs are always indirect.
     return !RD->isPOD();
   }
 
-  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const {
-    if (RD->hasNonTrivialCopyConstructor() || RD->hasNonTrivialDestructor())
-      return RAA_DirectInMemory;
+  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
+    if (RD->hasNonTrivialCopyConstructor() || RD->hasNonTrivialDestructor()) {
+      llvm::Triple::ArchType Arch = CGM.getTarget().getTriple().getArch();
+      if (Arch == llvm::Triple::x86)
+        return RAA_DirectInMemory;
+      // On x64, pass non-trivial records indirectly.
+      // FIXME: Test other Windows architectures.
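+      // Roughly: the caller materializes a temporary and passes its address
+      // instead of passing the record in registers or directly on the stack.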
+      return RAA_Indirect;
+    }
     return RAA_Default;
   }
 
-  StringRef GetPureVirtualCallName() { return "_purecall"; }
+  StringRef GetPureVirtualCallName() override { return "_purecall"; }
   // No known support for deleted functions in MSVC yet, so this choice is
   // arbitrary.
-  StringRef GetDeletedVirtualCallName() { return "_purecall"; }
+  StringRef GetDeletedVirtualCallName() override { return "_purecall"; }
 
-  bool isInlineInitializedStaticDataMemberLinkOnce() { return true; }
+  bool isInlineInitializedStaticDataMemberLinkOnce() override { return true; }
 
   llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
                                       llvm::Value *ptr,
-                                      QualType type);
+                                      QualType type) override;
 
-  llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
-                                         llvm::Value *This,
-                                         const CXXRecordDecl *ClassDecl,
-                                         const CXXRecordDecl *BaseClassDecl);
+  llvm::Value *
+  GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This,
+                            const CXXRecordDecl *ClassDecl,
+                            const CXXRecordDecl *BaseClassDecl) override;
 
   void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
-                                 CXXCtorType Type,
-                                 CanQualType &ResTy,
-                                 SmallVectorImpl<CanQualType> &ArgTys);
+                                 CXXCtorType Type, CanQualType &ResTy,
+                                 SmallVectorImpl<CanQualType> &ArgTys) override;
 
-  llvm::BasicBlock *EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
-                                                  const CXXRecordDecl *RD);
+  llvm::BasicBlock *
+  EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
+                                const CXXRecordDecl *RD) override;
 
   void initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF,
-                                                 const CXXRecordDecl *RD);
+                                              const CXXRecordDecl *RD) override;
 
-  void EmitCXXConstructors(const CXXConstructorDecl *D);
+  void EmitCXXConstructors(const CXXConstructorDecl *D) override;
 
   // Background on MSVC destructors
   // ==============================
@@ -109,17 +120,18 @@
   void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                 CXXDtorType Type,
                                 CanQualType &ResTy,
-                                SmallVectorImpl<CanQualType> &ArgTys);
+                                SmallVectorImpl<CanQualType> &ArgTys) override;
 
   /// Non-base dtors should be emitted as delegating thunks in this ABI.
   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
-                              CXXDtorType DT) const {
+                              CXXDtorType DT) const override {
     return DT != Dtor_Base;
   }
 
-  void EmitCXXDestructors(const CXXDestructorDecl *D);
+  void EmitCXXDestructors(const CXXDestructorDecl *D) override;
 
-  const CXXRecordDecl *getThisArgumentTypeForMethod(const CXXMethodDecl *MD) {
+  const CXXRecordDecl *
+  getThisArgumentTypeForMethod(const CXXMethodDecl *MD) override {
     MD = MD->getCanonicalDecl();
     if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD)) {
       MicrosoftVTableContext::MethodVFTableLocation ML =
@@ -136,71 +148,83 @@
     return MD->getParent();
   }
 
-  llvm::Value *adjustThisArgumentForVirtualCall(CodeGenFunction &CGF,
-                                                GlobalDecl GD,
-                                                llvm::Value *This);
+  llvm::Value *
+  adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
+                                           llvm::Value *This,
+                                           bool VirtualCall) override;
 
-  void BuildInstanceFunctionParams(CodeGenFunction &CGF,
-                                   QualType &ResTy,
-                                   FunctionArgList &Params);
+  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
+                                 FunctionArgList &Params) override;
 
   llvm::Value *adjustThisParameterInVirtualFunctionPrologue(
-      CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This);
+      CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) override;
 
-  void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
 
-  void EmitConstructorCall(CodeGenFunction &CGF,
-                           const CXXConstructorDecl *D, CXXCtorType Type,
-                           bool ForVirtualBase, bool Delegating,
-                           llvm::Value *This,
-                           CallExpr::const_arg_iterator ArgBeg,
-                           CallExpr::const_arg_iterator ArgEnd);
+  unsigned addImplicitConstructorArgs(CodeGenFunction &CGF,
+                                      const CXXConstructorDecl *D,
+                                      CXXCtorType Type, bool ForVirtualBase,
+                                      bool Delegating,
+                                      CallArgList &Args) override;
 
-  void emitVTableDefinitions(CodeGenVTables &CGVT, const CXXRecordDecl *RD);
+  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
+                          CXXDtorType Type, bool ForVirtualBase,
+                          bool Delegating, llvm::Value *This) override;
+
+  void emitVTableDefinitions(CodeGenVTables &CGVT,
+                             const CXXRecordDecl *RD) override;
 
   llvm::Value *getVTableAddressPointInStructor(
       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
       BaseSubobject Base, const CXXRecordDecl *NearestVBase,
-      bool &NeedsVirtualOffset);
+      bool &NeedsVirtualOffset) override;
 
   llvm::Constant *
   getVTableAddressPointForConstExpr(BaseSubobject Base,
-                                    const CXXRecordDecl *VTableClass);
+                                    const CXXRecordDecl *VTableClass) override;
 
   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
-                                        CharUnits VPtrOffset);
+                                        CharUnits VPtrOffset) override;
 
   llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
-                                         llvm::Value *This, llvm::Type *Ty);
+                                         llvm::Value *This,
+                                         llvm::Type *Ty) override;
 
   void EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                  const CXXDestructorDecl *Dtor,
                                  CXXDtorType DtorType, SourceLocation CallLoc,
-                                 llvm::Value *This);
+                                 llvm::Value *This) override;
 
   void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD,
-                                        CallArgList &CallArgs) {
+                                        CallArgList &CallArgs) override {
     assert(GD.getDtorType() == Dtor_Deleting &&
            "Only deleting destructor thunks are available in this ABI");
     CallArgs.add(RValue::get(getStructorImplicitParamValue(CGF)),
                              CGM.getContext().IntTy);
   }
 
-  void emitVirtualInheritanceTables(const CXXRecordDecl *RD);
+  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
 
-  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable) {
+  llvm::GlobalVariable *
+  getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
+                   llvm::GlobalVariable::LinkageTypes Linkage);
+
+  void emitVBTableDefinition(const VPtrInfo &VBT, const CXXRecordDecl *RD,
+                             llvm::GlobalVariable *GV) const;
+
+  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable) override {
     Thunk->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
   }
 
   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This,
-                                     const ThisAdjustment &TA);
+                                     const ThisAdjustment &TA) override;
 
   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
-                                       const ReturnAdjustment &RA);
+                                       const ReturnAdjustment &RA) override;
 
   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                        llvm::GlobalVariable *DeclPtr,
-                       bool PerformInit);
+                       bool PerformInit) override;
 
   // ==== Notes on array cookies =========
   //
@@ -225,17 +249,18 @@
   //   }
   // Whereas it prints "104" and "104" if you give A a destructor.
 
-  bool requiresArrayCookie(const CXXDeleteExpr *expr, QualType elementType);
-  bool requiresArrayCookie(const CXXNewExpr *expr);
-  CharUnits getArrayCookieSizeImpl(QualType type);
+  bool requiresArrayCookie(const CXXDeleteExpr *expr,
+                           QualType elementType) override;
+  bool requiresArrayCookie(const CXXNewExpr *expr) override;
+  CharUnits getArrayCookieSizeImpl(QualType type) override;
   llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
                                      llvm::Value *NewPtr,
                                      llvm::Value *NumElements,
                                      const CXXNewExpr *expr,
-                                     QualType ElementType);
+                                     QualType ElementType) override;
   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                    llvm::Value *allocPtr,
-                                   CharUnits cookieSize);
+                                   CharUnits cookieSize) override;
 
 private:
   MicrosoftMangleContext &getMangleContext() {
@@ -258,15 +283,12 @@
     return C ? C : getZeroInt();
   }
 
+  CharUnits getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD);
+
   void
   GetNullMemberPointerFields(const MemberPointerType *MPT,
                              llvm::SmallVectorImpl<llvm::Constant *> &fields);
 
-  /// \brief Finds the offset from the base of RD to the vbptr it uses, even if
-  /// it is reusing a vbptr from a non-virtual base.  RD must have morally
-  /// virtual bases.
-  CharUnits GetVBPtrOffsetFromBases(const CXXRecordDecl *RD);
-
   /// \brief Shared code for virtual base adjustment.  Returns the offset from
   /// the vbptr to the virtual base.  Optionally returns the address of the
   /// vbptr itself.
@@ -288,8 +310,8 @@
 
   /// \brief Performs a full virtual base adjustment.  Used to dereference
   /// pointers to members of virtual bases.
-  llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const CXXRecordDecl *RD,
-                                 llvm::Value *Base,
+  llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E,
+                                 const CXXRecordDecl *RD, llvm::Value *Base,
                                  llvm::Value *VirtualBaseAdjustmentOffset,
                                  llvm::Value *VBPtrOffset /* optional */);
 
@@ -311,51 +333,51 @@
   void EmitVBPtrStores(CodeGenFunction &CGF, const CXXRecordDecl *RD);
 
   /// \brief Caching wrapper around VBTableBuilder::enumerateVBTables().
-  const VBTableVector &EnumerateVBTables(const CXXRecordDecl *RD);
+  const VBTableGlobals &enumerateVBTables(const CXXRecordDecl *RD);
 
   /// \brief Generate a thunk for calling a virtual member function MD.
-  llvm::Function *EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
-                                         StringRef ThunkName);
+  llvm::Function *EmitVirtualMemPtrThunk(
+      const CXXMethodDecl *MD,
+      const MicrosoftVTableContext::MethodVFTableLocation &ML);
 
 public:
-  virtual llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
 
-  virtual bool isZeroInitializable(const MemberPointerType *MPT);
+  bool isZeroInitializable(const MemberPointerType *MPT) override;
 
-  virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
 
-  virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
-                                                CharUnits offset);
-  virtual llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
-  virtual llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+                                        CharUnits offset) override;
+  llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD) override;
+  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
 
-  virtual llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
-                                                   llvm::Value *L,
-                                                   llvm::Value *R,
-                                                   const MemberPointerType *MPT,
-                                                   bool Inequality);
+  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
+                                           llvm::Value *L,
+                                           llvm::Value *R,
+                                           const MemberPointerType *MPT,
+                                           bool Inequality) override;
 
-  virtual llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
-                                                  llvm::Value *MemPtr,
-                                                  const MemberPointerType *MPT);
+  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+                                          llvm::Value *MemPtr,
+                                          const MemberPointerType *MPT) override;
 
-  virtual llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
-                                                    llvm::Value *Base,
-                                                    llvm::Value *MemPtr,
-                                                  const MemberPointerType *MPT);
+  llvm::Value *
+  EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
+                               llvm::Value *Base, llvm::Value *MemPtr,
+                               const MemberPointerType *MPT) override;
 
-  virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
-                                                   const CastExpr *E,
-                                                   llvm::Value *Src);
+  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+                                           const CastExpr *E,
+                                           llvm::Value *Src) override;
 
-  virtual llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
-                                                      llvm::Constant *Src);
+  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+                                              llvm::Constant *Src) override;
 
-  virtual llvm::Value *
-  EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
-                                  llvm::Value *&This,
-                                  llvm::Value *MemPtr,
-                                  const MemberPointerType *MPT);
+  llvm::Value *
+  EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E,
+                                  llvm::Value *&This, llvm::Value *MemPtr,
+                                  const MemberPointerType *MPT) override;
 
 private:
   typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy;
@@ -368,7 +390,7 @@
 
 
   /// \brief All the vbtables which have been referenced.
-  llvm::DenseMap<const CXXRecordDecl *, VBTableVector> VBTablesMap;
+  llvm::DenseMap<const CXXRecordDecl *, VBTableGlobals> VBTablesMap;
 
   /// Info on the global variable used to guard initialization of static locals.
   /// The BitIndex field is only used for externally invisible declarations.
@@ -392,41 +414,13 @@
   return ptr;
 }
 
-/// \brief Finds the first non-virtual base of RD that has virtual bases.  If RD
-/// doesn't have a vbptr, it will reuse the vbptr of the returned class.
-static const CXXRecordDecl *FindFirstNVBaseWithVBases(const CXXRecordDecl *RD) {
-  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-       E = RD->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
-    if (!I->isVirtual() && Base->getNumVBases() > 0)
-      return Base;
-  }
-  llvm_unreachable("RD must have an nv base with vbases");
-}
-
-CharUnits MicrosoftCXXABI::GetVBPtrOffsetFromBases(const CXXRecordDecl *RD) {
-  assert(RD->getNumVBases());
-  CharUnits Total = CharUnits::Zero();
-  while (RD) {
-    const ASTRecordLayout &RDLayout = getContext().getASTRecordLayout(RD);
-    CharUnits VBPtrOffset = RDLayout.getVBPtrOffset();
-    // -1 is the sentinel for no vbptr.
-    if (VBPtrOffset != CharUnits::fromQuantity(-1)) {
-      Total += VBPtrOffset;
-      break;
-    }
-    RD = FindFirstNVBaseWithVBases(RD);
-    Total += RDLayout.getBaseClassOffset(RD);
-  }
-  return Total;
-}
-
 llvm::Value *
 MicrosoftCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                            llvm::Value *This,
                                            const CXXRecordDecl *ClassDecl,
                                            const CXXRecordDecl *BaseClassDecl) {
-  int64_t VBPtrChars = GetVBPtrOffsetFromBases(ClassDecl).getQuantity();
+  int64_t VBPtrChars =
+      getContext().getASTRecordLayout(ClassDecl).getVBPtrOffset().getQuantity();
   llvm::Value *VBPtrOffset = llvm::ConstantInt::get(CGM.PtrDiffTy, VBPtrChars);
   CharUnits IntSize = getContext().getTypeSizeInChars(getContext().IntTy);
   CharUnits VBTableChars =
@@ -446,16 +440,20 @@
   return isa<CXXConstructorDecl>(GD.getDecl());
 }
 
-void MicrosoftCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
-                                 CXXCtorType Type,
-                                 CanQualType &ResTy,
-                                 SmallVectorImpl<CanQualType> &ArgTys) {
-  // 'this' parameter and 'this' return are already in place
+void MicrosoftCXXABI::BuildConstructorSignature(
+    const CXXConstructorDecl *Ctor, CXXCtorType Type, CanQualType &ResTy,
+    SmallVectorImpl<CanQualType> &ArgTys) {
+
+  // All parameters are already in place except is_most_derived, which goes
+  // after 'this' if the constructor is variadic and last otherwise.
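+  // Illustrative sketches for a hypothetical class C with virtual bases:
+  //   C::C(int x)      ->  (this, x, is_most_derived)
+  //   C::C(int x, ...) ->  (this, is_most_derived, x, ...)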
 
   const CXXRecordDecl *Class = Ctor->getParent();
+  const FunctionProtoType *FPT = Ctor->getType()->castAs<FunctionProtoType>();
   if (Class->getNumVBases()) {
-    // Constructors of classes with virtual bases take an implicit parameter.
-    ArgTys.push_back(CGM.getContext().IntTy);
+    if (FPT->isVariadic())
+      ArgTys.insert(ArgTys.begin() + 1, CGM.getContext().IntTy);
+    else
+      ArgTys.push_back(CGM.getContext().IntTy);
   }
 }
 
@@ -547,19 +545,23 @@
                                       const CXXRecordDecl *RD) {
   llvm::Value *ThisInt8Ptr =
     CGF.Builder.CreateBitCast(getThisValue(CGF), CGM.Int8PtrTy, "this.int8");
+  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
 
-  const VBTableVector &VBTables = EnumerateVBTables(RD);
-  for (VBTableVector::const_iterator I = VBTables.begin(), E = VBTables.end();
-       I != E; ++I) {
+  const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
+  for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
+    const VPtrInfo *VBT = (*VBGlobals.VBTables)[I];
+    llvm::GlobalVariable *GV = VBGlobals.Globals[I];
     const ASTRecordLayout &SubobjectLayout =
-      CGM.getContext().getASTRecordLayout(I->VBPtrSubobject.getBase());
-    uint64_t Offs = (I->VBPtrSubobject.getBaseOffset() +
-                     SubobjectLayout.getVBPtrOffset()).getQuantity();
+        CGM.getContext().getASTRecordLayout(VBT->BaseWithVPtr);
+    CharUnits Offs = VBT->NonVirtualOffset;
+    Offs += SubobjectLayout.getVBPtrOffset();
+    if (VBT->getVBaseWithVPtr())
+      Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
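+    // Offs is now (roughly) the vbptr's offset within the complete object:
+    // the subobject holding the vbptr plus the vbptr's offset inside it.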
     llvm::Value *VBPtr =
-        CGF.Builder.CreateConstInBoundsGEP1_64(ThisInt8Ptr, Offs);
-    VBPtr = CGF.Builder.CreateBitCast(VBPtr, I->GV->getType()->getPointerTo(0),
-                                      "vbptr." + I->ReusingBase->getName());
-    CGF.Builder.CreateStore(I->GV, VBPtr);
+        CGF.Builder.CreateConstInBoundsGEP1_64(ThisInt8Ptr, Offs.getQuantity());
+    VBPtr = CGF.Builder.CreateBitCast(VBPtr, GV->getType()->getPointerTo(0),
+                                      "vbptr." + VBT->ReusingBase->getName());
+    CGF.Builder.CreateStore(GV, VBPtr);
   }
 }
 
@@ -583,12 +585,61 @@
   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
 }
 
-llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualCall(
-    CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) {
+CharUnits
+MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) {
   GD = GD.getCanonicalDecl();
   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-  // FIXME: consider splitting the vdtor vs regular method code into two
-  // functions.
+
+  GlobalDecl LookupGD = GD;
+  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+    // Complete destructors take a pointer to the complete object as a
+    // parameter, thus don't need this adjustment.
+    if (GD.getDtorType() == Dtor_Complete)
+      return CharUnits();
+
+    // There's no Dtor_Base in the vftable, but it shares the this adjustment
+    // with the deleting one, so look it up instead.
+    LookupGD = GlobalDecl(DD, Dtor_Deleting);
+  }
+
+  MicrosoftVTableContext::MethodVFTableLocation ML =
+      CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
+  CharUnits Adjustment = ML.VFPtrOffset;
+
+  // Normal virtual instance methods need to adjust from the vfptr that first
+  // defined the virtual method to the virtual base subobject, but destructors
+  // do not.  The vector deleting destructor thunk applies this adjustment for
+  // us if necessary.
+  if (isa<CXXDestructorDecl>(MD))
+    Adjustment = CharUnits::Zero();
+
+  if (ML.VBase) {
+    const ASTRecordLayout &DerivedLayout =
+        CGM.getContext().getASTRecordLayout(MD->getParent());
+    Adjustment += DerivedLayout.getVBaseClassOffset(ML.VBase);
+  }
+
+  return Adjustment;
+}
+
+llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
+    CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This, bool VirtualCall) {
+  if (!VirtualCall) {
+    // If the call of a virtual function is not virtual, we just have to
+    // compensate for the adjustment the virtual function does in its prologue.
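+    // That is, pre-adjust 'this' to the subobject the callee expects so that
+    // its prologue adjustment lands back on the object we started with.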
+    CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(GD);
+    if (Adjustment.isZero())
+      return This;
+
+    unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
+    llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
+    This = CGF.Builder.CreateBitCast(This, charPtrTy);
+    assert(Adjustment.isPositive());
+    return CGF.Builder.CreateConstGEP1_32(This, Adjustment.getQuantity());
+  }
+
+  GD = GD.getCanonicalDecl();
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
 
   GlobalDecl LookupGD = GD;
   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
@@ -607,51 +658,18 @@
   unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
   llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
   CharUnits StaticOffset = ML.VFPtrOffset;
+
+  // Base destructors expect 'this' to point to the beginning of the base
+  // subobject, not the first vfptr that happens to contain the virtual dtor.
+  // However, we still need to apply the virtual base adjustment.
+  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+    StaticOffset = CharUnits::Zero();
+
   if (ML.VBase) {
-    bool AvoidVirtualOffset = false;
-    if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) {
-      // A base destructor can only be called from a complete destructor of the
-      // same record type or another destructor of a more derived type;
-      // or a constructor of the same record type if an exception is thrown.
-      assert(isa<CXXDestructorDecl>(CGF.CurGD.getDecl()) ||
-             isa<CXXConstructorDecl>(CGF.CurGD.getDecl()));
-      const CXXRecordDecl *CurRD =
-          cast<CXXMethodDecl>(CGF.CurGD.getDecl())->getParent();
-
-      if (MD->getParent() == CurRD) {
-        if (isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
-          assert(CGF.CurGD.getDtorType() == Dtor_Complete);
-        if (isa<CXXConstructorDecl>(CGF.CurGD.getDecl()))
-          assert(CGF.CurGD.getCtorType() == Ctor_Complete);
-        // We're calling the main base dtor from a complete structor,
-        // so we know the "this" offset statically.
-        AvoidVirtualOffset = true;
-      } else {
-        // Let's see if we try to call a destructor of a non-virtual base.
-        for (CXXRecordDecl::base_class_const_iterator I = CurRD->bases_begin(),
-             E = CurRD->bases_end(); I != E; ++I) {
-          if (I->getType()->getAsCXXRecordDecl() != MD->getParent())
-            continue;
-          // If we call a base destructor for a non-virtual base, we statically
-          // know where it expects the vfptr and "this" to be.
-          // The total offset should reflect the adjustment done by
-          // adjustThisParameterInVirtualFunctionPrologue().
-          AvoidVirtualOffset = true;
-          break;
-        }
-      }
-    }
-
-    if (AvoidVirtualOffset) {
-      const ASTRecordLayout &Layout =
-          CGF.getContext().getASTRecordLayout(MD->getParent());
-      StaticOffset += Layout.getVBaseClassOffset(ML.VBase);
-    } else {
-      This = CGF.Builder.CreateBitCast(This, charPtrTy);
-      llvm::Value *VBaseOffset =
-          GetVirtualBaseClassOffset(CGF, This, MD->getParent(), ML.VBase);
-      This = CGF.Builder.CreateInBoundsGEP(This, VBaseOffset);
-    }
+    This = CGF.Builder.CreateBitCast(This, charPtrTy);
+    llvm::Value *VBaseOffset =
+        GetVirtualBaseClassOffset(CGF, This, MD->getParent(), ML.VBase);
+    This = CGF.Builder.CreateInBoundsGEP(This, VBaseOffset);
   }
   if (!StaticOffset.isZero()) {
     assert(StaticOffset.isPositive());
@@ -678,20 +696,25 @@
   return false;
 }
 
-void MicrosoftCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
-                                                  QualType &ResTy,
-                                                  FunctionArgList &Params) {
-  BuildThisParam(CGF, Params);
-
+void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
+                                                QualType &ResTy,
+                                                FunctionArgList &Params) {
   ASTContext &Context = getContext();
   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
   if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
     ImplicitParamDecl *IsMostDerived
       = ImplicitParamDecl::Create(Context, 0,
                                   CGF.CurGD.getDecl()->getLocation(),
                                   &Context.Idents.get("is_most_derived"),
                                   Context.IntTy);
-    Params.push_back(IsMostDerived);
+    // The 'most_derived' parameter goes second if the ctor is variadic and last
+    // if it's not.  Dtors can't be variadic.
+    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+    if (FPT->isVariadic())
+      Params.insert(Params.begin() + 1, IsMostDerived);
+    else
+      Params.push_back(IsMostDerived);
     getStructorImplicitParamDecl(CGF) = IsMostDerived;
   } else if (IsDeletingDtor(CGF.CurGD)) {
     ImplicitParamDecl *ShouldDelete
@@ -706,36 +729,12 @@
 
 llvm::Value *MicrosoftCXXABI::adjustThisParameterInVirtualFunctionPrologue(
     CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) {
-  GD = GD.getCanonicalDecl();
-  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
-  GlobalDecl LookupGD = GD;
-  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
-    // Complete destructors take a pointer to the complete object as a
-    // parameter, thus don't need this adjustment.
-    if (GD.getDtorType() == Dtor_Complete)
-      return This;
-
-    // There's no Dtor_Base in vftable but it shares the this adjustment with
-    // the deleting one, so look it up instead.
-    LookupGD = GlobalDecl(DD, Dtor_Deleting);
-  }
-
   // In this ABI, every virtual function takes a pointer to one of the
   // subobjects that first defines it as the 'this' parameter, rather than a
-  // pointer to ther final overrider subobject. Thus, we need to adjust it back
+  // pointer to the final overrider subobject. Thus, we need to adjust it back
   // to the final overrider subobject before use.
   // See comments in the MicrosoftVFTableContext implementation for the details.
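+  // For example, when the method was introduced by a base subobject at a
+  // non-zero offset, the caller passes a pointer to that subobject and the
+  // adjustment computed below shifts it to the final overrider's subobject.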
-
-  MicrosoftVTableContext::MethodVFTableLocation ML =
-      CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
-  CharUnits Adjustment = ML.VFPtrOffset;
-  if (ML.VBase) {
-    const ASTRecordLayout &DerivedLayout =
-        CGF.getContext().getASTRecordLayout(MD->getParent());
-    Adjustment += DerivedLayout.getVBaseClassOffset(ML.VBase);
-  }
-
+  CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(GD);
   if (Adjustment.isZero())
     return This;
 
@@ -784,43 +783,63 @@
   }
 }
 
-void MicrosoftCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
-                                          const CXXConstructorDecl *D,
-                                          CXXCtorType Type, 
-                                          bool ForVirtualBase,
-                                          bool Delegating,
-                                          llvm::Value *This,
-                                          CallExpr::const_arg_iterator ArgBeg,
-                                          CallExpr::const_arg_iterator ArgEnd) {
+unsigned MicrosoftCXXABI::addImplicitConstructorArgs(
+    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
+    bool ForVirtualBase, bool Delegating, CallArgList &Args) {
   assert(Type == Ctor_Complete || Type == Ctor_Base);
-  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Ctor_Complete);
 
-  llvm::Value *ImplicitParam = 0;
-  QualType ImplicitParamTy;
-  if (D->getParent()->getNumVBases()) {
-    ImplicitParam = llvm::ConstantInt::get(CGM.Int32Ty, Type == Ctor_Complete);
-    ImplicitParamTy = getContext().IntTy;
+  // Check if we need a 'most_derived' parameter.
+  if (!D->getParent()->getNumVBases())
+    return 0;
+
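+  // The argument is 1 when constructing the complete object (the callee then
+  // constructs its virtual bases) and 0 for a base-subobject construction.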
+  // Add the 'most_derived' argument second if the constructor is variadic,
+  // or last if it is not.
+  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
+  llvm::Value *MostDerivedArg =
+      llvm::ConstantInt::get(CGM.Int32Ty, Type == Ctor_Complete);
+  RValue RV = RValue::get(MostDerivedArg);
+  if (FPT->isVariadic())
+    Args.insert(Args.begin() + 1,
+                CallArg(RV, getContext().IntTy, /*needscopy=*/false));
+  else
+    Args.add(RV, getContext().IntTy);
+
+  return 1;  // Added one arg.
+}
+
+void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
+                                         const CXXDestructorDecl *DD,
+                                         CXXDtorType Type, bool ForVirtualBase,
+                                         bool Delegating, llvm::Value *This) {
+  llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+
+  if (DD->isVirtual()) {
+    assert(Type != CXXDtorType::Dtor_Deleting &&
+           "The deleting destructor should only be called via a virtual call");
+    This = adjustThisArgumentForVirtualFunctionCall(CGF, GlobalDecl(DD, Type),
+                                                    This, false);
   }
 
   // FIXME: Provide a source location here.
-  CGF.EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), This,
-                        ImplicitParam, ImplicitParamTy, ArgBeg, ArgEnd);
+  CGF.EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
+                        /*ImplicitParam=*/0, /*ImplicitParamTy=*/QualType(), 0, 0);
 }
 
 void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                             const CXXRecordDecl *RD) {
   MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
-  MicrosoftVTableContext::VFPtrListTy VFPtrs = VFTContext.getVFPtrOffsets(RD);
+  VPtrInfoVector VFPtrs = VFTContext.getVFPtrOffsets(RD);
   llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
 
-  for (MicrosoftVTableContext::VFPtrListTy::iterator I = VFPtrs.begin(),
-       E = VFPtrs.end(); I != E; ++I) {
-    llvm::GlobalVariable *VTable = getAddrOfVTable(RD, I->VFPtrFullOffset);
+  for (VPtrInfoVector::iterator I = VFPtrs.begin(), E = VFPtrs.end(); I != E;
+       ++I) {
+    llvm::GlobalVariable *VTable = getAddrOfVTable(RD, (*I)->FullOffsetInMDC);
     if (VTable->hasInitializer())
       continue;
 
     const VTableLayout &VTLayout =
-        VFTContext.getVFTableLayout(RD, I->VFPtrFullOffset);
+        VFTContext.getVFTableLayout(RD, (*I)->FullOffsetInMDC);
     llvm::Constant *Init = CGVT.CreateVTableInitializer(
         RD, VTLayout.vtable_component_begin(),
         VTLayout.getNumVTableComponents(), VTLayout.vtable_thunk_begin(),
@@ -828,7 +847,7 @@
     VTable->setInitializer(Init);
 
     VTable->setLinkage(Linkage);
-    CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable);
+    CGM.setGlobalVisibility(VTable, RD);
   }
 }
 
@@ -847,10 +866,10 @@
 }
 
 static void mangleVFTableName(MicrosoftMangleContext &MangleContext,
-                              const CXXRecordDecl *RD, const VFPtrInfo &VFPtr,
+                              const CXXRecordDecl *RD, const VPtrInfo *VFPtr,
                               SmallString<256> &Name) {
   llvm::raw_svector_ostream Out(Name);
-  MangleContext.mangleCXXVFTable(RD, VFPtr.PathToMangle, Out);
+  MangleContext.mangleCXXVFTable(RD, VFPtr->MangledPath, Out);
 }
 
 llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr(
@@ -868,7 +887,7 @@
   VFTableIdTy ID(RD, VPtrOffset);
   VFTablesMapTy::iterator I;
   bool Inserted;
-  llvm::tie(I, Inserted) = VFTablesMap.insert(
+  std::tie(I, Inserted) = VFTablesMap.insert(
       std::make_pair(ID, static_cast<llvm::GlobalVariable *>(0)));
   if (!Inserted)
     return I->second;
@@ -876,8 +895,7 @@
   llvm::GlobalVariable *&VTable = I->second;
 
   MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
-  const MicrosoftVTableContext::VFPtrListTy &VFPtrs =
-      VTContext.getVFPtrOffsets(RD);
+  const VPtrInfoVector &VFPtrs = VTContext.getVFPtrOffsets(RD);
 
   if (DeferredVFTables.insert(RD)) {
     // We haven't processed this record type before.
@@ -898,12 +916,12 @@
   }
 
   for (size_t J = 0, F = VFPtrs.size(); J != F; ++J) {
-    if (VFPtrs[J].VFPtrFullOffset != VPtrOffset)
+    if (VFPtrs[J]->FullOffsetInMDC != VPtrOffset)
       continue;
 
     llvm::ArrayType *ArrayType = llvm::ArrayType::get(
         CGM.Int8PtrTy,
-        VTContext.getVFTableLayout(RD, VFPtrs[J].VFPtrFullOffset)
+        VTContext.getVFTableLayout(RD, VFPtrs[J]->FullOffsetInMDC)
             .getNumVTableComponents());
 
     SmallString<256> Name;
@@ -925,7 +943,8 @@
   CGBuilderTy &Builder = CGF.Builder;
 
   Ty = Ty->getPointerTo()->getPointerTo();
-  llvm::Value *VPtr = adjustThisArgumentForVirtualCall(CGF, GD, This);
+  llvm::Value *VPtr =
+      adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
   llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty);
 
   MicrosoftVTableContext::MethodVFTableLocation ML =
@@ -955,30 +974,47 @@
       llvm::ConstantInt::get(llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()),
                              DtorType == Dtor_Deleting);
 
-  This = adjustThisArgumentForVirtualCall(CGF, GD, This);
+  This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
   CGF.EmitCXXMemberCall(Dtor, CallLoc, Callee, ReturnValueSlot(), This,
                         ImplicitParam, Context.IntTy, 0, 0);
 }
 
-const VBTableVector &
-MicrosoftCXXABI::EnumerateVBTables(const CXXRecordDecl *RD) {
+const VBTableGlobals &
+MicrosoftCXXABI::enumerateVBTables(const CXXRecordDecl *RD) {
   // At this layer, we can key the cache off of a single class, which is much
-  // easier than caching at the GlobalVariable layer.
-  llvm::DenseMap<const CXXRecordDecl*, VBTableVector>::iterator I;
-  bool added;
-  llvm::tie(I, added) = VBTablesMap.insert(std::make_pair(RD, VBTableVector()));
-  VBTableVector &VBTables = I->second;
-  if (!added)
-    return VBTables;
+  // easier than caching each vbtable individually.
+  llvm::DenseMap<const CXXRecordDecl*, VBTableGlobals>::iterator Entry;
+  bool Added;
+  std::tie(Entry, Added) =
+      VBTablesMap.insert(std::make_pair(RD, VBTableGlobals()));
+  VBTableGlobals &VBGlobals = Entry->second;
+  if (!Added)
+    return VBGlobals;
 
-  VBTableBuilder(CGM, RD).enumerateVBTables(VBTables);
+  MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
+  VBGlobals.VBTables = &Context.enumerateVBTables(RD);
 
-  return VBTables;
+  // Cache the globals for all vbtables so we don't have to recompute the
+  // mangled names.
+  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
+  for (VPtrInfoVector::const_iterator I = VBGlobals.VBTables->begin(),
+                                      E = VBGlobals.VBTables->end();
+       I != E; ++I) {
+    VBGlobals.Globals.push_back(getAddrOfVBTable(**I, RD, Linkage));
+  }
+
+  return VBGlobals;
 }
 
-llvm::Function *
-MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
-                                        StringRef ThunkName) {
+llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
+    const CXXMethodDecl *MD,
+    const MicrosoftVTableContext::MethodVFTableLocation &ML) {
+  // Calculate the mangled name.
+  SmallString<256> ThunkName;
+  llvm::raw_svector_ostream Out(ThunkName);
+  getMangleContext().mangleVirtualMemPtrThunk(MD, Out);
+  Out.flush();
+
   // If the thunk has been generated previously, just return it.
   if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
     return cast<llvm::Function>(GV);
@@ -1002,9 +1038,14 @@
   CodeGenFunction CGF(CGM);
   CGF.StartThunk(ThunkFn, MD, FnInfo);
 
-  // Get to the Callee.
+  // Load the vfptr and then the callee from the vftable.  The caller is
+  // expected to have adjusted 'this' so that the vfptr is at offset zero.
   llvm::Value *This = CGF.LoadCXXThis();
-  llvm::Value *Callee = getVirtualFunctionPointer(CGF, MD, This, ThunkTy);
+  llvm::Value *VTable =
+      CGF.GetVTablePtr(This, ThunkTy->getPointerTo()->getPointerTo());
+  llvm::Value *VFuncPtr =
+      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
+  llvm::Value *Callee = CGF.Builder.CreateLoad(VFuncPtr);
 
   // Make the call and return the result.
   CGF.EmitCallAndReturnForThunk(MD, Callee, 0);
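
The thunk body above performs a bare vftable dispatch: read the vfptr at offset zero of the already-adjusted 'this', then call indirectly through the slot at ML.Index. A standalone toy model of that dispatch (no Clang or LLVM APIs; every type here is invented):

#include <cassert>
#include <iostream>

struct Object;
using VirtualFn = void (*)(Object *);

// The vfptr is assumed to sit at offset zero of the object.
struct Object {
  const VirtualFn *VFTable;
  int State;
};

void fooImpl(Object *O) { O->State = 42; }
void barImpl(Object *O) { O->State = 7; }

static const VirtualFn TheVFTable[] = {fooImpl, barImpl};

// What the emitted thunk does, in spirit: load the vfptr, index the table,
// and make an indirect call with the same 'this'.
void callThroughSlot(Object *O, unsigned Index) { O->VFTable[Index](O); }

int main() {
  Object O{TheVFTable, 0};
  callThroughSlot(&O, 1);  // dispatches to barImpl
  assert(O.State == 7);
  std::cout << "dispatched slot 1, State = " << O.State << "\n";
}
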
@@ -1013,15 +1054,85 @@
 }
 
 void MicrosoftCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
-  const VBTableVector &VBTables = EnumerateVBTables(RD);
-  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
-
-  for (VBTableVector::const_iterator I = VBTables.begin(), E = VBTables.end();
-       I != E; ++I) {
-    I->EmitVBTableDefinition(CGM, RD, Linkage);
+  const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
+  for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
+    const VPtrInfo *VBT = (*VBGlobals.VBTables)[I];
+    llvm::GlobalVariable *GV = VBGlobals.Globals[I];
+    emitVBTableDefinition(*VBT, RD, GV);
   }
 }
 
+llvm::GlobalVariable *
+MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
+                                  llvm::GlobalVariable::LinkageTypes Linkage) {
+  SmallString<256> OutName;
+  llvm::raw_svector_ostream Out(OutName);
+  MicrosoftMangleContext &Mangler =
+      cast<MicrosoftMangleContext>(CGM.getCXXABI().getMangleContext());
+  Mangler.mangleCXXVBTable(RD, VBT.MangledPath, Out);
+  Out.flush();
+  StringRef Name = OutName.str();
+
+  llvm::ArrayType *VBTableType =
+      llvm::ArrayType::get(CGM.IntTy, 1 + VBT.ReusingBase->getNumVBases());
+
+  assert(!CGM.getModule().getNamedGlobal(Name) &&
+         "vbtable with this name already exists: mangling bug?");
+  llvm::GlobalVariable *GV =
+      CGM.CreateOrReplaceCXXRuntimeVariable(Name, VBTableType, Linkage);
+  GV->setUnnamedAddr(true);
+  return GV;
+}
+
+void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
+                                            const CXXRecordDecl *RD,
+                                            llvm::GlobalVariable *GV) const {
+  const CXXRecordDecl *ReusingBase = VBT.ReusingBase;
+
+  assert(RD->getNumVBases() && ReusingBase->getNumVBases() &&
+         "should only emit vbtables for classes with vbtables");
+
+  const ASTRecordLayout &BaseLayout =
+      CGM.getContext().getASTRecordLayout(VBT.BaseWithVPtr);
+  const ASTRecordLayout &DerivedLayout =
+    CGM.getContext().getASTRecordLayout(RD);
+
+  SmallVector<llvm::Constant *, 4> Offsets(1 + ReusingBase->getNumVBases(), 0);
+
+  // The offset from ReusingBase's vbptr to itself always leads.
+  CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
+  Offsets[0] = llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity());
+
+  MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
+  for (const auto &I : ReusingBase->vbases()) {
+    const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
+    CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
+    assert(!Offset.isNegative());
+
+    // Make it relative to the subobject vbptr.
+    CharUnits CompleteVBPtrOffset = VBT.NonVirtualOffset + VBPtrOffset;
+    if (VBT.getVBaseWithVPtr())
+      CompleteVBPtrOffset +=
+          DerivedLayout.getVBaseClassOffset(VBT.getVBaseWithVPtr());
+    Offset -= CompleteVBPtrOffset;
+
+    unsigned VBIndex = Context.getVBTableIndex(ReusingBase, VBase);
+    assert(Offsets[VBIndex] == 0 && "The same vbindex seen twice?");
+    Offsets[VBIndex] = llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity());
+  }
+
+  assert(Offsets.size() ==
+         cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
+                               ->getElementType())->getNumElements());
+  llvm::ArrayType *VBTableType =
+    llvm::ArrayType::get(CGM.IntTy, Offsets.size());
+  llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
+  GV->setInitializer(Init);
+
+  // Set the right visibility.
+  CGM.setGlobalVisibility(GV, RD);
+}
+
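
A toy recomputation of the vbtable contents built above, using invented layout numbers: entry 0 stores the displacement from the vbptr back to the start of its subobject, and every later entry stores the displacement from the vbptr to one virtual base of the complete object.

#include <cassert>
#include <vector>

// Invented layout numbers, not derived from any real record layout.
std::vector<int> buildVBTable(int VBPtrOffsetInSubobject,
                              int VBPtrOffsetInCompleteObject,
                              const std::vector<int> &VBaseOffsets) {
  std::vector<int> Table;
  // Entry 0: from the vbptr back to its subobject.
  Table.push_back(-VBPtrOffsetInSubobject);
  // Entry i: from the vbptr to virtual base i of the complete object.
  for (int VBaseOffset : VBaseOffsets)
    Table.push_back(VBaseOffset - VBPtrOffsetInCompleteObject);
  return Table;
}

int main() {
  // The vbptr sits at offset 4 of a subobject that starts at offset 0 of the
  // complete object; the only virtual base lands at offset 16.
  std::vector<int> Table = buildVBTable(4, 4, {16});
  assert(Table[0] == -4);  // vbptr -> start of the subobject
  assert(Table[1] == 12);  // vbptr -> virtual base subobject
}
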
 llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                     llvm::Value *This,
                                                     const ThisAdjustment &TA) {
@@ -1168,7 +1279,7 @@
   if (D.isExternallyVisible()) {
     // Externally visible variables have to be numbered in Sema to properly
     // handle unreachable VarDecls.
-    BitIndex = getContext().getManglingNumber(&D);
+    BitIndex = getContext().getStaticLocalNumber(&D);
     assert(BitIndex > 0);
     BitIndex--;
   } else {
@@ -1229,38 +1340,6 @@
   CGF.EmitBlock(EndBlock);
 }
 
-// Member pointer helpers.
-static bool hasVBPtrOffsetField(MSInheritanceModel Inheritance) {
-  return Inheritance == MSIM_Unspecified;
-}
-
-static bool hasOnlyOneField(bool IsMemberFunction,
-                            MSInheritanceModel Inheritance) {
-  return Inheritance <= MSIM_SinglePolymorphic ||
-      (!IsMemberFunction && Inheritance <= MSIM_MultiplePolymorphic);
-}
-
-// Only member pointers to functions need a this adjustment, since it can be
-// combined with the field offset for data pointers.
-static bool hasNonVirtualBaseAdjustmentField(bool IsMemberFunction,
-                                             MSInheritanceModel Inheritance) {
-  return (IsMemberFunction && Inheritance >= MSIM_Multiple);
-}
-
-static bool hasVirtualBaseAdjustmentField(MSInheritanceModel Inheritance) {
-  return Inheritance >= MSIM_Virtual;
-}
-
-// Use zero for the field offset of a null data member pointer if we can
-// guarantee that zero is not a valid field offset, or if the member pointer has
-// multiple fields.  Polymorphic classes have a vfptr at offset zero, so we can
-// use zero for null.  If there are multiple fields, we can use zero even if it
-// is a valid field offset because null-ness testing will check the other
-// fields.
-static bool nullFieldOffsetIsZero(MSInheritanceModel Inheritance) {
-  return Inheritance != MSIM_Multiple && Inheritance != MSIM_Single;
-}
-
 bool MicrosoftCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
   // Null-ness for function memptrs only depends on the first field, which is
   // the function pointer.  The rest don't matter, so we can zero initialize.
@@ -1270,28 +1349,28 @@
   // The virtual base adjustment field is always -1 for null, so if we have one
   // we can't zero initialize.  The field offset is sometimes also -1 if 0 is a
   // valid field offset.
-  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
-  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
-  return (!hasVirtualBaseAdjustmentField(Inheritance) &&
-          nullFieldOffsetIsZero(Inheritance));
+  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+  MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+  return (!MSInheritanceAttr::hasVBTableOffsetField(Inheritance) &&
+          RD->nullFieldOffsetIsZero());
 }
 
 llvm::Type *
 MicrosoftCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
-  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
-  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+  MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
   llvm::SmallVector<llvm::Type *, 4> fields;
   if (MPT->isMemberFunctionPointer())
     fields.push_back(CGM.VoidPtrTy);  // FunctionPointerOrVirtualThunk
   else
     fields.push_back(CGM.IntTy);  // FieldOffset
 
-  if (hasNonVirtualBaseAdjustmentField(MPT->isMemberFunctionPointer(),
-                                       Inheritance))
+  if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
+                                          Inheritance))
     fields.push_back(CGM.IntTy);
-  if (hasVBPtrOffsetField(Inheritance))
+  if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
     fields.push_back(CGM.IntTy);
-  if (hasVirtualBaseAdjustmentField(Inheritance))
+  if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
     fields.push_back(CGM.IntTy);  // VirtualBaseAdjustmentOffset
 
   if (fields.size() == 1)
@@ -1303,24 +1382,24 @@
 GetNullMemberPointerFields(const MemberPointerType *MPT,
                            llvm::SmallVectorImpl<llvm::Constant *> &fields) {
   assert(fields.empty());
-  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
-  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+  MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
   if (MPT->isMemberFunctionPointer()) {
     // FunctionPointerOrVirtualThunk
     fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
   } else {
-    if (nullFieldOffsetIsZero(Inheritance))
+    if (RD->nullFieldOffsetIsZero())
       fields.push_back(getZeroInt());  // FieldOffset
     else
       fields.push_back(getAllOnesInt());  // FieldOffset
   }
 
-  if (hasNonVirtualBaseAdjustmentField(MPT->isMemberFunctionPointer(),
-                                       Inheritance))
+  if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
+                                          Inheritance))
     fields.push_back(getZeroInt());
-  if (hasVBPtrOffsetField(Inheritance))
+  if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
     fields.push_back(getZeroInt());
-  if (hasVirtualBaseAdjustmentField(Inheritance))
+  if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
     fields.push_back(getAllOnesInt());
 }
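
For reference, a simplified model of the field layout and null encoding that the MSInheritanceAttr queries above describe for data member pointers. It ignores member function pointers, collapses the rules to the four inheritance spellings plus a polymorphism flag, and all names are invented:

#include <cassert>
#include <vector>

enum class Inheritance { Single, Multiple, Virtual, Unspecified };

// Integer fields of a *data* member pointer and the value each takes when the
// member pointer is null.
std::vector<int> nullDataMemberPointer(Inheritance I, bool Polymorphic) {
  std::vector<int> Fields;
  // FieldOffset: zero can encode null when offset 0 can never name a field
  // (polymorphic classes keep a vfptr there) or when extra fields make the
  // null test unambiguous; otherwise -1 is used.
  bool ZeroIsNull =
      Polymorphic || I == Inheritance::Virtual || I == Inheritance::Unspecified;
  Fields.push_back(ZeroIsNull ? 0 : -1);
  if (I == Inheritance::Unspecified)
    Fields.push_back(0);   // VBPtrOffset
  if (I == Inheritance::Virtual || I == Inheritance::Unspecified)
    Fields.push_back(-1);  // VirtualBaseAdjustmentOffset: always -1 for null
  return Fields;
}

int main() {
  // Plain (non-polymorphic) single inheritance: null is FieldOffset == -1.
  assert(nullDataMemberPointer(Inheritance::Single, false) ==
         (std::vector<int>{-1}));
  // Virtual inheritance: FieldOffset 0 plus a -1 vbtable-offset field.
  assert(nullDataMemberPointer(Inheritance::Virtual, false) ==
         (std::vector<int>{0, -1}));
  // Unspecified: FieldOffset, VBPtrOffset, VirtualBaseAdjustmentOffset.
  assert(nullDataMemberPointer(Inheritance::Unspecified, false) ==
         (std::vector<int>{0, 0, -1}));
}
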
 
@@ -1341,29 +1420,29 @@
                                        const CXXRecordDecl *RD,
                                        CharUnits NonVirtualBaseAdjustment)
 {
-  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+  MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
 
   // Single inheritance class member pointer are represented as scalars instead
   // of aggregates.
-  if (hasOnlyOneField(IsMemberFunction, Inheritance))
+  if (MSInheritanceAttr::hasOnlyOneField(IsMemberFunction, Inheritance))
     return FirstField;
 
   llvm::SmallVector<llvm::Constant *, 4> fields;
   fields.push_back(FirstField);
 
-  if (hasNonVirtualBaseAdjustmentField(IsMemberFunction, Inheritance))
+  if (MSInheritanceAttr::hasNVOffsetField(IsMemberFunction, Inheritance))
     fields.push_back(llvm::ConstantInt::get(
       CGM.IntTy, NonVirtualBaseAdjustment.getQuantity()));
 
-  if (hasVBPtrOffsetField(Inheritance)) {
+  if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance)) {
     CharUnits Offs = CharUnits::Zero();
     if (RD->getNumVBases())
-      Offs = GetVBPtrOffsetFromBases(RD);
+      Offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
     fields.push_back(llvm::ConstantInt::get(CGM.IntTy, Offs.getQuantity()));
   }
 
   // The rest of the fields are adjusted by conversions to a more derived class.
-  if (hasVirtualBaseAdjustmentField(Inheritance))
+  if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
     fields.push_back(getZeroInt());
 
   return llvm::ConstantStruct::getAnon(fields);
@@ -1372,7 +1451,7 @@
 llvm::Constant *
 MicrosoftCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset) {
-  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
   llvm::Constant *FirstField =
     llvm::ConstantInt::get(CGM.IntTy, offset.getQuantity());
   return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/false, RD,
@@ -1395,8 +1474,8 @@
   // FIXME PR15713: Support virtual inheritance paths.
 
   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
-    return BuildMemberPointer(MPT->getClass()->getAsCXXRecordDecl(),
-                              MD, ThisAdjustment);
+    return BuildMemberPointer(MPT->getMostRecentCXXRecordDecl(), MD,
+                              ThisAdjustment);
 
   CharUnits FieldOffset =
     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
@@ -1409,6 +1488,7 @@
                                     CharUnits NonVirtualBaseAdjustment) {
   assert(MD->isInstance() && "Member function must not be static!");
   MD = MD->getCanonicalDecl();
+  RD = RD->getMostRecentDecl();
   CodeGenTypes &Types = CGM.getTypes();
 
   llvm::Constant *FirstField;
@@ -1442,16 +1522,10 @@
                                "member function in virtual base class");
       FirstField = llvm::Constant::getNullValue(CGM.VoidPtrTy);
     } else {
-      SmallString<256> ThunkName;
-      CharUnits PointerWidth = getContext().toCharUnitsFromBits(
-          getContext().getTargetInfo().getPointerWidth(0));
-      uint64_t OffsetInVFTable = ML.Index * PointerWidth.getQuantity();
-      llvm::raw_svector_ostream Out(ThunkName);
-      getMangleContext().mangleVirtualMemPtrThunk(MD, OffsetInVFTable, Out);
-      Out.flush();
-
-      llvm::Function *Thunk = EmitVirtualMemPtrThunk(MD, ThunkName.str());
+      llvm::Function *Thunk = EmitVirtualMemPtrThunk(MD, ML);
       FirstField = llvm::ConstantExpr::getBitCast(Thunk, CGM.VoidPtrTy);
+      // Include the vfptr adjustment if the method is in a non-primary vftable.
+      NonVirtualBaseAdjustment += ML.VFPtrOffset;
     }
   }
 
@@ -1486,9 +1560,10 @@
 
   // If this is a single field member pointer (single inheritance), this is a
   // single icmp.
-  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
-  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
-  if (hasOnlyOneField(MPT->isMemberFunctionPointer(), Inheritance))
+  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+  MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+  if (MSInheritanceAttr::hasOnlyOneField(MPT->isMemberFunctionPointer(),
+                                         Inheritance))
     return Builder.CreateICmp(Eq, L, R);
 
   // Compare the first field.
@@ -1607,11 +1682,9 @@
 
 // Returns an adjusted base cast to i8*, since we do more address arithmetic on
 // it.
-llvm::Value *
-MicrosoftCXXABI::AdjustVirtualBase(CodeGenFunction &CGF,
-                                   const CXXRecordDecl *RD, llvm::Value *Base,
-                                   llvm::Value *VBTableOffset,
-                                   llvm::Value *VBPtrOffset) {
+llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
+    CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD,
+    llvm::Value *Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
   CGBuilderTy &Builder = CGF.Builder;
   Base = Builder.CreateBitCast(Base, CGM.Int8PtrTy);
   llvm::BasicBlock *OriginalBB = 0;
@@ -1637,9 +1710,15 @@
   // know the vbptr offset.
   if (!VBPtrOffset) {
     CharUnits offs = CharUnits::Zero();
-    if (RD->getNumVBases()) {
-      offs = GetVBPtrOffsetFromBases(RD);
-    }
+    if (!RD->hasDefinition()) {
+      DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+      unsigned DiagID = Diags.getCustomDiagID(
+          DiagnosticsEngine::Error,
+          "member pointer representation requires a "
+          "complete class type for %0 to perform this expression");
+      Diags.Report(E->getExprLoc(), DiagID) << RD << E->getSourceRange();
+    } else if (RD->getNumVBases())
+      offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
     VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity());
   }
   llvm::Value *VBPtr = 0;
@@ -1659,18 +1738,16 @@
   return AdjustedBase;
 }
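
A standalone toy model of the adjustment performed above, using raw byte arithmetic instead of IR: find the vbptr, load the vbtable pointer from it, read the 32-bit entry at the given byte offset, and displace relative to the vbptr. Every number below is invented for illustration.

#include <cassert>
#include <cstdint>
#include <cstring>

char *adjustVirtualBase(char *Base, std::int64_t VBPtrOffset,
                        std::int64_t VBTableByteOffset) {
  char *VBPtr = Base + VBPtrOffset;                // address of the vbptr
  const std::int32_t *VBTable;
  std::memcpy(&VBTable, VBPtr, sizeof(VBTable));   // load the vbtable pointer
  std::int32_t Displacement;                       // entry at the byte offset
  std::memcpy(&Displacement,
              reinterpret_cast<const char *>(VBTable) + VBTableByteOffset,
              sizeof(Displacement));
  return VBPtr + Displacement;                     // relative to the vbptr
}

int main() {
  // Fake object: a vbptr at offset 0 pointing at a two-entry vbtable whose
  // second entry says the virtual base lives 16 bytes past the vbptr.
  std::int32_t VBTable[2] = {0, 16};
  alignas(void *) char Object[32] = {};
  const std::int32_t *TablePtr = VBTable;
  std::memcpy(Object, &TablePtr, sizeof(TablePtr));

  char *VBase = adjustVirtualBase(Object, /*VBPtrOffset=*/0,
                                  /*VBTableByteOffset=*/4);
  assert(VBase == Object + 16);
}
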
 
-llvm::Value *
-MicrosoftCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
-                                              llvm::Value *Base,
-                                              llvm::Value *MemPtr,
-                                              const MemberPointerType *MPT) {
+llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
+    CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr,
+    const MemberPointerType *MPT) {
   assert(MPT->isMemberDataPointer());
   unsigned AS = Base->getType()->getPointerAddressSpace();
   llvm::Type *PType =
       CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
   CGBuilderTy &Builder = CGF.Builder;
-  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
-  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+  MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
 
   // Extract the fields we need, regardless of model.  We'll apply them if we
   // have them.
@@ -1681,16 +1758,21 @@
     // We need to extract values.
     unsigned I = 0;
     FieldOffset = Builder.CreateExtractValue(MemPtr, I++);
-    if (hasVBPtrOffsetField(Inheritance))
+    if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
       VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
-    if (hasVirtualBaseAdjustmentField(Inheritance))
+    if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
       VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
   }
 
   if (VirtualBaseAdjustmentOffset) {
-    Base = AdjustVirtualBase(CGF, RD, Base, VirtualBaseAdjustmentOffset,
+    Base = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
                              VBPtrOffset);
   }
+
+  // Cast to char*.
+  Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+
+  // Apply the offset, which we assume is non-null.
   llvm::Value *Addr =
     Builder.CreateInBoundsGEP(Base, FieldOffset, "memptr.offset");
 
@@ -1699,9 +1781,9 @@
   return Builder.CreateBitCast(Addr, PType);
 }
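
The i8* GEP above reflects the fact that, once any virtual-base adjustment is done, a data member pointer boils down to a byte offset from the object pointer. A small portable illustration (not Clang code):

#include <cassert>
#include <cstddef>

struct Widget {
  int a;
  int b;
};

// Apply a byte offset to the object pointer, as the generated code does.
int *memberAddressFromOffset(Widget *W, std::size_t ByteOffset) {
  return reinterpret_cast<int *>(reinterpret_cast<char *>(W) + ByteOffset);
}

int main() {
  Widget W{1, 2};
  std::size_t OffsetOfB = offsetof(Widget, b);
  assert(memberAddressFromOffset(&W, OffsetOfB) == &W.b);
  assert(*memberAddressFromOffset(&W, OffsetOfB) == 2);
}
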
 
-static MSInheritanceModel
+static MSInheritanceAttr::Spelling
 getInheritanceFromMemptr(const MemberPointerType *MPT) {
-  return MPT->getClass()->getAsCXXRecordDecl()->getMSInheritanceModel();
+  return MPT->getMostRecentCXXRecordDecl()->getMSInheritanceModel();
 }
 
 llvm::Value *
@@ -1721,15 +1803,17 @@
   const MemberPointerType *SrcTy =
     E->getSubExpr()->getType()->castAs<MemberPointerType>();
   const MemberPointerType *DstTy = E->getType()->castAs<MemberPointerType>();
-  MSInheritanceModel SrcInheritance = getInheritanceFromMemptr(SrcTy);
-  MSInheritanceModel DstInheritance = getInheritanceFromMemptr(DstTy);
   bool IsFunc = SrcTy->isMemberFunctionPointer();
 
   // If the classes use the same null representation, reinterpret_cast is a nop.
   bool IsReinterpret = E->getCastKind() == CK_ReinterpretMemberPointer;
-  if (IsReinterpret && (IsFunc ||
-                        nullFieldOffsetIsZero(SrcInheritance) ==
-                        nullFieldOffsetIsZero(DstInheritance)))
+  if (IsReinterpret && IsFunc)
+    return Src;
+
+  CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl();
+  CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl();
+  if (IsReinterpret &&
+      SrcRD->nullFieldOffsetIsZero() == DstRD->nullFieldOffsetIsZero())
     return Src;
 
   CGBuilderTy &Builder = CGF.Builder;
@@ -1758,15 +1842,16 @@
   llvm::Value *NonVirtualBaseAdjustment = 0;
   llvm::Value *VirtualBaseAdjustmentOffset = 0;
   llvm::Value *VBPtrOffset = 0;
-  if (!hasOnlyOneField(IsFunc, SrcInheritance)) {
+  MSInheritanceAttr::Spelling SrcInheritance = SrcRD->getMSInheritanceModel();
+  if (!MSInheritanceAttr::hasOnlyOneField(IsFunc, SrcInheritance)) {
     // We need to extract values.
     unsigned I = 0;
     FirstField = Builder.CreateExtractValue(Src, I++);
-    if (hasNonVirtualBaseAdjustmentField(IsFunc, SrcInheritance))
+    if (MSInheritanceAttr::hasNVOffsetField(IsFunc, SrcInheritance))
       NonVirtualBaseAdjustment = Builder.CreateExtractValue(Src, I++);
-    if (hasVBPtrOffsetField(SrcInheritance))
+    if (MSInheritanceAttr::hasVBPtrOffsetField(SrcInheritance))
       VBPtrOffset = Builder.CreateExtractValue(Src, I++);
-    if (hasVirtualBaseAdjustmentField(SrcInheritance))
+    if (MSInheritanceAttr::hasVBTableOffsetField(SrcInheritance))
       VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(Src, I++);
   }
 
@@ -1788,20 +1873,21 @@
   // FIXME PR15713: Support conversions through virtually derived classes.
 
   // Recompose dst from the null struct and the adjusted fields from src.
+  MSInheritanceAttr::Spelling DstInheritance = DstRD->getMSInheritanceModel();
   llvm::Value *Dst;
-  if (hasOnlyOneField(IsFunc, DstInheritance)) {
+  if (MSInheritanceAttr::hasOnlyOneField(IsFunc, DstInheritance)) {
     Dst = FirstField;
   } else {
     Dst = llvm::UndefValue::get(DstNull->getType());
     unsigned Idx = 0;
     Dst = Builder.CreateInsertValue(Dst, FirstField, Idx++);
-    if (hasNonVirtualBaseAdjustmentField(IsFunc, DstInheritance))
+    if (MSInheritanceAttr::hasNVOffsetField(IsFunc, DstInheritance))
       Dst = Builder.CreateInsertValue(
         Dst, getValueOrZeroInt(NonVirtualBaseAdjustment), Idx++);
-    if (hasVBPtrOffsetField(DstInheritance))
+    if (MSInheritanceAttr::hasVBPtrOffsetField(DstInheritance))
       Dst = Builder.CreateInsertValue(
         Dst, getValueOrZeroInt(VBPtrOffset), Idx++);
-    if (hasVirtualBaseAdjustmentField(DstInheritance))
+    if (MSInheritanceAttr::hasVBTableOffsetField(DstInheritance))
       Dst = Builder.CreateInsertValue(
         Dst, getValueOrZeroInt(VirtualBaseAdjustmentOffset), Idx++);
   }
@@ -1833,8 +1919,8 @@
   if (E->getCastKind() == CK_ReinterpretMemberPointer)
     return Src;
 
-  MSInheritanceModel SrcInheritance = getInheritanceFromMemptr(SrcTy);
-  MSInheritanceModel DstInheritance = getInheritanceFromMemptr(DstTy);
+  MSInheritanceAttr::Spelling SrcInheritance = getInheritanceFromMemptr(SrcTy);
+  MSInheritanceAttr::Spelling DstInheritance = getInheritanceFromMemptr(DstTy);
 
   // Decompose src.
   llvm::Constant *FirstField = Src;
@@ -1842,15 +1928,15 @@
   llvm::Constant *VirtualBaseAdjustmentOffset = 0;
   llvm::Constant *VBPtrOffset = 0;
   bool IsFunc = SrcTy->isMemberFunctionPointer();
-  if (!hasOnlyOneField(IsFunc, SrcInheritance)) {
+  if (!MSInheritanceAttr::hasOnlyOneField(IsFunc, SrcInheritance)) {
     // We need to extract values.
     unsigned I = 0;
     FirstField = Src->getAggregateElement(I++);
-    if (hasNonVirtualBaseAdjustmentField(IsFunc, SrcInheritance))
+    if (MSInheritanceAttr::hasNVOffsetField(IsFunc, SrcInheritance))
       NonVirtualBaseAdjustment = Src->getAggregateElement(I++);
-    if (hasVBPtrOffsetField(SrcInheritance))
+    if (MSInheritanceAttr::hasVBPtrOffsetField(SrcInheritance))
       VBPtrOffset = Src->getAggregateElement(I++);
-    if (hasVirtualBaseAdjustmentField(SrcInheritance))
+    if (MSInheritanceAttr::hasVBTableOffsetField(SrcInheritance))
       VirtualBaseAdjustmentOffset = Src->getAggregateElement(I++);
   }
 
@@ -1873,35 +1959,33 @@
   // FIXME PR15713: Support conversions through virtually derived classes.
 
   // Recompose dst from the null struct and the adjusted fields from src.
-  if (hasOnlyOneField(IsFunc, DstInheritance))
+  if (MSInheritanceAttr::hasOnlyOneField(IsFunc, DstInheritance))
     return FirstField;
 
   llvm::SmallVector<llvm::Constant *, 4> Fields;
   Fields.push_back(FirstField);
-  if (hasNonVirtualBaseAdjustmentField(IsFunc, DstInheritance))
+  if (MSInheritanceAttr::hasNVOffsetField(IsFunc, DstInheritance))
     Fields.push_back(getConstantOrZeroInt(NonVirtualBaseAdjustment));
-  if (hasVBPtrOffsetField(DstInheritance))
+  if (MSInheritanceAttr::hasVBPtrOffsetField(DstInheritance))
     Fields.push_back(getConstantOrZeroInt(VBPtrOffset));
-  if (hasVirtualBaseAdjustmentField(DstInheritance))
+  if (MSInheritanceAttr::hasVBTableOffsetField(DstInheritance))
     Fields.push_back(getConstantOrZeroInt(VirtualBaseAdjustmentOffset));
   return llvm::ConstantStruct::getAnon(Fields);
 }
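
Both the IR and the constant conversion paths follow the same decompose/recompose shape: read whichever fields the source representation carries, then rebuild the destination representation, zero-filling anything the source lacked. A toy sketch of that shape only (base adjustments omitted; all names invented):

#include <cassert>
#include <map>
#include <string>

using MemPtrFields = std::map<std::string, int>;

// Rebuild the destination member-pointer representation from the source one,
// defaulting any field the source did not have to zero.
MemPtrFields convert(const MemPtrFields &Src, bool DstHasNVOffset,
                     bool DstHasVBPtrOffset, bool DstHasVBTableOffset) {
  auto get = [&](const char *Name) {
    auto It = Src.find(Name);
    return It == Src.end() ? 0 : It->second;  // getValueOrZeroInt, in spirit
  };
  MemPtrFields Dst;
  Dst["FirstField"] = get("FirstField");
  if (DstHasNVOffset)
    Dst["NVOffset"] = get("NVOffset");
  if (DstHasVBPtrOffset)
    Dst["VBPtrOffset"] = get("VBPtrOffset");
  if (DstHasVBTableOffset)
    Dst["VBTableOffset"] = get("VBTableOffset");
  return Dst;
}

int main() {
  // Converting a single-field representation into one that also carries an
  // NVOffset: the missing field is filled with zero.
  MemPtrFields Src = {{"FirstField", 8}};
  MemPtrFields Dst = convert(Src, /*DstHasNVOffset=*/true,
                             /*DstHasVBPtrOffset=*/false,
                             /*DstHasVBTableOffset=*/false);
  assert(Dst.at("FirstField") == 8);
  assert(Dst.at("NVOffset") == 0);
}
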
 
-llvm::Value *
-MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
-                                                 llvm::Value *&This,
-                                                 llvm::Value *MemPtr,
-                                                 const MemberPointerType *MPT) {
+llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
+    CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
+    llvm::Value *MemPtr, const MemberPointerType *MPT) {
   assert(MPT->isMemberFunctionPointer());
   const FunctionProtoType *FPT =
     MPT->getPointeeType()->castAs<FunctionProtoType>();
-  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
   llvm::FunctionType *FTy =
     CGM.getTypes().GetFunctionType(
       CGM.getTypes().arrangeCXXMethodType(RD, FPT));
   CGBuilderTy &Builder = CGF.Builder;
 
-  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+  MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
 
   // Extract the fields we need, regardless of model.  We'll apply them if we
   // have them.
@@ -1913,16 +1997,16 @@
     // We need to extract values.
     unsigned I = 0;
     FunctionPointer = Builder.CreateExtractValue(MemPtr, I++);
-    if (hasNonVirtualBaseAdjustmentField(MPT, Inheritance))
+    if (MSInheritanceAttr::hasNVOffsetField(MPT, Inheritance))
       NonVirtualBaseAdjustment = Builder.CreateExtractValue(MemPtr, I++);
-    if (hasVBPtrOffsetField(Inheritance))
+    if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
       VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
-    if (hasVirtualBaseAdjustmentField(Inheritance))
+    if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
       VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
   }
 
   if (VirtualBaseAdjustmentOffset) {
-    This = AdjustVirtualBase(CGF, RD, This, VirtualBaseAdjustmentOffset,
+    This = AdjustVirtualBase(CGF, E, RD, This, VirtualBaseAdjustmentOffset,
                              VBPtrOffset);
   }
 
diff --git a/lib/CodeGen/MicrosoftVBTables.cpp b/lib/CodeGen/MicrosoftVBTables.cpp
deleted file mode 100644
index dabf52c..0000000
--- a/lib/CodeGen/MicrosoftVBTables.cpp
+++ /dev/null
@@ -1,233 +0,0 @@
-//===--- MicrosoftVBTables.cpp - Virtual Base Table Emission --------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class generates data about MSVC virtual base tables.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MicrosoftVBTables.h"
-#include "CodeGenModule.h"
-#include "CGCXXABI.h"
-
-namespace clang {
-namespace CodeGen {
-
-/// Holds intermediate data about a path to a vbptr inside a base subobject.
-struct VBTablePath {
-  VBTablePath(const VBTableInfo &VBInfo)
-    : VBInfo(VBInfo), NextBase(VBInfo.VBPtrSubobject.getBase()) { }
-
-  /// All the data needed to build a vbtable, minus the GlobalVariable whose
-  /// name we haven't computed yet.
-  VBTableInfo VBInfo;
-
-  /// Next base to use for disambiguation.  Can be null if we've already
-  /// disambiguated this path once.
-  const CXXRecordDecl *NextBase;
-
-  /// Path is not really a full path like a CXXBasePath.  It holds the subset of
-  /// records that need to be mangled into the vbtable symbol name in order to get
-  /// a unique name.
-  llvm::SmallVector<const CXXRecordDecl *, 1> Path;
-};
-
-VBTableBuilder::VBTableBuilder(CodeGenModule &CGM,
-                               const CXXRecordDecl *MostDerived)
-    : CGM(CGM), MostDerived(MostDerived),
-      DerivedLayout(CGM.getContext().getASTRecordLayout(MostDerived)) {}
-
-void VBTableBuilder::enumerateVBTables(VBTableVector &VBTables) {
-  VBTablePathVector Paths;
-  findUnambiguousPaths(MostDerived, BaseSubobject(MostDerived,
-                                                  CharUnits::Zero()), Paths);
-  for (VBTablePathVector::iterator I = Paths.begin(), E = Paths.end();
-       I != E; ++I) {
-    VBTablePath *P = *I;
-    P->VBInfo.GV = getAddrOfVBTable(P->VBInfo.ReusingBase, P->Path);
-    VBTables.push_back(P->VBInfo);
-  }
-}
-
-
-void VBTableBuilder::findUnambiguousPaths(const CXXRecordDecl *ReusingBase,
-                                          BaseSubobject CurSubobject,
-                                          VBTablePathVector &Paths) {
-  size_t PathsStart = Paths.size();
-  bool ReuseVBPtrFromBase = true;
-  const CXXRecordDecl *CurBase = CurSubobject.getBase();
-  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(CurBase);
-
-  // If this base has a vbptr, then we've found a path.  These are not full
-  // paths, so we don't use CXXBasePath.
-  if (Layout.hasOwnVBPtr()) {
-    ReuseVBPtrFromBase = false;
-    VBTablePath *Info = new VBTablePath(
-      VBTableInfo(ReusingBase, CurSubobject, /*GV=*/0));
-    Paths.push_back(Info);
-  }
-
-  // Recurse onto any bases which themselves have virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator I = CurBase->bases_begin(),
-       E = CurBase->bases_end(); I != E; ++I) {
-    const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
-    if (!Base->getNumVBases())
-      continue;  // Bases without virtual bases have no vbptrs.
-    CharUnits NextOffset;
-    const CXXRecordDecl *NextReusingBase = Base;
-    if (I->isVirtual()) {
-      if (!VBasesSeen.insert(Base))
-        continue;  // Don't visit virtual bases twice.
-      NextOffset = DerivedLayout.getVBaseClassOffset(Base);
-    } else {
-      NextOffset = (CurSubobject.getBaseOffset() +
-                    Layout.getBaseClassOffset(Base));
-
-      // If CurBase didn't have a vbptr, then ReusingBase will reuse the vbptr
-      // from the first non-virtual base with vbases for its vbptr.
-      if (ReuseVBPtrFromBase) {
-        NextReusingBase = ReusingBase;
-        ReuseVBPtrFromBase = false;
-      }
-    }
-
-    size_t NumPaths = Paths.size();
-    findUnambiguousPaths(NextReusingBase, BaseSubobject(Base, NextOffset),
-                         Paths);
-
-    // Tag paths through this base with the base itself.  We might use it to
-    // disambiguate.
-    for (size_t I = NumPaths, E = Paths.size(); I != E; ++I)
-      Paths[I]->NextBase = Base;
-  }
-
-  bool AmbiguousPaths = rebucketPaths(Paths, PathsStart);
-  if (AmbiguousPaths)
-    rebucketPaths(Paths, PathsStart, /*SecondPass=*/true);
-
-#ifndef NDEBUG
-  // Check that the paths are in fact unique.
-  for (size_t I = PathsStart + 1, E = Paths.size(); I != E; ++I) {
-    assert(Paths[I]->Path != Paths[I - 1]->Path && "vbtable paths are not unique");
-  }
-#endif
-}
-
-static bool pathCompare(VBTablePath *LHS, VBTablePath *RHS) {
-  return LHS->Path < RHS->Path;
-}
-
-void VBTableBuilder::extendPath(VBTablePath *P, bool SecondPass) {
-  assert(P->NextBase || SecondPass);
-  if (P->NextBase) {
-    P->Path.push_back(P->NextBase);
-    P->NextBase = 0;  // Prevent the path from being extended twice.
-  }
-}
-
-bool VBTableBuilder::rebucketPaths(VBTablePathVector &Paths, size_t PathsStart,
-                                   bool SecondPass) {
-  // What we're essentially doing here is bucketing together ambiguous paths.
-  // Any bucket with more than one path in it gets extended by NextBase, which
-  // is usually the direct base of the inherited the vbptr.  This code uses a
-  // sorted vector to implement a multiset to form the buckets.  Note that the
-  // ordering is based on pointers, but it doesn't change our output order.  The
-  // current algorithm is designed to match MSVC 2012's names.
-  // TODO: Implement MSVC 2010 or earlier names to avoid extra vbtable cruft.
-  VBTablePathVector PathsSorted(&Paths[PathsStart], &Paths.back() + 1);
-  std::sort(PathsSorted.begin(), PathsSorted.end(), pathCompare);
-  bool AmbiguousPaths = false;
-  for (size_t I = 0, E = PathsSorted.size(); I != E;) {
-    // Scan forward to find the end of the bucket.
-    size_t BucketStart = I;
-    do {
-      ++I;
-    } while (I != E && PathsSorted[BucketStart]->Path == PathsSorted[I]->Path);
-
-    // If this bucket has multiple paths, extend them all.
-    if (I - BucketStart > 1) {
-      AmbiguousPaths = true;
-      for (size_t II = BucketStart; II != I; ++II)
-        extendPath(PathsSorted[II], SecondPass);
-    }
-  }
-  return AmbiguousPaths;
-}
-
-llvm::GlobalVariable *
-VBTableBuilder::getAddrOfVBTable(const CXXRecordDecl *ReusingBase,
-                                 ArrayRef<const CXXRecordDecl *> BasePath) {
-  // Caching at this layer is redundant with the caching in EnumerateVBTables().
-
-  SmallString<256> OutName;
-  llvm::raw_svector_ostream Out(OutName);
-  MicrosoftMangleContext &Mangler =
-      cast<MicrosoftMangleContext>(CGM.getCXXABI().getMangleContext());
-  Mangler.mangleCXXVBTable(MostDerived, BasePath, Out);
-  Out.flush();
-  StringRef Name = OutName.str();
-
-  llvm::ArrayType *VBTableType =
-    llvm::ArrayType::get(CGM.IntTy, 1 + ReusingBase->getNumVBases());
-
-  assert(!CGM.getModule().getNamedGlobal(Name) &&
-         "vbtable with this name already exists: mangling bug?");
-  llvm::GlobalVariable *VBTable =
-    CGM.CreateOrReplaceCXXRuntimeVariable(Name, VBTableType,
-                                          llvm::GlobalValue::ExternalLinkage);
-  VBTable->setUnnamedAddr(true);
-  return VBTable;
-}
-
-void VBTableInfo::EmitVBTableDefinition(
-    CodeGenModule &CGM, const CXXRecordDecl *RD,
-    llvm::GlobalVariable::LinkageTypes Linkage) const {
-  assert(RD->getNumVBases() && ReusingBase->getNumVBases() &&
-         "should only emit vbtables for classes with vbtables");
-
-  const ASTRecordLayout &BaseLayout =
-    CGM.getContext().getASTRecordLayout(VBPtrSubobject.getBase());
-  const ASTRecordLayout &DerivedLayout =
-    CGM.getContext().getASTRecordLayout(RD);
-
-  SmallVector<llvm::Constant *, 4> Offsets(1 + ReusingBase->getNumVBases(), 0);
-
-  // The offset from ReusingBase's vbptr to itself always leads.
-  CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
-  Offsets[0] = llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity());
-
-  MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
-  for (CXXRecordDecl::base_class_const_iterator I = ReusingBase->vbases_begin(),
-       E = ReusingBase->vbases_end(); I != E; ++I) {
-    const CXXRecordDecl *VBase = I->getType()->getAsCXXRecordDecl();
-    CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
-    assert(!Offset.isNegative());
-    // Make it relative to the subobject vbptr.
-    Offset -= VBPtrSubobject.getBaseOffset() + VBPtrOffset;
-    unsigned VBIndex = Context.getVBTableIndex(ReusingBase, VBase);
-    assert(Offsets[VBIndex] == 0 && "The same vbindex seen twice?");
-    Offsets[VBIndex] = llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity());
-  }
-
-  assert(Offsets.size() ==
-         cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
-                               ->getElementType())->getNumElements());
-  llvm::ArrayType *VBTableType =
-    llvm::ArrayType::get(CGM.IntTy, Offsets.size());
-  llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
-  GV->setInitializer(Init);
-
-  // Set the correct linkage.
-  GV->setLinkage(Linkage);
-
-  // Set the right visibility.
-  CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForVTable);
-}
-
-} // namespace CodeGen
-} // namespace clang
diff --git a/lib/CodeGen/MicrosoftVBTables.h b/lib/CodeGen/MicrosoftVBTables.h
deleted file mode 100644
index 4ad8e07..0000000
--- a/lib/CodeGen/MicrosoftVBTables.h
+++ /dev/null
@@ -1,129 +0,0 @@
-//===--- MicrosoftVBTables.h - Virtual Base Table Emission ----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class generates data about MSVC virtual base tables.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/AST/BaseSubobject.h"
-#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/IR/GlobalVariable.h"
-#include <vector>
-
-namespace clang {
-
-class ASTRecordLayout;
-
-namespace CodeGen {
-
-class CodeGenModule;
-
-struct VBTableInfo {
-  VBTableInfo(const CXXRecordDecl *ReusingBase, BaseSubobject VBPtrSubobject,
-              llvm::GlobalVariable *GV)
-    : ReusingBase(ReusingBase), VBPtrSubobject(VBPtrSubobject), GV(GV) { }
-
-  /// The vbtable will hold all of the virtual bases of ReusingBase.  This may
-  /// or may not be the same class as VBPtrSubobject.Base.  A derived class will
-  /// reuse the vbptr of the first non-virtual base subobject that has one.
-  const CXXRecordDecl *ReusingBase;
-
-  /// The vbptr is stored inside this subobject.
-  BaseSubobject VBPtrSubobject;
-
-  /// The GlobalVariable for this vbtable.
-  llvm::GlobalVariable *GV;
-
-  /// \brief Emits a definition for GV by setting it's initializer.
-  void EmitVBTableDefinition(CodeGenModule &CGM, const CXXRecordDecl *RD,
-                             llvm::GlobalVariable::LinkageTypes Linkage) const;
-};
-
-// These are embedded in a DenseMap and the elements are large, so we don't want
-// SmallVector.
-typedef std::vector<VBTableInfo> VBTableVector;
-
-struct VBTablePath;
-
-typedef llvm::SmallVector<VBTablePath *, 6> VBTablePathVector;
-
-/// Produces MSVC-compatible vbtable data.  The symbols produced by this builder
-/// match those produced by MSVC 2012, which is different from MSVC 2010.
-///
-/// Unlike Itanium, which uses only one vtable per class, MSVC uses a different
-/// symbol for every "address point" installed in base subobjects.  As a result,
-/// we have to compute unique symbols for every table.  Since there can be
-/// multiple non-virtual base subobjects of the same class, combining the most
-/// derived class with the base containing the vtable is insufficient.  The most
-/// trivial algorithm would be to mangle in the entire path from base to most
-/// derived, but that would be too easy and would create unnecessarily large
-/// symbols.  ;)
-///
-/// MSVC 2012 appears to minimize the vbtable names using the following
-/// algorithm.  First, walk the class hierarchy in the usual order, depth first,
-/// left to right, to find all of the subobjects which contain a vbptr field.
-/// Visiting each class node yields a list of inheritance paths to vbptrs.  Each
-/// record with a vbptr creates an initially empty path.
-///
-/// To combine paths from child nodes, the paths are compared to check for
-/// ambiguity.  Paths are "ambiguous" if multiple paths have the same set of
-/// components in the same order.  Each group of ambiguous paths is extended by
-/// appending the class of the base from which it came.  If the current class
-/// node produced an ambiguous path, its path is extended with the current class.
-/// After extending paths, MSVC again checks for ambiguity, and extends any
-/// ambiguous path which wasn't already extended.  Because each node yields an
-/// unambiguous set of paths, MSVC doesn't need to extend any path more than once
-/// to produce an unambiguous set of paths.
-///
-/// The VBTableBuilder class attempts to implement this algorithm by repeatedly
-/// bucketing paths together by sorting them.
-///
-/// TODO: Presumably vftables use the same algorithm.
-///
-/// TODO: Implement the MSVC 2010 name mangling scheme to avoid emitting
-/// duplicate vbtables with different symbols.
-class VBTableBuilder {
-public:
-  VBTableBuilder(CodeGenModule &CGM, const CXXRecordDecl *MostDerived);
-
-  void enumerateVBTables(VBTableVector &VBTables);
-
-private:
-  bool hasVBPtr(const CXXRecordDecl *RD);
-
-  llvm::GlobalVariable *getAddrOfVBTable(const CXXRecordDecl *ReusingBase,
-                                      ArrayRef<const CXXRecordDecl *> BasePath);
-
-  /// Enumerates paths to bases with vbptrs.  The paths elements are compressed
-  /// to contain only the classes necessary to form an unambiguous path.
-  void findUnambiguousPaths(const CXXRecordDecl *ReusingBase,
-                            BaseSubobject CurSubobject,
-                            VBTablePathVector &Paths);
-
-  void extendPath(VBTablePath *Info, bool SecondPass);
-
-  bool rebucketPaths(VBTablePathVector &Paths, size_t PathsStart,
-                     bool SecondPass = false);
-
-  CodeGenModule &CGM;
-
-  const CXXRecordDecl *MostDerived;
-
-  /// Caches the layout of the most derived class.
-  const ASTRecordLayout &DerivedLayout;
-
-  /// Set of vbases to avoid re-visiting the same vbases.
-  llvm::SmallPtrSet<const CXXRecordDecl*, 4> VBasesSeen;
-};
-
-} // namespace CodeGen
-
-} // namespace clang
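
The path-disambiguation scheme documented in the header removed here lives on in the VPtrInfo-based code. A rough standalone sketch of the core idea, operating on plain strings rather than CXXRecordDecls (purely illustrative, not the real implementation):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Each candidate vbtable path starts empty and is extended with the name of
// the base it was reached through whenever it collides with another path.
struct Path {
  std::vector<std::string> Components;  // what gets mangled into the name
  std::string NextBase;                 // next component to use, may be empty
};

static bool extendAmbiguousPaths(std::vector<Path> &Paths) {
  // First find every path that collides with another one...
  std::vector<bool> Ambiguous(Paths.size(), false);
  for (std::size_t I = 0; I != Paths.size(); ++I)
    for (std::size_t J = I + 1; J != Paths.size(); ++J)
      if (Paths[I].Components == Paths[J].Components)
        Ambiguous[I] = Ambiguous[J] = true;

  // ...then extend each of them by the base it was reached through.
  bool ExtendedAny = false;
  for (std::size_t I = 0; I != Paths.size(); ++I) {
    if (!Ambiguous[I] || Paths[I].NextBase.empty())
      continue;
    Paths[I].Components.push_back(Paths[I].NextBase);
    Paths[I].NextBase.clear();  // never extend the same path twice
    ExtendedAny = true;
  }
  return ExtendedAny;
}

int main() {
  // Two vbptrs reachable through bases B and C collide until extended.
  std::vector<Path> Paths = {{{}, "B"}, {{}, "C"}};
  while (extendAmbiguousPaths(Paths))
    ;
  assert(Paths[0].Components != Paths[1].Components);
}
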
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index bc7acbc..7873f44 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -12,30 +12,31 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/CodeGen/ModuleBuilder.h"
-#include "CodeGenModule.h"
 #include "CGDebugInfo.h"
+#include "CodeGenModule.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/Expr.h"
 #include "clang/Basic/Diagnostic.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/Frontend/CodeGenOptions.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
+#include <memory>
 using namespace clang;
 
 namespace {
   class CodeGeneratorImpl : public CodeGenerator {
     DiagnosticsEngine &Diags;
-    OwningPtr<const llvm::DataLayout> TD;
+    std::unique_ptr<const llvm::DataLayout> TD;
     ASTContext *Ctx;
     const CodeGenOptions CodeGenOpts;  // Intentionally copied in.
   protected:
-    OwningPtr<llvm::Module> M;
-    OwningPtr<CodeGen::CodeGenModule> Builder;
+    std::unique_ptr<llvm::Module> M;
+    std::unique_ptr<CodeGen::CodeGenModule> Builder;
+
   public:
     CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string& ModuleName,
                       const CodeGenOptions &CGO, llvm::LLVMContext& C)
@@ -44,15 +45,13 @@
 
     virtual ~CodeGeneratorImpl() {}
 
-    virtual llvm::Module* GetModule() {
+    llvm::Module* GetModule() override {
       return M.get();
     }
 
-    virtual llvm::Module* ReleaseModule() {
-      return M.take();
-    }
+    llvm::Module *ReleaseModule() override { return M.release(); }
 
-    virtual void Initialize(ASTContext &Context) {
+    void Initialize(ASTContext &Context) override {
       Ctx = &Context;
 
       M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
@@ -65,14 +64,14 @@
         HandleDependentLibrary(CodeGenOpts.DependentLibraries[i]);
     }
 
-    virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+    void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
       if (Diags.hasErrorOccurred())
         return;
 
       Builder->HandleCXXStaticMemberVarInstantiation(VD);
     }
 
-    virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
+    bool HandleTopLevelDecl(DeclGroupRef DG) override {
       if (Diags.hasErrorOccurred())
         return true;
 
@@ -86,7 +85,7 @@
     /// to (e.g. struct, union, enum, class) is completed. This allows the
     /// client hack on the type, which can occur at any point in the file
     /// (because these can be defined in declspecs).
-    virtual void HandleTagDeclDefinition(TagDecl *D) {
+    void HandleTagDeclDefinition(TagDecl *D) override {
       if (Diags.hasErrorOccurred())
         return;
 
@@ -95,10 +94,8 @@
       // In C++, we may have member functions that need to be emitted at this 
       // point.
       if (Ctx->getLangOpts().CPlusPlus && !D->isDependentContext()) {
-        for (DeclContext::decl_iterator M = D->decls_begin(), 
-                                     MEnd = D->decls_end();
-             M != MEnd; ++M)
-          if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*M))
+        for (auto *M : D->decls())
+          if (auto *Method = dyn_cast<CXXMethodDecl>(M))
             if (Method->doesThisDeclarationHaveABody() &&
                 (Method->hasAttr<UsedAttr>() || 
                  Method->hasAttr<ConstructorAttr>()))
@@ -106,7 +103,7 @@
       }
     }
 
-    virtual void HandleTagDeclRequiredDefinition(const TagDecl *D) LLVM_OVERRIDE {
+    void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
       if (Diags.hasErrorOccurred())
         return;
 
@@ -115,8 +112,10 @@
           DI->completeRequiredType(RD);
     }
 
-    virtual void HandleTranslationUnit(ASTContext &Ctx) {
+    void HandleTranslationUnit(ASTContext &Ctx) override {
       if (Diags.hasErrorOccurred()) {
+        if (Builder)
+          Builder->clear();
         M.reset();
         return;
       }
@@ -125,30 +124,30 @@
         Builder->Release();
     }
 
-    virtual void CompleteTentativeDefinition(VarDecl *D) {
+    void CompleteTentativeDefinition(VarDecl *D) override {
       if (Diags.hasErrorOccurred())
         return;
 
       Builder->EmitTentativeDefinition(D);
     }
 
-    virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+    void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) override {
       if (Diags.hasErrorOccurred())
         return;
 
       Builder->EmitVTable(RD, DefinitionRequired);
     }
 
-    virtual void HandleLinkerOptionPragma(llvm::StringRef Opts) {
+    void HandleLinkerOptionPragma(llvm::StringRef Opts) override {
       Builder->AppendLinkerOptions(Opts);
     }
 
-    virtual void HandleDetectMismatch(llvm::StringRef Name,
-                                      llvm::StringRef Value) {
+    void HandleDetectMismatch(llvm::StringRef Name,
+                              llvm::StringRef Value) override {
       Builder->AddDetectMismatch(Name, Value);
     }
 
-    virtual void HandleDependentLibrary(llvm::StringRef Lib) {
+    void HandleDependentLibrary(llvm::StringRef Lib) override {
       Builder->AddDependentLib(Lib);
     }
   };
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 7446e67..c602b1f 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -114,6 +114,9 @@
   case Ignore:
     OS << "Ignore";
     break;
+  case InAlloca:
+    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
+    break;
   case Indirect:
     OS << "Indirect Align=" << getIndirectAlign()
        << " ByVal=" << getIndirectByVal()
@@ -206,14 +209,12 @@
 
   // If this is a C++ record, check the bases first.
   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
-    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
-           e = CXXRD->bases_end(); i != e; ++i)
-      if (!isEmptyRecord(Context, i->getType(), true))
+    for (const auto &I : CXXRD->bases())
+      if (!isEmptyRecord(Context, I.getType(), true))
         return false;
 
-  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-         i != e; ++i)
-    if (!isEmptyField(Context, *i, AllowArrays))
+  for (const auto *I : RD->fields())
+    if (!isEmptyField(Context, I, AllowArrays))
       return false;
   return true;
 }
@@ -239,10 +240,9 @@
 
   // If this is a C++ record, check the bases first.
   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
-    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
-           e = CXXRD->bases_end(); i != e; ++i) {
+    for (const auto &I : CXXRD->bases()) {
       // Ignore empty records.
-      if (isEmptyRecord(Context, i->getType(), true))
+      if (isEmptyRecord(Context, I.getType(), true))
         continue;
 
       // If we already found an element then this isn't a single-element struct.
@@ -251,16 +251,14 @@
 
       // If this is non-empty and not a single element struct, the composite
       // cannot be a single element struct.
-      Found = isSingleElementStruct(i->getType(), Context);
+      Found = isSingleElementStruct(I.getType(), Context);
       if (!Found)
         return 0;
     }
   }
 
   // Check for single element.
-  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-         i != e; ++i) {
-    const FieldDecl *FD = *i;
+  for (const auto *FD : RD->fields()) {
     QualType FT = FD->getType();
 
     // Ignore empty fields.
@@ -336,10 +334,7 @@
 
   uint64_t Size = 0;
 
-  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-         i != e; ++i) {
-    const FieldDecl *FD = *i;
-
+  for (const auto *FD : RD->fields()) {
     if (!is32Or64BitBasicType(FD->getType(), Context))
       return false;
 
@@ -371,15 +366,14 @@
   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType RetTy) const;
 
-  virtual void computeInfo(CGFunctionInfo &FI) const {
+  void computeInfo(CGFunctionInfo &FI) const override {
     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-         it != ie; ++it)
-      it->info = classifyArgumentType(it->type);
+    for (auto &I : FI.arguments())
+      I.info = classifyArgumentType(I.type);
   }
 
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -395,7 +389,7 @@
 
 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
   if (isAggregateTypeForABI(Ty)) {
-    // Records with non trivial destructors/constructors should not be passed
+    // Records with non-trivial destructors/constructors should not be passed
     // by value.
     if (isRecordReturnIndirect(Ty, getCXXABI()))
       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
@@ -440,9 +434,9 @@
   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType RetTy) const;
 
-  virtual void computeInfo(CGFunctionInfo &FI) const;
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -454,9 +448,8 @@
 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
 
-    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-         it != ie; ++it)
-      it->info = classifyArgumentType(it->type);
+    for (auto &I : FI.arguments())
+      I.info = classifyArgumentType(I.type);
   }
 
 llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -526,6 +519,16 @@
 // X86-32 ABI Implementation
 //===----------------------------------------------------------------------===//
 
+/// \brief Similar to llvm::CCState, but for Clang.
+struct CCState {
+  CCState(unsigned CC) : CC(CC), FreeRegs(0) {}
+
+  unsigned CC;
+  unsigned FreeRegs;
+  unsigned StackOffset;
+  bool UseInAlloca;
+};
+
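The CCState introduced above bundles the calling convention with the remaining integer-register budget that was previously threaded through as separate FreeRegs/IsFastCall parameters. A rough standalone sketch of how such a budget is seeded per calling convention (the Demo* names and enum values are stand-ins, not part of this patch; only the 2/regparm/default rule is taken from the x86-32 logic below):

    #include <cstdio>

    struct DemoCCState {
      explicit DemoCCState(unsigned CC) : CC(CC), FreeRegs(0) {}
      unsigned CC;       // calling convention of the function being classified
      unsigned FreeRegs; // integer registers still available for arguments
    };

    enum DemoCC { DemoCDecl = 0, DemoFastCall = 1 };

    static DemoCCState seedState(unsigned CC, bool HasRegParm, unsigned RegParm,
                                 unsigned DefaultNumRegisterParameters) {
      DemoCCState State(CC);
      if (CC == DemoFastCall)
        State.FreeRegs = 2;        // fastcall: first arguments go in ECX/EDX
      else if (HasRegParm)
        State.FreeRegs = RegParm;  // __attribute__((regparm(N)))
      else
        State.FreeRegs = DefaultNumRegisterParameters;
      return State;
    }

    int main() {
      DemoCCState S = seedState(DemoFastCall, false, 0, 0);
      std::printf("fastcall free regs: %u\n", S.FreeRegs); // prints 2
    }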
 /// X86_32ABIInfo - The X86-32 ABI information.
 class X86_32ABIInfo : public ABIInfo {
   enum Class {
@@ -544,30 +547,37 @@
     return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
   }
 
-  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context, 
-                                          unsigned callingConvention);
+  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
+                                  bool IsInstanceMethod) const;
 
   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
   /// such that the argument will be passed in memory.
-  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
-                               unsigned &FreeRegs) const;
+  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
+
+  ABIArgInfo getIndirectReturnResult(CCState &State) const;
 
   /// \brief Return the alignment to use for the given type on the stack.
   unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
 
   Class classify(QualType Ty) const;
-  ABIArgInfo classifyReturnType(QualType RetTy,
-                                unsigned callingConvention) const;
-  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs,
-                                  bool IsFastCall) const;
-  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
-                      bool IsFastCall, bool &NeedsPadding) const;
+  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State,
+                                bool IsInstanceMethod) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
+  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;
+
+  /// \brief Rewrite the function info so that all memory arguments use
+  /// inalloca.
+  void rewriteWithInAlloca(CGFunctionInfo &FI) const;
+
+  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
+                           unsigned &StackOffset, ABIArgInfo &Info,
+                           QualType Type) const;
 
 public:
 
-  virtual void computeInfo(CGFunctionInfo &FI) const;
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 
   X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                 unsigned r)
@@ -585,24 +595,25 @@
       const llvm::Triple &Triple, const CodeGenOptions &Opts);
 
   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
-                           CodeGen::CodeGenModule &CGM) const;
+                           CodeGen::CodeGenModule &CGM) const override;
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
     // Darwin uses different dwarf register numbers for EH.
     if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
     return 4;
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const;
+                               llvm::Value *Address) const override;
 
   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                   StringRef Constraint,
-                                  llvm::Type* Ty) const {
+                                  llvm::Type* Ty) const override {
     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
   }
 
-  llvm::Constant *getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
+  llvm::Constant *
+  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
     unsigned Sig = (0xeb << 0) |  // jmp rel8
                    (0x06 << 8) |  //           .+0x08
                    ('F' << 16) |
@@ -616,9 +627,8 @@
 
 /// shouldReturnTypeInRegister - Determine if the given type should be
 /// passed in a register (for the Darwin ABI).
-bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
-                                               ASTContext &Context,
-                                               unsigned callingConvention) {
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
+                                               bool IsInstanceMethod) const {
   uint64_t Size = Context.getTypeSize(Ty);
 
   // Type must be register sized.
@@ -644,7 +654,7 @@
   // Arrays are treated like records.
   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
     return shouldReturnTypeInRegister(AT->getElementType(), Context,
-                                      callingConvention);
+                                      IsInstanceMethod);
 
   // Otherwise, it must be a record type.
   const RecordType *RT = Ty->getAs<RecordType>();
@@ -654,31 +664,35 @@
 
   // For thiscall conventions, structures will never be returned in
   // a register.  This is for compatibility with the MSVC ABI
-  if (callingConvention == llvm::CallingConv::X86_ThisCall && 
-      RT->isStructureType()) {
+  if (IsWin32StructABI && IsInstanceMethod && RT->isStructureType())
     return false;
-  }
 
   // Structure types are passed in register if all fields would be
   // passed in a register.
-  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
-         e = RT->getDecl()->field_end(); i != e; ++i) {
-    const FieldDecl *FD = *i;
-
+  for (const auto *FD : RT->getDecl()->fields()) {
     // Empty fields are ignored.
     if (isEmptyField(Context, FD, true))
       continue;
 
     // Check fields recursively.
-    if (!shouldReturnTypeInRegister(FD->getType(), Context, 
-                                    callingConvention))
+    if (!shouldReturnTypeInRegister(FD->getType(), Context, IsInstanceMethod))
       return false;
   }
   return true;
 }
 
-ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 
-                                            unsigned callingConvention) const {
+ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
+  // If the return value is indirect, then the hidden argument is consuming one
+  // integer register.
+  if (State.FreeRegs) {
+    --State.FreeRegs;
+    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
+  }
+  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
+}
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, CCState &State,
+                                             bool IsInstanceMethod) const {
   if (RetTy->isVoidType())
     return ABIArgInfo::getIgnore();
 
@@ -701,7 +715,7 @@
         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                             Size));
 
-      return ABIArgInfo::getIndirect(0);
+      return getIndirectReturnResult(State);
     }
 
     return ABIArgInfo::getDirect();
@@ -710,21 +724,20 @@
   if (isAggregateTypeForABI(RetTy)) {
     if (const RecordType *RT = RetTy->getAs<RecordType>()) {
       if (isRecordReturnIndirect(RT, getCXXABI()))
-        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+        return getIndirectReturnResult(State);
 
       // Structures with flexible arrays are always indirect.
       if (RT->getDecl()->hasFlexibleArrayMember())
-        return ABIArgInfo::getIndirect(0);
+        return getIndirectReturnResult(State);
     }
 
     // If specified, structs and unions are always indirect.
     if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
-      return ABIArgInfo::getIndirect(0);
+      return getIndirectReturnResult(State);
 
     // Small structures which are register sized are generally returned
     // in a register.
-    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(), 
-                                                  callingConvention)) {
+    if (shouldReturnTypeInRegister(RetTy, getContext(), IsInstanceMethod)) {
       uint64_t Size = getContext().getTypeSize(RetTy);
 
       // As a special-case, if the struct is a "single-element" struct, and
@@ -742,7 +755,7 @@
       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
     }
 
-    return ABIArgInfo::getIndirect(0);
+    return getIndirectReturnResult(State);
   }
 
   // Treat an enum type as its underlying type.
@@ -765,13 +778,11 @@
 
   // If this is a C++ record, check the bases first.
   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
-    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
-           e = CXXRD->bases_end(); i != e; ++i)
-      if (!isRecordWithSSEVectorType(Context, i->getType()))
+    for (const auto &I : CXXRD->bases())
+      if (!isRecordWithSSEVectorType(Context, I.getType()))
         return false;
 
-  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-       i != e; ++i) {
+  for (const auto *i : RD->fields()) {
     QualType FT = i->getType();
 
     if (isSSEVectorType(Context, FT))
@@ -806,10 +817,10 @@
 }
 
 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
-                                            unsigned &FreeRegs) const {
+                                            CCState &State) const {
   if (!ByVal) {
-    if (FreeRegs) {
-      --FreeRegs; // Non byval indirects just use one pointer.
+    if (State.FreeRegs) {
+      --State.FreeRegs; // Non-byval indirects just use one pointer.
       return ABIArgInfo::getIndirectInReg(0, false);
     }
     return ABIArgInfo::getIndirect(0, false);
@@ -819,15 +830,12 @@
   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
   unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
   if (StackAlign == 0)
-    return ABIArgInfo::getIndirect(4);
+    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);
 
   // If the stack alignment is less than the type alignment, realign the
   // argument.
-  if (StackAlign < TypeAlign)
-    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
-                                   /*Realign=*/true);
-
-  return ABIArgInfo::getIndirect(StackAlign);
+  bool Realign = TypeAlign > StackAlign;
+  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
 }
 
 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
@@ -843,8 +851,8 @@
   return Integer;
 }
 
-bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
-                                   bool IsFastCall, bool &NeedsPadding) const {
+bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
+                                   bool &NeedsPadding) const {
   NeedsPadding = false;
   Class C = classify(Ty);
   if (C == Float)
@@ -856,14 +864,14 @@
   if (SizeInRegs == 0)
     return false;
 
-  if (SizeInRegs > FreeRegs) {
-    FreeRegs = 0;
+  if (SizeInRegs > State.FreeRegs) {
+    State.FreeRegs = 0;
     return false;
   }
 
-  FreeRegs -= SizeInRegs;
+  State.FreeRegs -= SizeInRegs;
 
-  if (IsFastCall) {
+  if (State.CC == llvm::CallingConv::X86_FastCall) {
     if (Size > 32)
       return false;
 
@@ -876,7 +884,7 @@
     if (Ty->isReferenceType())
       return true;
 
-    if (FreeRegs)
+    if (State.FreeRegs)
       NeedsPadding = true;
 
     return false;
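The accounting in shouldUseInReg charges one register per 32-bit chunk and, once an argument does not fit, zeroes the budget so that no later argument is split between registers and memory. The same rule as a self-contained helper (simplified sketch; the fastcall-specific size and pointer checks are deliberately left out):

    #include <cassert>

    static bool consumeRegs(unsigned &FreeRegs, unsigned TypeSizeInBits) {
      unsigned SizeInRegs = (TypeSizeInBits + 31) / 32;
      if (SizeInRegs == 0)
        return false;
      if (SizeInRegs > FreeRegs) {
        FreeRegs = 0; // the argument spills; later ones get no partial registers
        return false;
      }
      FreeRegs -= SizeInRegs;
      return true;
    }

    int main() {
      unsigned FreeRegs = 3;              // e.g. __attribute__((regparm(3)))
      assert(consumeRegs(FreeRegs, 64));  // an i64 takes two registers
      assert(consumeRegs(FreeRegs, 32));  // an i32 takes the last one
      assert(!consumeRegs(FreeRegs, 32)); // nothing left
      assert(FreeRegs == 0);
      return 0;
    }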
@@ -886,20 +894,26 @@
 }
 
 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
-                                               unsigned &FreeRegs,
-                                               bool IsFastCall) const {
+                                               CCState &State) const {
   // FIXME: Set alignment on indirect arguments.
   if (isAggregateTypeForABI(Ty)) {
     if (const RecordType *RT = Ty->getAs<RecordType>()) {
-      if (IsWin32StructABI)
-        return getIndirectResult(Ty, true, FreeRegs);
+      // Check with the C++ ABI first.
+      CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+      if (RAA == CGCXXABI::RAA_Indirect) {
+        return getIndirectResult(Ty, false, State);
+      } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
+        // The field index doesn't matter, we'll fix it up later.
+        return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
+      }
 
-      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
-        return getIndirectResult(Ty, RAA == CGCXXABI::RAA_DirectInMemory, FreeRegs);
+      // Structs are always byval on win32, regardless of what they contain.
+      if (IsWin32StructABI)
+        return getIndirectResult(Ty, true, State);
 
       // Structures with flexible arrays are always indirect.
       if (RT->getDecl()->hasFlexibleArrayMember())
-        return getIndirectResult(Ty, true, FreeRegs);
+        return getIndirectResult(Ty, true, State);
     }
 
     // Ignore empty structs/unions.
@@ -909,7 +923,7 @@
     llvm::LLVMContext &LLVMContext = getVMContext();
     llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
     bool NeedsPadding;
-    if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
+    if (shouldUseInReg(Ty, State, NeedsPadding)) {
       unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
       SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
       llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
@@ -923,9 +937,10 @@
     // optimizations.
     if (getContext().getTypeSize(Ty) <= 4*32 &&
         canExpandIndirectArgument(Ty, getContext()))
-      return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);
+      return ABIArgInfo::getExpandWithPadding(
+          State.CC == llvm::CallingConv::X86_FastCall, PaddingType);
 
-    return getIndirectResult(Ty, true, FreeRegs);
+    return getIndirectResult(Ty, true, State);
   }
 
   if (const VectorType *VT = Ty->getAs<VectorType>()) {
@@ -950,7 +965,7 @@
     Ty = EnumTy->getDecl()->getIntegerType();
 
   bool NeedsPadding;
-  bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);
+  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);
 
   if (Ty->isPromotableIntegerType()) {
     if (InReg)
@@ -963,32 +978,107 @@
 }
 
 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
-  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
-                                          FI.getCallingConvention());
-
-  unsigned CC = FI.getCallingConvention();
-  bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
-  unsigned FreeRegs;
-  if (IsFastCall)
-    FreeRegs = 2;
+  CCState State(FI.getCallingConvention());
+  if (State.CC == llvm::CallingConv::X86_FastCall)
+    State.FreeRegs = 2;
   else if (FI.getHasRegParm())
-    FreeRegs = FI.getRegParm();
+    State.FreeRegs = FI.getRegParm();
   else
-    FreeRegs = DefaultNumRegisterParameters;
+    State.FreeRegs = DefaultNumRegisterParameters;
 
-  // If the return value is indirect, then the hidden argument is consuming one
-  // integer register.
-  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
-    --FreeRegs;
-    ABIArgInfo &Old = FI.getReturnInfo();
-    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
-                                       Old.getIndirectByVal(),
-                                       Old.getIndirectRealign());
+  FI.getReturnInfo() =
+      classifyReturnType(FI.getReturnType(), State, FI.isInstanceMethod());
+
+  // On win32, use the x86_cdeclmethodcc convention for cdecl methods that use
+  // sret.  This convention swaps the order of the first two parameters behind
+  // the scenes to match MSVC.
+  if (IsWin32StructABI && FI.isInstanceMethod() &&
+      FI.getCallingConvention() == llvm::CallingConv::C &&
+      FI.getReturnInfo().isIndirect())
+    FI.setEffectiveCallingConvention(llvm::CallingConv::X86_CDeclMethod);
+
+  bool UsedInAlloca = false;
+  for (auto &I : FI.arguments()) {
+    I.info = classifyArgumentType(I.type, State);
+    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
   }
 
-  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it)
-    it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
+  // If we needed to use inalloca for any argument, do a second pass and rewrite
+  // all the memory arguments to use inalloca.
+  if (UsedInAlloca)
+    rewriteWithInAlloca(FI);
+}
+
+void
+X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
+                                   unsigned &StackOffset,
+                                   ABIArgInfo &Info, QualType Type) const {
+  // Insert padding bytes to respect alignment.  For x86_32, each argument is
+  // 4-byte aligned.
+  unsigned Align = 4U;
+  if (Info.getKind() == ABIArgInfo::Indirect && Info.getIndirectByVal())
+    Align = std::max(Align, Info.getIndirectAlign());
+  if (StackOffset & (Align - 1)) {
+    unsigned OldOffset = StackOffset;
+    StackOffset = llvm::RoundUpToAlignment(StackOffset, Align);
+    unsigned NumBytes = StackOffset - OldOffset;
+    assert(NumBytes);
+    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
+    Ty = llvm::ArrayType::get(Ty, NumBytes);
+    FrameFields.push_back(Ty);
+  }
+
+  Info = ABIArgInfo::getInAlloca(FrameFields.size());
+  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
+  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();
+}
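addFieldToArgStruct keeps the packed frame consistent by inserting an explicit [N x i8] padding field whenever the running offset is not aligned for the next argument. The padding arithmetic in isolation (helper names are hypothetical; power-of-two alignment is assumed, as in the code above):

    #include <cstdio>

    static unsigned roundUpToAlignment(unsigned Value, unsigned Align) {
      return (Value + Align - 1) / Align * Align;
    }

    // Number of i8 padding bytes needed before a field with this alignment when
    // the running offset currently sits at StackOffset.
    static unsigned paddingBytes(unsigned StackOffset, unsigned Align) {
      if (StackOffset & (Align - 1))
        return roundUpToAlignment(StackOffset, Align) - StackOffset;
      return 0;
    }

    int main() {
      // A 1-byte field left the offset at 5; a 4-byte-aligned field needs a
      // [3 x i8] padding field first.
      std::printf("padding: %u\n", paddingBytes(5, 4)); // prints 3
    }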
+
+void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
+  assert(IsWin32StructABI && "inalloca only supported on win32");
+
+  // Build a packed struct type for all of the arguments in memory.
+  SmallVector<llvm::Type *, 6> FrameFields;
+
+  unsigned StackOffset = 0;
+
+  // Put the sret parameter into the inalloca struct if it's in memory.
+  ABIArgInfo &Ret = FI.getReturnInfo();
+  if (Ret.isIndirect() && !Ret.getInReg()) {
+    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
+    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
+    // On Windows, the hidden sret parameter is always returned in eax.
+    Ret.setInAllocaSRet(IsWin32StructABI);
+  }
+
+  // Skip the 'this' parameter in ecx.
+  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
+  if (FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall)
+    ++I;
+
+  // Put arguments passed in memory into the struct.
+  for (; I != E; ++I) {
+
+    // Leave ignored and inreg arguments alone.
+    switch (I->info.getKind()) {
+    case ABIArgInfo::Indirect:
+      assert(I->info.getIndirectByVal());
+      break;
+    case ABIArgInfo::Ignore:
+      continue;
+    case ABIArgInfo::Direct:
+    case ABIArgInfo::Extend:
+      if (I->info.getInReg())
+        continue;
+      break;
+    default:
+      break;
+    }
+
+    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
+  }
+
+  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
+                                        /*isPacked=*/true));
 }
 
 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -1219,10 +1309,10 @@
     return false;
   }
 
-  virtual void computeInfo(CGFunctionInfo &FI) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
 
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
@@ -1233,10 +1323,10 @@
 public:
   WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
 
-  virtual void computeInfo(CGFunctionInfo &FI) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
 
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -1248,12 +1338,12 @@
     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
   }
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
     return 7;
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const {
+                               llvm::Value *Address) const override {
     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
 
     // 0-15 are the 16 integer registers.
@@ -1264,12 +1354,12 @@
 
   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                   StringRef Constraint,
-                                  llvm::Type* Ty) const {
+                                  llvm::Type* Ty) const override {
     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
   }
 
   bool isNoProtoCallVariadic(const CallArgList &args,
-                             const FunctionNoProtoType *fnType) const {
+                             const FunctionNoProtoType *fnType) const override {
     // The default CC on x86-64 sets %al to the number of SSA
     // registers used, and GCC sets this when calling an unprototyped
     // function, so we override the default behavior.  However, don't do
@@ -1293,7 +1383,8 @@
     return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
   }
 
-  llvm::Constant *getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
+  llvm::Constant *
+  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
     unsigned Sig = (0xeb << 0) |  // jmp rel8
                    (0x0a << 8) |  //           .+0x0c
                    ('F' << 16) |
@@ -1319,14 +1410,14 @@
     : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
 
   void getDependentLibraryOption(llvm::StringRef Lib,
-                                 llvm::SmallString<24> &Opt) const {
+                                 llvm::SmallString<24> &Opt) const override {
     Opt = "/DEFAULTLIB:";
     Opt += qualifyWindowsLibrary(Lib);
   }
 
   void getDetectMismatchOption(llvm::StringRef Name,
                                llvm::StringRef Value,
-                               llvm::SmallString<32> &Opt) const {
+                               llvm::SmallString<32> &Opt) const override {
     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
   }
 };
@@ -1336,12 +1427,12 @@
   WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
     : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
     return 7;
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const {
+                               llvm::Value *Address) const override {
     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
 
     // 0-15 are the 16 integer registers.
@@ -1351,14 +1442,14 @@
   }
 
   void getDependentLibraryOption(llvm::StringRef Lib,
-                                 llvm::SmallString<24> &Opt) const {
+                                 llvm::SmallString<24> &Opt) const override {
     Opt = "/DEFAULTLIB:";
     Opt += qualifyWindowsLibrary(Lib);
   }
 
   void getDetectMismatchOption(llvm::StringRef Name,
                                llvm::StringRef Value,
-                               llvm::SmallString<32> &Opt) const {
+                               llvm::SmallString<32> &Opt) const override {
     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
   }
 };
@@ -1642,12 +1733,11 @@
 
     // If this is a C++ record, classify the bases first.
     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
-      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
-             e = CXXRD->bases_end(); i != e; ++i) {
-        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+      for (const auto &I : CXXRD->bases()) {
+        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
                "Unexpected base class!");
         const CXXRecordDecl *Base =
-          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
         // Classify this field.
         //
@@ -1657,7 +1747,7 @@
         Class FieldLo, FieldHi;
         uint64_t Offset =
           OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
-        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
+        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
         Lo = merge(Lo, FieldLo);
         Hi = merge(Hi, FieldHi);
         if (Lo == Memory || Hi == Memory)
@@ -1887,19 +1977,18 @@
 
     // If this is a C++ record, check the bases first.
     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
-      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
-           e = CXXRD->bases_end(); i != e; ++i) {
-        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+      for (const auto &I : CXXRD->bases()) {
+        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
                "Unexpected base class!");
         const CXXRecordDecl *Base =
-          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
 
         // If the base is after the span we care about, ignore it.
         unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
         if (BaseOffset >= EndBit) continue;
 
         unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
-        if (!BitsContainNoUserData(i->getType(), BaseStart,
+        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                    EndBit-BaseOffset, Context))
           return false;
       }
@@ -2643,7 +2732,7 @@
       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
 
     // FIXME: mingw-w64-gcc emits 128-bit struct as i128
-    if (Size == 128 && getTarget().getTriple().getOS() == llvm::Triple::MinGW32)
+    if (Size == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                           Size));
 
@@ -2668,9 +2757,8 @@
   QualType RetTy = FI.getReturnType();
   FI.getReturnInfo() = classify(RetTy, true);
 
-  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it)
-    it->info = classify(it->type, false);
+  for (auto &I : FI.arguments())
+    I.info = classify(I.type, false);
 }
 
 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -2701,9 +2789,9 @@
  public:
   NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
       : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
-  virtual void computeInfo(CGFunctionInfo &FI) const;
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
  private:
   PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
   X86_64ABIInfo NInfo; // Used for everything else.
@@ -2739,13 +2827,13 @@
 public:
   PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
     // This is recovered from gcc output.
     return 1; // r1 is the dedicated stack pointer
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const;
+                               llvm::Value *Address) const override;
 };
 
 }
@@ -2811,29 +2899,27 @@
   // floating-point value) to avoid pushing them to memory on function
   // entry.  This would require changing the logic in PPCISelLowering
   // when lowering the parameters in the caller and args in the callee.
-  virtual void computeInfo(CGFunctionInfo &FI) const {
+  void computeInfo(CGFunctionInfo &FI) const override {
     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-         it != ie; ++it) {
+    for (auto &I : FI.arguments()) {
       // We rely on the default argument classification for the most part.
       // One exception:  An aggregate containing a single floating-point
       // or vector item must be passed in a register if one is available.
-      const Type *T = isSingleElementStruct(it->type, getContext());
+      const Type *T = isSingleElementStruct(I.type, getContext());
       if (T) {
         const BuiltinType *BT = T->getAs<BuiltinType>();
         if (T->isVectorType() || (BT && BT->isFloatingPoint())) {
           QualType QT(T, 0);
-          it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
+          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
           continue;
         }
       }
-      it->info = classifyArgumentType(it->type);
+      I.info = classifyArgumentType(I.type);
     }
   }
 
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 
-                                 QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -2841,26 +2927,26 @@
   PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
     : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
     // This is recovered from gcc output.
     return 1; // r1 is the dedicated stack pointer
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const;
+                               llvm::Value *Address) const override;
 };
 
 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
 public:
   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
     // This is recovered from gcc output.
     return 1; // r1 is the dedicated stack pointer
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const;
+                               llvm::Value *Address) const override;
 };
 
 }
@@ -3049,6 +3135,569 @@
 }
 
 //===----------------------------------------------------------------------===//
+// ARM64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARM64ABIInfo : public ABIInfo {
+public:
+  enum ABIKind {
+    AAPCS = 0,
+    DarwinPCS
+  };
+
+private:
+  ABIKind Kind;
+
+public:
+  ARM64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
+
+private:
+  ABIKind getABIKind() const { return Kind; }
+  bool isDarwinPCS() const { return Kind == DarwinPCS; }
+
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &AllocatedVFP,
+                                  bool &IsHA, unsigned &AllocatedGPR,
+                                  bool &IsSmallAggr) const;
+  bool isIllegalVectorType(QualType Ty) const;
+
+  virtual void computeInfo(CGFunctionInfo &FI) const {
+    // To correctly handle Homogeneous Aggregates, we need to keep track of the
+    // number of SIMD and Floating-point registers allocated so far.
+    // If the argument is an HFA or an HVA and there are sufficient unallocated
+    // SIMD and Floating-point registers, then the argument is allocated to SIMD
+    // and Floating-point Registers (with one register per member of the HFA or
+    // HVA). Otherwise, the NSRN is set to 8.
+    unsigned AllocatedVFP = 0;
+    // To correctly handle small aggregates, we need to keep track of the number
+    // of GPRs allocated so far. If the small aggregate can't fit entirely into
+    // registers, it is passed on the stack; we don't allow an aggregate to be
+    // split between registers and the stack.
+    unsigned AllocatedGPR = 0;
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+         it != ie; ++it) {
+      unsigned PreAllocation = AllocatedVFP, PreGPR = AllocatedGPR;
+      bool IsHA = false, IsSmallAggr = false;
+      const unsigned NumVFPs = 8;
+      const unsigned NumGPRs = 8;
+      it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
+                                      AllocatedGPR, IsSmallAggr);
+      // If we do not have enough VFP registers for the HA, any VFP registers
+      // that are unallocated are marked as unavailable. To achieve this, we add
+      // padding of (NumVFPs - PreAllocation) floats.
+      if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
+        llvm::Type *PaddingTy = llvm::ArrayType::get(
+            llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
+        if (isDarwinPCS())
+          it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
+        else {
+          // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
+          // as sequences of floats since they'll get "holes" inserted as
+          // padding by the back end.
+          uint32_t NumStackSlots = getContext().getTypeSize(it->type);
+          NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
+
+          llvm::Type *CoerceTy = llvm::ArrayType::get(
+              llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
+          it->info = ABIArgInfo::getDirect(CoerceTy, 0, PaddingTy);
+        }
+      }
+      // If we do not have enough GPRs for the small aggregate, any GPRs
+      // that are unallocated are marked as unavailable.
+      if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
+        llvm::Type *PaddingTy = llvm::ArrayType::get(
+            llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreGPR);
+        it->info =
+            ABIArgInfo::getDirect(it->info.getCoerceToType(), 0, PaddingTy);
+      }
+    }
+  }
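When an HFA spills, the unallocated VFP registers are blocked with (NumVFPs - PreAllocation) padding floats, and under AAPCS the argument is re-coerced to whole 64-bit stack slots so the backend does not introduce holes. The two quantities involved, computed in isolation (illustrative only, no Clang types):

    #include <cstdint>
    #include <cstdio>

    // Padding floats that block the VFP registers left over before a spilled HFA.
    static unsigned paddingFloats(unsigned NumVFPs, unsigned PreAllocation) {
      return NumVFPs - PreAllocation;
    }

    // Whole 64-bit stack slots an AAPCS argument occupies once it goes to memory.
    static uint64_t stackSlots(uint64_t TypeSizeBits) {
      return (TypeSizeBits + 63) / 64;
    }

    int main() {
      // Six of the eight VFP registers are already taken when a four-float HFA
      // (128 bits) arrives: it spills, the last two registers are padded out,
      // and the argument occupies two 64-bit stack slots.
      std::printf("padding floats: %u, stack slots: %llu\n",
                  paddingFloats(8, 6), (unsigned long long)stackSlots(128));
    }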
+
+  llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
+                               CodeGenFunction &CGF) const;
+
+  llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
+                              CodeGenFunction &CGF) const;
+
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const {
+    return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
+                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
+  }
+};
+
+class ARM64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  ARM64TargetCodeGenInfo(CodeGenTypes &CGT, ARM64ABIInfo::ABIKind Kind)
+      : TargetCodeGenInfo(new ARM64ABIInfo(CGT, Kind)) {}
+
+  StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+    return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
+  }
+
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; }
+
+  virtual bool doesReturnSlotInterfereWithArgs() const { return false; }
+};
+}
+
+static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+                                   ASTContext &Context,
+                                   uint64_t *HAMembers = 0);
+
+ABIArgInfo ARM64ABIInfo::classifyArgumentType(QualType Ty,
+                                              unsigned &AllocatedVFP,
+                                              bool &IsHA,
+                                              unsigned &AllocatedGPR,
+                                              bool &IsSmallAggr) const {
+  // Handle illegal vector types here.
+  if (isIllegalVectorType(Ty)) {
+    uint64_t Size = getContext().getTypeSize(Ty);
+    if (Size <= 32) {
+      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
+      AllocatedGPR++;
+      return ABIArgInfo::getDirect(ResType);
+    }
+    if (Size == 64) {
+      llvm::Type *ResType =
+          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
+      AllocatedVFP++;
+      return ABIArgInfo::getDirect(ResType);
+    }
+    if (Size == 128) {
+      llvm::Type *ResType =
+          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
+      AllocatedVFP++;
+      return ABIArgInfo::getDirect(ResType);
+    }
+    AllocatedGPR++;
+    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+  }
+  if (Ty->isVectorType())
+    // Size of a legal vector should be either 64 or 128.
+    AllocatedVFP++;
+  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+    if (BT->getKind() == BuiltinType::Half ||
+        BT->getKind() == BuiltinType::Float ||
+        BT->getKind() == BuiltinType::Double ||
+        BT->getKind() == BuiltinType::LongDouble)
+      AllocatedVFP++;
+  }
+
+  if (!isAggregateTypeForABI(Ty)) {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    if (!Ty->isFloatingType() && !Ty->isVectorType()) {
+      int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
+      AllocatedGPR += RegsNeeded;
+    }
+    return (Ty->isPromotableIntegerType() && isDarwinPCS()
+                ? ABIArgInfo::getExtend()
+                : ABIArgInfo::getDirect());
+  }
+
+  // Structures with either a non-trivial destructor or a non-trivial
+  // copy constructor are always indirect.
+  if (isRecordReturnIndirect(Ty, getCXXABI())) {
+    AllocatedGPR++;
+    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+  }
+
+  // Empty records are always ignored on Darwin, but actually passed in C++ mode
+  // elsewhere for GNU compatibility.
+  if (isEmptyRecord(getContext(), Ty, true)) {
+    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
+      return ABIArgInfo::getIgnore();
+
+    ++AllocatedGPR;
+    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+  }
+
+  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
+  const Type *Base = 0;
+  uint64_t Members = 0;
+  if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
+    AllocatedVFP += Members;
+    IsHA = true;
+    return ABIArgInfo::getExpand();
+  }
+
+  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
+  uint64_t Size = getContext().getTypeSize(Ty);
+  if (Size <= 128) {
+    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
+    AllocatedGPR += Size / 64;
+    IsSmallAggr = true;
+    // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment.
+    // For aggregates with 16-byte alignment, we use i128.
+    if (getContext().getTypeAlign(Ty) < 128 && Size == 128) {
+      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
+      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
+    }
+    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
+  }
+
+  AllocatedGPR++;
+  return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+}
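Small aggregates (at most 16 bytes) are coerced to an integer whose width is the size rounded up to a multiple of 64 bits, except that a 16-byte aggregate with less than 16-byte alignment becomes [2 x i64] instead of i128; each 64-bit chunk costs one GPR. The arithmetic by itself (illustrative sketch, no Clang types):

    #include <cstdint>
    #include <cstdio>

    struct SmallAggPlan {
      uint64_t CoercedBits; // width of the coerced integer (or i64 array) type
      unsigned GPRsUsed;    // one general-purpose register per 64-bit chunk
      bool UseI64Pair;      // [2 x i64] rather than i128 for under-aligned types
    };

    static SmallAggPlan planSmallAggregate(uint64_t SizeBits, uint64_t AlignBits) {
      SmallAggPlan P;
      P.CoercedBits = 64 * ((SizeBits + 63) / 64); // round up to 64-bit chunks
      P.GPRsUsed = static_cast<unsigned>(P.CoercedBits / 64);
      P.UseI64Pair = (P.CoercedBits == 128 && AlignBits < 128);
      return P;
    }

    int main() {
      // A 12-byte struct with 4-byte alignment widens to 128 bits, uses two
      // GPRs, and is coerced to [2 x i64] rather than i128.
      SmallAggPlan P = planSmallAggregate(/*SizeBits=*/96, /*AlignBits=*/32);
      std::printf("%llu bits, %u GPRs, i64 pair: %d\n",
                  (unsigned long long)P.CoercedBits, P.GPRsUsed, (int)P.UseI64Pair);
    }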
+
+ABIArgInfo ARM64ABIInfo::classifyReturnType(QualType RetTy) const {
+  if (RetTy->isVoidType())
+    return ABIArgInfo::getIgnore();
+
+  // Large vector types should be returned via memory.
+  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
+    return ABIArgInfo::getIndirect(0);
+
+  if (!isAggregateTypeForABI(RetTy)) {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+      RetTy = EnumTy->getDecl()->getIntegerType();
+
+    return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
+                                             : ABIArgInfo::getDirect());
+  }
+
+  // Structures with either a non-trivial destructor or a non-trivial
+  // copy constructor are always indirect.
+  if (isRecordReturnIndirect(RetTy, getCXXABI()))
+    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+  if (isEmptyRecord(getContext(), RetTy, true))
+    return ABIArgInfo::getIgnore();
+
+  const Type *Base = 0;
+  if (isHomogeneousAggregate(RetTy, Base, getContext()))
+    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
+    return ABIArgInfo::getDirect();
+
+  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
+  uint64_t Size = getContext().getTypeSize(RetTy);
+  if (Size <= 128) {
+    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
+    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
+  }
+
+  return ABIArgInfo::getIndirect(0);
+}
+
+/// isIllegalVectorType - check whether the vector type is legal for ARM64.
+bool ARM64ABIInfo::isIllegalVectorType(QualType Ty) const {
+  if (const VectorType *VT = Ty->getAs<VectorType>()) {
+    // Check whether VT is legal.
+    unsigned NumElements = VT->getNumElements();
+    uint64_t Size = getContext().getTypeSize(VT);
+    // NumElements should be power of 2 between 1 and 16.
+    if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
+      return true;
+    return Size != 64 && (Size != 128 || NumElements == 1);
+  }
+  return false;
+}
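The rule above: the element count must be a power of two no greater than 16, and the overall size must fill a 64- or 128-bit SIMD register, with single-element 128-bit vectors rejected. Restated as a standalone predicate (a zero-element guard is added here for safety, which the original does not need):

    #include <cassert>
    #include <cstdint>

    static bool isLegalVector(unsigned NumElements, uint64_t SizeBits) {
      // Element count: a power of two, at most 16.
      if (NumElements == 0 || (NumElements & (NumElements - 1)) != 0 ||
          NumElements > 16)
        return false;
      // Total size must be exactly 64 or 128 bits, and a single 128-bit element
      // does not count as a legal vector.
      return SizeBits == 64 || (SizeBits == 128 && NumElements != 1);
    }

    int main() {
      assert(isLegalVector(4, 128));  // e.g. <4 x i32>
      assert(isLegalVector(8, 64));   // e.g. <8 x i8>
      assert(!isLegalVector(3, 96));  // element count not a power of two
      assert(!isLegalVector(1, 128)); // single 128-bit element
      return 0;
    }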
+
+static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
+                                     int AllocatedGPR, int AllocatedVFP,
+                                     bool IsIndirect, CodeGenFunction &CGF) {
+  // The AArch64 va_list type and handling is specified in the Procedure Call
+  // Standard, section B.4:
+  //
+  // struct {
+  //   void *__stack;
+  //   void *__gr_top;
+  //   void *__vr_top;
+  //   int __gr_offs;
+  //   int __vr_offs;
+  // };
+
+  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
+  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
+  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+  auto &Ctx = CGF.getContext();
+
+  llvm::Value *reg_offs_p = 0, *reg_offs = 0;
+  int reg_top_index;
+  int RegSize;
+  if (AllocatedGPR) {
+    assert(!AllocatedVFP && "Arguments never split between int & VFP regs");
+    // 3 is the field number of __gr_offs
+    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
+    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
+    reg_top_index = 1; // field number for __gr_top
+    RegSize = 8 * AllocatedGPR;
+  } else {
+    assert(!AllocatedGPR && "Argument must go in VFP or int regs");
+    // 4 is the field number of __vr_offs.
+    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
+    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
+    reg_top_index = 2; // field number for __vr_top
+    RegSize = 16 * AllocatedVFP;
+  }
+
+  //=======================================
+  // Find out where argument was passed
+  //=======================================
+
+  // If reg_offs >= 0 we're already using the stack for this type of
+  // argument. We don't want to keep updating reg_offs (in case it overflows,
+  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
+  // whatever they get).
+  llvm::Value *UsingStack = 0;
+  UsingStack = CGF.Builder.CreateICmpSGE(
+      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
+
+  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
+
+  // Otherwise, at least some kind of argument could go in these registers; the
+  // question is whether this particular type is too big.
+  CGF.EmitBlock(MaybeRegBlock);
+
+  // Integer arguments may need register-alignment correction (for example a
+  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
+  // align __gr_offs to calculate the potential address.
+  if (AllocatedGPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
+    int Align = Ctx.getTypeAlign(Ty) / 8;
+
+    reg_offs = CGF.Builder.CreateAdd(
+        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
+        "align_regoffs");
+    reg_offs = CGF.Builder.CreateAnd(
+        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
+        "aligned_regoffs");
+  }
+
+  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
+  llvm::Value *NewOffset = 0;
+  NewOffset = CGF.Builder.CreateAdd(
+      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
+  CGF.Builder.CreateStore(NewOffset, reg_offs_p);
+
+  // Now we're in a position to decide whether this argument really was in
+  // registers or not.
+  llvm::Value *InRegs = 0;
+  InRegs = CGF.Builder.CreateICmpSLE(
+      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
+
+  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
+
+  //=======================================
+  // Argument was in registers
+  //=======================================
+
+  // Now we emit the code for if the argument was originally passed in
+  // registers. First start the appropriate block:
+  CGF.EmitBlock(InRegBlock);
+
+  llvm::Value *reg_top_p = 0, *reg_top = 0;
+  reg_top_p =
+      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
+  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
+  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
+  llvm::Value *RegAddr = 0;
+  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+
+  if (IsIndirect) {
+    // If it's been passed indirectly (actually a struct), whatever we find from
+    // stored registers or on the stack will actually be a struct **.
+    MemTy = llvm::PointerType::getUnqual(MemTy);
+  }
+
+  const Type *Base = 0;
+  uint64_t NumMembers;
+  if (isHomogeneousAggregate(Ty, Base, Ctx, &NumMembers) && NumMembers > 1) {
+    // Homogeneous aggregates passed in registers will have their elements split
+    // and stored 16 bytes apart regardless of size (they're notionally in qN,
+    // qN+1, ...). We reload and store into a temporary local variable
+    // contiguously.
+    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
+    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
+    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
+    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
+    int Offset = 0;
+
+    if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
+      Offset = 16 - Ctx.getTypeSize(Base) / 8;
+    for (unsigned i = 0; i < NumMembers; ++i) {
+      llvm::Value *BaseOffset =
+          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
+      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
+      LoadAddr = CGF.Builder.CreateBitCast(
+          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
+      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
+
+      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
+      CGF.Builder.CreateStore(Elem, StoreAddr);
+    }
+
+    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
+  } else {
+    // Otherwise the object is contiguous in memory
+    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
+    if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
+        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
+      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
+      BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
+
+      BaseAddr = CGF.Builder.CreateAdd(
+          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
+
+      BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
+    }
+
+    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
+  }
+
+  CGF.EmitBranch(ContBlock);
+
+  //=======================================
+  // Argument was on the stack
+  //=======================================
+  CGF.EmitBlock(OnStackBlock);
+
+  llvm::Value *stack_p = 0, *OnStackAddr = 0;
+  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
+  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
+
+  // Again, stack arguments may need realignment. In this case both integer and
+  // floating-point ones might be affected.
+  if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
+    int Align = Ctx.getTypeAlign(Ty) / 8;
+
+    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
+
+    OnStackAddr = CGF.Builder.CreateAdd(
+        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
+        "align_stack");
+    OnStackAddr = CGF.Builder.CreateAnd(
+        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
+        "align_stack");
+
+    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
+  }
+
+  uint64_t StackSize;
+  if (IsIndirect)
+    StackSize = 8;
+  else
+    StackSize = Ctx.getTypeSize(Ty) / 8;
+
+  // All stack slots are 8 bytes
+  StackSize = llvm::RoundUpToAlignment(StackSize, 8);
+
+  llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
+  llvm::Value *NewStack =
+      CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
+
+  // Write the new value of __stack for the next call to va_arg
+  CGF.Builder.CreateStore(NewStack, stack_p);
+
+  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
+      Ctx.getTypeSize(Ty) < 64) {
+    int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
+    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
+
+    OnStackAddr = CGF.Builder.CreateAdd(
+        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
+
+    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
+  }
+
+  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
+
+  CGF.EmitBranch(ContBlock);
+
+  //=======================================
+  // Tidy up
+  //=======================================
+  CGF.EmitBlock(ContBlock);
+
+  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
+  ResAddr->addIncoming(RegAddr, InRegBlock);
+  ResAddr->addIncoming(OnStackAddr, OnStackBlock);
+
+  if (IsIndirect)
+    return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
+
+  return ResAddr;
+}
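The emitted control flow reduces to: a non-negative saved offset means this register class is already exhausted and the argument lives on the stack; otherwise the offset is advanced by the argument's register footprint, and the save area is used only if the new offset is still at most zero. A host-side simulation of that decision (the struct and values are illustrative, not the generated IR):

    #include <cstdio>

    struct DemoVAList {
      int gr_offs; // negative while general-purpose save-area slots remain
      int vr_offs; // negative while FP/SIMD save-area slots remain
    };

    // Returns true if the next argument is read from the register save area,
    // and advances the matching offset the same way the emitted IR does.
    static bool nextArgInRegs(DemoVAList &VL, bool IsGPR, int RegsNeeded) {
      int &Offs = IsGPR ? VL.gr_offs : VL.vr_offs;
      if (Offs >= 0)
        return false;                      // this class already uses the stack
      const int RegSize = IsGPR ? 8 : 16;  // x registers vs. q registers
      Offs += RegsNeeded * RegSize;
      return Offs <= 0;                    // fits entirely in saved registers
    }

    int main() {
      DemoVAList VL = {/*gr_offs=*/-16, /*vr_offs=*/-64};
      std::printf("1st i64 in regs: %d\n", nextArgInRegs(VL, true, 1)); // 1
      std::printf("2nd i64 in regs: %d\n", nextArgInRegs(VL, true, 1)); // 1
      std::printf("3rd i64 in regs: %d\n", nextArgInRegs(VL, true, 1)); // 0
    }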
+
+llvm::Value *ARM64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                          CodeGenFunction &CGF) const {
+
+  unsigned AllocatedGPR = 0, AllocatedVFP = 0;
+  bool IsHA = false, IsSmallAggr = false;
+  ABIArgInfo AI =
+      classifyArgumentType(Ty, AllocatedVFP, IsHA, AllocatedGPR, IsSmallAggr);
+
+  return EmitAArch64VAArg(VAListAddr, Ty, AllocatedGPR, AllocatedVFP,
+                          AI.isIndirect(), CGF);
+}
+
+llvm::Value *ARM64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                           CodeGenFunction &CGF) const {
+  // We do not support va_arg for aggregates or illegal vector types.
+  // Lower VAArg here for these cases and use the LLVM va_arg instruction for
+  // other cases.
+  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
+    return 0;
+
+  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
+  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+
+  const Type *Base = 0;
+  bool isHA = isHomogeneousAggregate(Ty, Base, getContext());
+
+  bool isIndirect = false;
+  // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
+  // be passed indirectly.
+  if (Size > 16 && !isHA) {
+    isIndirect = true;
+    Size = 8;
+    Align = 8;
+  }
+
+  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+  CGBuilderTy &Builder = CGF.Builder;
+  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+
+  if (isEmptyRecord(getContext(), Ty, true)) {
+    // These are ignored for parameter passing purposes.
+    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+    return Builder.CreateBitCast(Addr, PTy);
+  }
+
+  const uint64_t MinABIAlign = 8;
+  if (Align > MinABIAlign) {
+    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
+    Addr = Builder.CreateGEP(Addr, Offset);
+    llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
+    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
+    llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
+    Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
+  }
+
+  uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
+  llvm::Value *NextAddr = Builder.CreateGEP(
+      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
+  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+  if (isIndirect)
+    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
+  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+  return AddrTyped;
+}
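The Darwin path only lowers aggregates and illegal vectors; for those it walks a plain byte pointer, aligning it up first when the type requires more than the 8-byte minimum and then advancing by the slot size rounded to that minimum. The pointer arithmetic standalone (illustrative sketch; addresses are plain integers here):

    #include <cstdint>
    #include <cstdio>

    static uint64_t alignUp(uint64_t Value, uint64_t Align) {
      return (Value + Align - 1) & ~(Align - 1); // Align must be a power of two
    }

    // Returns the address the argument is read from and bumps Cur to the next
    // slot, mirroring the ap.align / ap.next updates above.
    static uint64_t darwinVAArgSlot(uint64_t &Cur, uint64_t Size, uint64_t Align) {
      const uint64_t MinABIAlign = 8;
      if (Align > MinABIAlign)
        Cur = alignUp(Cur, Align);
      uint64_t Addr = Cur;
      Cur += alignUp(Size, MinABIAlign);
      return Addr;
    }

    int main() {
      // A 32-byte aggregate with 16-byte alignment, starting mid-slot.
      uint64_t Cur = 0x1004; // hypothetical current va_list position
      uint64_t Addr = darwinVAArgSlot(Cur, /*Size=*/32, /*Align=*/16);
      std::printf("arg at %#llx, next at %#llx\n",
                  (unsigned long long)Addr, (unsigned long long)Cur);
      // arg at 0x1010, next at 0x1030
    }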
+
+//===----------------------------------------------------------------------===//
 // ARM ABI Implementation
 //===----------------------------------------------------------------------===//
 
@@ -3064,35 +3713,62 @@
 
 private:
   ABIKind Kind;
+  mutable int VFPRegs[16];
+  const unsigned NumVFPs;
+  const unsigned NumGPRs;
+  mutable unsigned AllocatedGPRs;
+  mutable unsigned AllocatedVFPs;
 
 public:
-  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
+  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
+    NumVFPs(16), NumGPRs(4) {
     setRuntimeCC();
+    resetAllocatedRegs();
   }
 
   bool isEABI() const {
-    StringRef Env = getTarget().getTriple().getEnvironmentName();
-    return (Env == "gnueabi" || Env == "eabi" ||
-            Env == "android" || Env == "androideabi");
+    switch (getTarget().getTriple().getEnvironment()) {
+    case llvm::Triple::Android:
+    case llvm::Triple::EABI:
+    case llvm::Triple::EABIHF:
+    case llvm::Triple::GNUEABI:
+    case llvm::Triple::GNUEABIHF:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  bool isEABIHF() const {
+    switch (getTarget().getTriple().getEnvironment()) {
+    case llvm::Triple::EABIHF:
+    case llvm::Triple::GNUEABIHF:
+      return true;
+    default:
+      return false;
+    }
   }
 
   ABIKind getABIKind() const { return Kind; }
 
 private:
-  ABIArgInfo classifyReturnType(QualType RetTy) const;
-  ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs,
-                                  unsigned &AllocatedVFP,
-                                  bool &IsHA) const;
+  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, bool &IsHA, bool isVariadic,
+                                  bool &IsCPRC) const;
   bool isIllegalVectorType(QualType Ty) const;
 
-  virtual void computeInfo(CGFunctionInfo &FI) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
 
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 
   llvm::CallingConv::ID getLLVMDefaultCC() const;
   llvm::CallingConv::ID getABIDefaultCC() const;
   void setRuntimeCC();
+
+  void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
+  void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
+  void resetAllocatedRegs(void) const;
 };
 
 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -3104,16 +3780,16 @@
     return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
   }
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
     return 13;
   }
 
-  StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
     return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const {
+                               llvm::Value *Address) const override {
     llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
 
     // 0-15 are the 16 integer registers.
@@ -3121,13 +3797,13 @@
     return false;
   }
 
-  unsigned getSizeOfUnwindException() const {
+  unsigned getSizeOfUnwindException() const override {
     if (getABIInfo().isEABI()) return 88;
     return TargetCodeGenInfo::getSizeOfUnwindException();
   }
 
   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
-                           CodeGen::CodeGenModule &CGM) const {
+                           CodeGen::CodeGenModule &CGM) const override {
     const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
     if (!FD)
       return;
@@ -3176,24 +3852,40 @@
   // allocated to the lowest-numbered sequence of such registers.
   // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
   // unallocated are marked as unavailable. 
-  unsigned AllocatedVFP = 0;
-  int VFPRegs[16] = { 0 };
-  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it) {
-    unsigned PreAllocation = AllocatedVFP;
+  resetAllocatedRegs();
+
+  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
+  for (auto &I : FI.arguments()) {
+    unsigned PreAllocationVFPs = AllocatedVFPs;
+    unsigned PreAllocationGPRs = AllocatedGPRs;
     bool IsHA = false;
+    bool IsCPRC = false;
     // 6.1.2.3 There is one VFP co-processor register class using registers
     // s0-s15 (d0-d7) for passing arguments.
-    const unsigned NumVFPs = 16;
-    it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
+    I.info = classifyArgumentType(I.type, IsHA, FI.isVariadic(), IsCPRC);
+    assert((IsCPRC || !IsHA) && "Homogeneous aggregates must be CPRCs");
     // If we do not have enough VFP registers for the HA, any VFP registers
     // that are unallocated are marked as unavailable. To achieve this, we add
-    // padding of (NumVFPs - PreAllocation) floats.
-    if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
+    // padding of (NumVFPs - PreAllocationVFP) floats.
+    // Note that IsHA will only be set when using the AAPCS-VFP calling
+    // convention and the callee is not variadic.
+    if (IsHA && AllocatedVFPs > NumVFPs && PreAllocationVFPs < NumVFPs) {
       llvm::Type *PaddingTy = llvm::ArrayType::get(
-          llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
-      it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
+          llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocationVFPs);
+      I.info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
+    }
+
+    // If we have allocated some arguments onto the stack (due to running
+    // out of VFP registers), we cannot split an argument between GPRs and
+    // the stack. If this situation occurs, we add padding to prevent the
+    // GPRs from being used. In this situation, the current argument could
+    // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
+    // unusable anyway.
+    const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs;
+    if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs && StackUsed) {
+      llvm::Type *PaddingTy = llvm::ArrayType::get(
+          llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
+      I.info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
     }
   }
 
@@ -3209,7 +3901,7 @@
 /// Return the default calling convention that LLVM will use.
 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
   // The default calling convention that LLVM will infer.
-  if (getTarget().getTriple().getEnvironmentName()=="gnueabihf")
+  if (isEABIHF())
     return llvm::CallingConv::ARM_AAPCS_VFP;
   else if (isEABI())
     return llvm::CallingConv::ARM_AAPCS;
@@ -3243,8 +3935,7 @@
 /// contained in the type is returned through it; this is used for the
 /// recursive calls that check aggregate component types.
 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
-                                   ASTContext &Context,
-                                   uint64_t *HAMembers = 0) {
+                                   ASTContext &Context, uint64_t *HAMembers) {
   uint64_t Members = 0;
   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
     if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
@@ -3256,9 +3947,7 @@
       return false;
 
     Members = 0;
-    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-         i != e; ++i) {
-      const FieldDecl *FD = *i;
+    for (const auto *FD : RD->fields()) {
       uint64_t FldMembers;
       if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
         return false;
@@ -3293,10 +3982,29 @@
     const Type *TyPtr = Ty.getTypePtr();
     if (!Base)
       Base = TyPtr;
-    if (Base != TyPtr &&
-        (!Base->isVectorType() || !TyPtr->isVectorType() ||
-         Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
-      return false;
+
+    if (Base != TyPtr) {
+      // Homogeneous aggregates are defined as containing members with the
+      // same machine type. There are two cases in which two members have
+      // different TypePtrs but the same machine type:
+
+      // 1) Vectors of the same length, regardless of the type and number
+      //    of their members.
+      const bool SameLengthVectors = Base->isVectorType() && TyPtr->isVectorType()
+        && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
+
+      // 2) In the 32-bit AAPCS, `double' and `long double' have the same
+      //    machine type. This is not the case for the 64-bit AAPCS.
+      const bool SameSizeDoubles =
+           (   (   Base->isSpecificBuiltinType(BuiltinType::Double)
+                && TyPtr->isSpecificBuiltinType(BuiltinType::LongDouble))
+            || (   Base->isSpecificBuiltinType(BuiltinType::LongDouble)
+                && TyPtr->isSpecificBuiltinType(BuiltinType::Double)))
+        && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
+
+      if (!SameLengthVectors && !SameSizeDoubles)
+        return false;
+    }
   }
 
   // Homogeneous Aggregates can have at most 4 members of the base type.
@@ -3308,12 +4016,15 @@
 
 /// markAllocatedVFPs - update VFPRegs according to the alignment and
 /// number of VFP registers (unit is S register) requested.
-static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
-                              unsigned Alignment,
-                              unsigned NumRequired) {
+void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
+                                   unsigned NumRequired) const {
   // Early Exit.
-  if (AllocatedVFP >= 16)
+  if (AllocatedVFPs >= 16) {
+    // We use AllocatedVFPs > 16 to signal that some CPRCs were allocated on
+    // the stack.
+    AllocatedVFPs = 17;
     return;
+  }
   // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
   // VFP registers of the appropriate type unallocated then the argument is
   // allocated to the lowest-numbered sequence of such registers.
@@ -3327,7 +4038,7 @@
     if (FoundSlot) {
       for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
         VFPRegs[J] = 1;
-      AllocatedVFP += NumRequired;
+      AllocatedVFPs += NumRequired;
       return;
     }
   }
@@ -3335,12 +4046,32 @@
   // unallocated are marked as unavailable.
   for (unsigned I = 0; I < 16; I++)
     VFPRegs[I] = 1;
-  AllocatedVFP = 17; // We do not have enough VFP registers.
+  AllocatedVFPs = 17; // We do not have enough VFP registers.
 }
 
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
-                                            unsigned &AllocatedVFP,
-                                            bool &IsHA) const {
+/// Update AllocatedGPRs to record the number of general purpose registers
+/// which have been allocated. It is valid for AllocatedGPRs to go above 4;
+/// this represents arguments being stored on the stack.
+void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
+                                   unsigned NumRequired) const {
+  assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes");
+
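+  // Arguments requiring 8-byte alignment (Alignment == 2) must start in an
+  // even-numbered register, so skip a register if the count is odd.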
+  if (Alignment == 2 && AllocatedGPRs & 0x1)
+    AllocatedGPRs += 1;
+
+  AllocatedGPRs += NumRequired;
+}
+
+void ARMABIInfo::resetAllocatedRegs(void) const {
+  AllocatedGPRs = 0;
+  AllocatedVFPs = 0;
+  for (unsigned i = 0; i < NumVFPs; ++i)
+    VFPRegs[i] = 0;
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool &IsHA,
+                                            bool isVariadic,
+                                            bool &IsCPRC) const {
   // We update number of allocated VFPs according to
   // 6.1.2.1 The following argument types are VFP CPRCs:
   //   A single-precision floating-point type (including promoted
@@ -3356,55 +4087,82 @@
     if (Size <= 32) {
       llvm::Type *ResType =
           llvm::Type::getInt32Ty(getVMContext());
+      markAllocatedGPRs(1, 1);
       return ABIArgInfo::getDirect(ResType);
     }
     if (Size == 64) {
       llvm::Type *ResType = llvm::VectorType::get(
           llvm::Type::getInt32Ty(getVMContext()), 2);
-      markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
+      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){
+        markAllocatedGPRs(2, 2);
+      } else {
+        markAllocatedVFPs(2, 2);
+        IsCPRC = true;
+      }
       return ABIArgInfo::getDirect(ResType);
     }
     if (Size == 128) {
       llvm::Type *ResType = llvm::VectorType::get(
           llvm::Type::getInt32Ty(getVMContext()), 4);
-      markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4);
+      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
+        markAllocatedGPRs(2, 4);
+      } else {
+        markAllocatedVFPs(4, 4);
+        IsCPRC = true;
+      }
       return ABIArgInfo::getDirect(ResType);
     }
+    markAllocatedGPRs(1, 1);
     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
   }
   // Update VFPRegs for legal vector types.
-  if (const VectorType *VT = Ty->getAs<VectorType>()) {
-    uint64_t Size = getContext().getTypeSize(VT);
-    // Size of a legal vector should be power of 2 and above 64.
-    markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32);
+  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
+    if (const VectorType *VT = Ty->getAs<VectorType>()) {
+      uint64_t Size = getContext().getTypeSize(VT);
+      // Size of a legal vector should be power of 2 and above 64.
+      markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
+      IsCPRC = true;
+    }
   }
   // Update VFPRegs for floating point types.
-  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
-    if (BT->getKind() == BuiltinType::Half ||
-        BT->getKind() == BuiltinType::Float)
-      markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1);
-    if (BT->getKind() == BuiltinType::Double ||
-        BT->getKind() == BuiltinType::LongDouble)
-      markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
+  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
+    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+      if (BT->getKind() == BuiltinType::Half ||
+          BT->getKind() == BuiltinType::Float) {
+        markAllocatedVFPs(1, 1);
+        IsCPRC = true;
+      }
+      if (BT->getKind() == BuiltinType::Double ||
+          BT->getKind() == BuiltinType::LongDouble) {
+        markAllocatedVFPs(2, 2);
+        IsCPRC = true;
+      }
+    }
   }
 
   if (!isAggregateTypeForABI(Ty)) {
     // Treat an enum type as its underlying type.
-    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
       Ty = EnumTy->getDecl()->getIntegerType();
+    }
 
+    unsigned Size = getContext().getTypeSize(Ty);
+    if (!IsCPRC)
+      markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
     return (Ty->isPromotableIntegerType() ?
             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
   }
 
-  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+    markAllocatedGPRs(1, 1);
     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+  }
 
   // Ignore empty records.
   if (isEmptyRecord(getContext(), Ty, true))
     return ABIArgInfo::getIgnore();
 
-  if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
+  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
     // Homogeneous Aggregates need to be expanded when we can fit the aggregate
     // into VFP registers.
     const Type *Base = 0;
@@ -3415,16 +4173,17 @@
       if (Base->isVectorType()) {
         // ElementSize is in number of floats.
         unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
-        markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize,
+        markAllocatedVFPs(ElementSize,
                           Members * ElementSize);
       } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
-        markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members);
+        markAllocatedVFPs(1, Members);
       else {
         assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
                Base->isSpecificBuiltinType(BuiltinType::LongDouble));
-        markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2);
+        markAllocatedVFPs(2, Members * 2);
       }
       IsHA = true;
+      IsCPRC = true;
       return ABIArgInfo::getExpand();
     }
   }
@@ -3439,7 +4198,9 @@
       getABIKind() == ARMABIInfo::AAPCS)
     ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
   if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
-    return ABIArgInfo::getIndirect(0, /*ByVal=*/true,
+    // Update Allocated GPRs
+    markAllocatedGPRs(1, 1);
+    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
            /*Realign=*/TyAlign > ABIAlign);
   }
 
@@ -3451,9 +4212,11 @@
   if (getContext().getTypeAlign(Ty) <= 32) {
     ElemTy = llvm::Type::getInt32Ty(getVMContext());
     SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+    markAllocatedGPRs(1, SizeRegs);
   } else {
     ElemTy = llvm::Type::getInt64Ty(getVMContext());
     SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
+    markAllocatedGPRs(2, SizeRegs * 2);
   }
 
   llvm::Type *STy =
@@ -3546,13 +4309,16 @@
   return true;
 }
 
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
+                                          bool isVariadic) const {
   if (RetTy->isVoidType())
     return ABIArgInfo::getIgnore();
 
   // Large vector types should be returned via memory.
-  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
+  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
+    markAllocatedGPRs(1, 1);
     return ABIArgInfo::getIndirect(0);
+  }
 
   if (!isAggregateTypeForABI(RetTy)) {
     // Treat an enum type as its underlying type.
@@ -3565,8 +4331,10 @@
 
   // Structures with either a non-trivial destructor or a non-trivial
   // copy constructor are always indirect.
-  if (isRecordReturnIndirect(RetTy, getCXXABI()))
+  if (isRecordReturnIndirect(RetTy, getCXXABI())) {
+    markAllocatedGPRs(1, 1);
     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+  }
 
   // Are we following APCS?
   if (getABIKind() == APCS) {
@@ -3593,6 +4361,7 @@
     }
 
     // Otherwise return in memory.
+    markAllocatedGPRs(1, 1);
     return ABIArgInfo::getIndirect(0);
   }
 
@@ -3602,7 +4371,7 @@
     return ABIArgInfo::getIgnore();
 
   // Check for homogeneous aggregates with AAPCS-VFP.
-  if (getABIKind() == AAPCS_VFP) {
+  if (getABIKind() == AAPCS_VFP && !isVariadic) {
     const Type *Base = 0;
     if (isHomogeneousAggregate(RetTy, Base, getContext())) {
       assert(Base && "Base class should be set for homogeneous aggregate");
@@ -3623,6 +4392,7 @@
     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
   }
 
+  markAllocatedGPRs(1, 1);
   return ABIArgInfo::getIndirect(0);
 }
 
@@ -3720,9 +4490,9 @@
  public:
   NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
       : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
-  virtual void computeInfo(CGFunctionInfo &FI) const;
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
  private:
   PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
   ARMABIInfo NInfo; // Used for everything else.
@@ -3770,10 +4540,10 @@
   ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt,
                         llvm::Type *DirectTy = 0) const;
 
-  virtual void computeInfo(CGFunctionInfo &FI) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
 
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -3785,12 +4555,12 @@
     return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
   }
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
     return 31;
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const {
+                               llvm::Value *Address) const override {
     // 0-31 are x0-x30 and sp: 8 bytes each
     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31);
@@ -3813,9 +4583,8 @@
                                            FreeIntRegs, FreeVFPRegs);
 
   FreeIntRegs = FreeVFPRegs = 8;
-  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it) {
-    it->info = classifyGenericType(it->type, FreeIntRegs, FreeVFPRegs);
+  for (auto &I : FI.arguments()) {
+    I.info = classifyGenericType(I.type, FreeIntRegs, FreeVFPRegs);
 
   }
 }
@@ -3958,208 +4727,12 @@
 
 llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
-  // The AArch64 va_list type and handling is specified in the Procedure Call
-  // Standard, section B.4:
-  //
-  // struct {
-  //   void *__stack;
-  //   void *__gr_top;
-  //   void *__vr_top;
-  //   int __gr_offs;
-  //   int __vr_offs;
-  // };
-
-  assert(!CGF.CGM.getDataLayout().isBigEndian()
-         && "va_arg not implemented for big-endian AArch64");
-
   int FreeIntRegs = 8, FreeVFPRegs = 8;
   Ty = CGF.getContext().getCanonicalType(Ty);
   ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs);
 
-  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
-  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
-  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
-  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
-
-  llvm::Value *reg_offs_p = 0, *reg_offs = 0;
-  int reg_top_index;
-  int RegSize;
-  if (FreeIntRegs < 8) {
-    assert(FreeVFPRegs == 8 && "Arguments never split between int & VFP regs");
-    // 3 is the field number of __gr_offs
-    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
-    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
-    reg_top_index = 1; // field number for __gr_top
-    RegSize = 8 * (8 - FreeIntRegs);
-  } else {
-    assert(FreeVFPRegs < 8 && "Argument must go in VFP or int regs");
-    // 4 is the field number of __vr_offs.
-    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
-    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
-    reg_top_index = 2; // field number for __vr_top
-    RegSize = 16 * (8 - FreeVFPRegs);
-  }
-
-  //=======================================
-  // Find out where argument was passed
-  //=======================================
-
-  // If reg_offs >= 0 we're already using the stack for this type of
-  // argument. We don't want to keep updating reg_offs (in case it overflows,
-  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
-  // whatever they get).
-  llvm::Value *UsingStack = 0;
-  UsingStack = CGF.Builder.CreateICmpSGE(reg_offs,
-                                         llvm::ConstantInt::get(CGF.Int32Ty, 0));
-
-  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
-
-  // Otherwise, at least some kind of argument could go in these registers, the
-  // quesiton is whether this particular type is too big.
-  CGF.EmitBlock(MaybeRegBlock);
-
-  // Integer arguments may need to correct register alignment (for example a
-  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
-  // align __gr_offs to calculate the potential address.
-  if (FreeIntRegs < 8 && AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
-    int Align = getContext().getTypeAlign(Ty) / 8;
-
-    reg_offs = CGF.Builder.CreateAdd(reg_offs,
-                                 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
-                                 "align_regoffs");
-    reg_offs = CGF.Builder.CreateAnd(reg_offs,
-                                    llvm::ConstantInt::get(CGF.Int32Ty, -Align),
-                                    "aligned_regoffs");
-  }
-
-  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
-  llvm::Value *NewOffset = 0;
-  NewOffset = CGF.Builder.CreateAdd(reg_offs,
-                                    llvm::ConstantInt::get(CGF.Int32Ty, RegSize),
-                                    "new_reg_offs");
-  CGF.Builder.CreateStore(NewOffset, reg_offs_p);
-
-  // Now we're in a position to decide whether this argument really was in
-  // registers or not.
-  llvm::Value *InRegs = 0;
-  InRegs = CGF.Builder.CreateICmpSLE(NewOffset,
-                                     llvm::ConstantInt::get(CGF.Int32Ty, 0),
-                                     "inreg");
-
-  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
-
-  //=======================================
-  // Argument was in registers
-  //=======================================
-
-  // Now we emit the code for if the argument was originally passed in
-  // registers. First start the appropriate block:
-  CGF.EmitBlock(InRegBlock);
-
-  llvm::Value *reg_top_p = 0, *reg_top = 0;
-  reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
-  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
-  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
-  llvm::Value *RegAddr = 0;
-  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
-
-  if (!AI.isDirect()) {
-    // If it's been passed indirectly (actually a struct), whatever we find from
-    // stored registers or on the stack will actually be a struct **.
-    MemTy = llvm::PointerType::getUnqual(MemTy);
-  }
-
-  const Type *Base = 0;
-  uint64_t NumMembers;
-  if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)
-      && NumMembers > 1) {
-    // Homogeneous aggregates passed in registers will have their elements split
-    // and stored 16-bytes apart regardless of size (they're notionally in qN,
-    // qN+1, ...). We reload and store into a temporary local variable
-    // contiguously.
-    assert(AI.isDirect() && "Homogeneous aggregates should be passed directly");
-    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
-    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
-    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
-
-    for (unsigned i = 0; i < NumMembers; ++i) {
-      llvm::Value *BaseOffset = llvm::ConstantInt::get(CGF.Int32Ty, 16 * i);
-      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
-      LoadAddr = CGF.Builder.CreateBitCast(LoadAddr,
-                                           llvm::PointerType::getUnqual(BaseTy));
-      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
-
-      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
-      CGF.Builder.CreateStore(Elem, StoreAddr);
-    }
-
-    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
-  } else {
-    // Otherwise the object is contiguous in memory
-    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
-  }
-
-  CGF.EmitBranch(ContBlock);
-
-  //=======================================
-  // Argument was on the stack
-  //=======================================
-  CGF.EmitBlock(OnStackBlock);
-
-  llvm::Value *stack_p = 0, *OnStackAddr = 0;
-  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
-  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
-
-  // Again, stack arguments may need realigmnent. In this case both integer and
-  // floating-point ones might be affected.
-  if (AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
-    int Align = getContext().getTypeAlign(Ty) / 8;
-
-    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
-
-    OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr,
-                                 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
-                                 "align_stack");
-    OnStackAddr = CGF.Builder.CreateAnd(OnStackAddr,
-                                    llvm::ConstantInt::get(CGF.Int64Ty, -Align),
-                                    "align_stack");
-
-    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
-  }
-
-  uint64_t StackSize;
-  if (AI.isDirect())
-    StackSize = getContext().getTypeSize(Ty) / 8;
-  else
-    StackSize = 8;
-
-  // All stack slots are 8 bytes
-  StackSize = llvm::RoundUpToAlignment(StackSize, 8);
-
-  llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
-  llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC,
-                                                "new_stack");
-
-  // Write the new value of __stack for the next call to va_arg
-  CGF.Builder.CreateStore(NewStack, stack_p);
-
-  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
-
-  CGF.EmitBranch(ContBlock);
-
-  //=======================================
-  // Tidy up
-  //=======================================
-  CGF.EmitBlock(ContBlock);
-
-  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
-  ResAddr->addIncoming(RegAddr, InRegBlock);
-  ResAddr->addIncoming(OnStackAddr, OnStackBlock);
-
-  if (AI.isDirect())
-    return ResAddr;
-
-  return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
+  return EmitAArch64VAArg(VAListAddr, Ty, 8 - FreeIntRegs, 8 - FreeVFPRegs,
+                          AI.isIndirect(), CGF);
 }
 
 //===----------------------------------------------------------------------===//
@@ -4175,18 +4748,18 @@
   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType Ty) const;
 
-  virtual void computeInfo(CGFunctionInfo &FI) const;
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CFG) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CFG) const override;
 };
 
 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
   NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
     : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
-    
-  virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
-                                   CodeGen::CodeGenModule &M) const;
+
+  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &M) const override;
 private:
   static void addKernelMetadata(llvm::Function *F);
 };
@@ -4218,9 +4791,8 @@
 
 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it)
-    it->info = classifyArgumentType(it->type);
+  for (auto &I : FI.arguments())
+    I.info = classifyArgumentType(I.type);
 
   // Always honor user-specified calling convention.
   if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -4259,7 +4831,7 @@
     // CUDA __global__ functions get a kernel metadata entry.  Since
     // __global__ functions cannot be called from the device, we do not
     // need to set the noinline attribute.
-    if (FD->getAttr<CUDAGlobalAttr>())
+    if (FD->hasAttr<CUDAGlobalAttr>())
       addKernelMetadata(F);
   }
 }
@@ -4300,15 +4872,14 @@
   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType ArgTy) const;
 
-  virtual void computeInfo(CGFunctionInfo &FI) const {
+  void computeInfo(CGFunctionInfo &FI) const override {
     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-         it != ie; ++it)
-      it->info = classifyArgumentType(it->type);
+    for (auto &I : FI.arguments())
+      I.info = classifyArgumentType(I.type);
   }
 
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -4360,9 +4931,8 @@
 
     // If this is a C++ record, check the bases first.
     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
-      for (CXXRecordDecl::base_class_const_iterator I = CXXRD->bases_begin(),
-             E = CXXRD->bases_end(); I != E; ++I) {
-        QualType Base = I->getType();
+      for (const auto &I : CXXRD->bases()) {
+        QualType Base = I.getType();
 
         // Empty bases don't affect things either way.
         if (isEmptyRecord(getContext(), Base, true))
@@ -4376,10 +4946,7 @@
       }
 
     // Check the fields.
-    for (RecordDecl::field_iterator I = RD->field_begin(),
-           E = RD->field_end(); I != E; ++I) {
-      const FieldDecl *FD = *I;
-
+    for (const auto *FD : RD->fields()) {
       // Empty bitfields don't affect things either way.
       // Unlike isSingleElementStruct(), empty structure and array fields
       // do count.  So do anonymous bitfields that aren't zero-sized.
@@ -4452,7 +5019,7 @@
   llvm::Type *IndexTy = RegCount->getType();
   llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
   llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
-						  "fits_in_regs");
+                                                 "fits_in_regs");
 
   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
@@ -4535,15 +5102,22 @@
     return true;
 
   switch (Triple.getOS()) {
-  case llvm::Triple::Cygwin:
-  case llvm::Triple::MinGW32:
   case llvm::Triple::AuroraUX:
   case llvm::Triple::DragonFly:
   case llvm::Triple::FreeBSD:
   case llvm::Triple::OpenBSD:
   case llvm::Triple::Bitrig:
-  case llvm::Triple::Win32:
     return true;
+  case llvm::Triple::Win32:
+    switch (Triple.getEnvironment()) {
+    case llvm::Triple::UnknownEnvironment:
+    case llvm::Triple::Cygnus:
+    case llvm::Triple::GNU:
+    case llvm::Triple::MSVC:
+      return true;
+    default:
+      return false;
+    }
   default:
     return false;
   }
@@ -4611,7 +5185,7 @@
   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
-                           CodeGen::CodeGenModule &M) const;
+                           CodeGen::CodeGenModule &M) const override;
 };
 
 }
@@ -4660,9 +5234,9 @@
 
   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
-  virtual void computeInfo(CGFunctionInfo &FI) const;
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -4672,12 +5246,12 @@
     : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
       SizeOfUnwindException(IsO32 ? 24 : 32) {}
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
     return 29;
   }
 
   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
-                           CodeGen::CodeGenModule &CGM) const {
+                           CodeGen::CodeGenModule &CGM) const override {
     const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
     if (!FD) return;
     llvm::Function *Fn = cast<llvm::Function>(GV);
@@ -4690,9 +5264,9 @@
   }
 
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                               llvm::Value *Address) const;
+                               llvm::Value *Address) const override;
 
-  unsigned getSizeOfUnwindException() const {
+  unsigned getSizeOfUnwindException() const override {
     return SizeOfUnwindException;
   }
 };
@@ -4901,9 +5475,8 @@
   // Check if a pointer to an aggregate is passed as a hidden argument.  
   uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
 
-  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it)
-    it->info = classifyArgumentType(it->type, Offset);
+  for (auto &I : FI.arguments())
+    I.info = classifyArgumentType(I.type, Offset);
 }
 
 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -4984,8 +5557,8 @@
   TCETargetCodeGenInfo(CodeGenTypes &CGT)
     : DefaultTargetCodeGenInfo(CGT) {}
 
-  virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
-                                   CodeGen::CodeGenModule &M) const;
+  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &M) const override;
 };
 
 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
@@ -5000,9 +5573,8 @@
     if (FD->hasAttr<OpenCLKernelAttr>()) {
       // OpenCL C Kernel functions are not subject to inlining
       F->addFnAttr(llvm::Attribute::NoInline);
-          
-      if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
-
+      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
+      if (Attr) {
         // Convert the reqd_work_group_size() attributes to metadata.
         llvm::LLVMContext &Context = F->getContext();
         llvm::NamedMDNode *OpenCLMetadata = 
@@ -5012,14 +5584,11 @@
         Operands.push_back(F);
 
         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 
-                             llvm::APInt(32, 
-                             FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
+                             llvm::APInt(32, Attr->getXDim())));
         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
-                             llvm::APInt(32,
-                               FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
+                             llvm::APInt(32, Attr->getYDim())));
         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 
-                             llvm::APInt(32, 
-                               FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
+                             llvm::APInt(32, Attr->getZDim())));
 
         // Add a boolean constant operand for "required" (true) or "hint" (false)
         // for implementing the work_group_size_hint attr later. Currently 
@@ -5050,10 +5619,10 @@
   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType RetTy) const;
 
-  virtual void computeInfo(CGFunctionInfo &FI) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
 
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -5061,7 +5630,7 @@
   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
     :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
 
-  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
     return 29;
   }
 };
@@ -5070,9 +5639,8 @@
 
 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it)
-    it->info = classifyArgumentType(it->type);
+  for (auto &I : FI.arguments())
+    I.info = classifyArgumentType(I.type);
 }
 
 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
@@ -5205,9 +5773,9 @@
 
 private:
   ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
-  virtual void computeInfo(CGFunctionInfo &FI) const;
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 
   // Coercion type builder for structs passed in registers. The coercion type
   // serves two purposes:
@@ -5346,6 +5914,11 @@
   if (!isAggregateTypeForABI(Ty))
     return ABIArgInfo::getDirect();
 
+  // If a C++ object has either a non-trivial copy constructor or a non-trivial
+  // destructor, it is passed with an explicit indirect pointer / sret pointer.
+  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+
   // This is a small aggregate type that should be passed in registers.
   // Build a coercion type from the LLVM struct type.
   llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
@@ -5382,6 +5955,7 @@
 
   switch (AI.getKind()) {
   case ABIArgInfo::Expand:
+  case ABIArgInfo::InAlloca:
     llvm_unreachable("Unsupported ABI kind for va_arg");
 
   case ABIArgInfo::Extend:
@@ -5417,9 +5991,8 @@
 
 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
   FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
-  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it)
-    it->info = classifyType(it->type, 16 * 8);
+  for (auto &I : FI.arguments())
+    I.info = classifyType(I.type, 16 * 8);
 }
 
 namespace {
@@ -5427,24 +6000,65 @@
 public:
   SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
     : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
+
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+    return 14;
+  }
+
+  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+                               llvm::Value *Address) const override;
 };
 } // end anonymous namespace
 
+bool
+SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+                                                llvm::Value *Address) const {
+  // This is calculated from the LLVM and GCC tables and verified
+  // against gcc output.  AFAIK all ABIs use the same encoding.
+
+  CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+  llvm::IntegerType *i8 = CGF.Int8Ty;
+  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+
+  // 0-31: the 8-byte general-purpose registers
+  AssignToArrayRange(Builder, Address, Eight8, 0, 31);
+
+  // 32-63: f0-31, the 4-byte floating-point registers
+  AssignToArrayRange(Builder, Address, Four8, 32, 63);
+
+  //   Y   = 64
+  //   PSR = 65
+  //   WIM = 66
+  //   TBR = 67
+  //   PC  = 68
+  //   NPC = 69
+  //   FSR = 70
+  //   CSR = 71
+  AssignToArrayRange(Builder, Address, Eight8, 64, 71);
+
+  // 72-87: d0-15, the 8-byte floating-point registers
+  AssignToArrayRange(Builder, Address, Eight8, 72, 87);
+
+  return false;
+}
+
 
 //===----------------------------------------------------------------------===//
-// Xcore ABI Implementation
+// XCore ABI Implementation
 //===----------------------------------------------------------------------===//
 namespace {
 class XCoreABIInfo : public DefaultABIInfo {
 public:
   XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                 CodeGenFunction &CGF) const;
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                         CodeGenFunction &CGF) const override;
 };
 
-class XcoreTargetCodeGenInfo : public TargetCodeGenInfo {
+class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
-  XcoreTargetCodeGenInfo(CodeGenTypes &CGT)
+  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
     :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
 };
 } // End anonymous namespace.
@@ -5468,6 +6082,7 @@
   uint64_t ArgSize = 0;
   switch (AI.getKind()) {
   case ABIArgInfo::Expand:
+  case ABIArgInfo::InAlloca:
     llvm_unreachable("Unsupported ABI kind for va_arg");
   case ABIArgInfo::Ignore:
     Val = llvm::UndefValue::get(ArgPtrTy);
@@ -5520,11 +6135,22 @@
   case llvm::Triple::mips64el:
     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
 
+  case llvm::Triple::arm64: {
+    ARM64ABIInfo::ABIKind Kind = ARM64ABIInfo::AAPCS;
+    if (strcmp(getTarget().getABI(), "darwinpcs") == 0)
+      Kind = ARM64ABIInfo::DarwinPCS;
+
+    return *(TheTargetCodeGenInfo = new ARM64TargetCodeGenInfo(Types, Kind));
+  }
+
   case llvm::Triple::aarch64:
+  case llvm::Triple::aarch64_be:
     return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));
 
   case llvm::Triple::arm:
+  case llvm::Triple::armeb:
   case llvm::Triple::thumb:
+  case llvm::Triple::thumbeb:
     {
       ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
       if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
@@ -5572,7 +6198,7 @@
     bool IsDarwinVectorABI = Triple.isOSDarwin();
     bool IsSmallStructInRegABI =
         X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
-    bool IsWin32FloatStructABI = (Triple.getOS() == llvm::Triple::Win32);
+    bool IsWin32FloatStructABI = Triple.isWindowsMSVCEnvironment();
 
     if (Triple.getOS() == llvm::Triple::Win32) {
       return *(TheTargetCodeGenInfo =
@@ -5610,7 +6236,6 @@
   case llvm::Triple::sparcv9:
     return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
   case llvm::Triple::xcore:
-    return *(TheTargetCodeGenInfo = new XcoreTargetCodeGenInfo(Types));
-
+    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
   }
 }
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index f631f31..6c3ab64 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -17,8 +17,8 @@
 
 #include "clang/AST/Type.h"
 #include "clang/Basic/LLVM.h"
-#include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
 
 namespace llvm {
   class Constant;
@@ -123,6 +123,10 @@
       return Ty;
     }
 
+    /// doesReturnSlotInterfereWithArgs - Return true if the target uses an
+    /// argument slot for an 'sret' type.
+    virtual bool doesReturnSlotInterfereWithArgs() const { return true; }
+
     /// Retrieve the address of a function to call immediately before
     /// calling objc_retainAutoreleasedReturnValue.  The
     /// implementation of objc_autoreleaseReturnValue sniffs the
@@ -176,7 +180,7 @@
     /// However, some platforms make the conventions identical except
     /// for passing additional out-of-band information to a variadic
     /// function: for example, x86-64 passes the number of SSE
-    /// arguments in %al.  On these platforms, it is desireable to
+    /// arguments in %al.  On these platforms, it is desirable to
     /// call unprototyped functions using the variadic convention so
     /// that unprototyped calls to varargs functions still succeed.
     ///
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
index ddd2d59..86a48fd 100644
--- a/lib/Driver/Action.cpp
+++ b/lib/Driver/Action.cpp
@@ -33,7 +33,8 @@
   case LinkJobClass: return "linker";
   case LipoJobClass: return "lipo";
   case DsymutilJobClass: return "dsymutil";
-  case VerifyJobClass: return "verify";
+  case VerifyDebugInfoJobClass: return "verify-debug-info";
+  case VerifyPCHJobClass: return "verify-pch";
   }
 
   llvm_unreachable("invalid class");
@@ -117,6 +118,29 @@
 
 void VerifyJobAction::anchor() {}
 
-VerifyJobAction::VerifyJobAction(ActionList &Inputs, types::ID Type)
-  : JobAction(VerifyJobClass, Inputs, Type) {
+VerifyJobAction::VerifyJobAction(ActionClass Kind, Action *Input,
+                                 types::ID Type)
+    : JobAction(Kind, Input, Type) {
+  assert((Kind == VerifyDebugInfoJobClass || Kind == VerifyPCHJobClass) &&
+         "ActionClass is not a valid VerifyJobAction");
+}
+
+VerifyJobAction::VerifyJobAction(ActionClass Kind, ActionList &Inputs,
+                                 types::ID Type)
+    : JobAction(Kind, Inputs, Type) {
+  assert((Kind == VerifyDebugInfoJobClass || Kind == VerifyPCHJobClass) &&
+         "ActionClass is not a valid VerifyJobAction");
+}
+
+void VerifyDebugInfoJobAction::anchor() {}
+
+VerifyDebugInfoJobAction::VerifyDebugInfoJobAction(Action *Input,
+                                                   types::ID Type)
+    : VerifyJobAction(VerifyDebugInfoJobClass, Input, Type) {
+}
+
+void VerifyPCHJobAction::anchor() {}
+
+VerifyPCHJobAction::VerifyPCHJobAction(Action *Input, types::ID Type)
+    : VerifyJobAction(VerifyPCHJobClass, Input, Type) {
 }
diff --git a/lib/Driver/Android.mk b/lib/Driver/Android.mk
index f84aec6..559ca83 100644
--- a/lib/Driver/Android.mk
+++ b/lib/Driver/Android.mk
@@ -6,6 +6,7 @@
 include $(CLEAR_TBLGEN_VARS)
 
 TBLGEN_TABLES := \
+  AttrVisitor.inc \
   DiagnosticCommonKinds.inc \
   DiagnosticDriverKinds.inc \
   DiagnosticSemaKinds.inc \
@@ -20,6 +21,7 @@
   Driver.cpp \
   DriverOptions.cpp \
   Job.cpp \
+  Multilib.cpp \
   Phases.cpp \
   SanitizerArgs.cpp \
   Tool.cpp \
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
index 0152b19..f2bdaee 100644
--- a/lib/Driver/CMakeLists.txt
+++ b/lib/Driver/CMakeLists.txt
@@ -1,3 +1,9 @@
+set(LLVM_LINK_COMPONENTS
+  Option
+  Support
+  TransformUtils
+  )
+
 add_clang_library(clangDriver
   Action.cpp
   CC1AsOptions.cpp
@@ -5,6 +11,7 @@
   Driver.cpp
   DriverOptions.cpp
   Job.cpp
+  Multilib.cpp
   Phases.cpp
   SanitizerArgs.cpp
   Tool.cpp
@@ -13,18 +20,11 @@
   WindowsToolChain.cpp
   Tools.cpp
   Types.cpp
-  )
 
-add_dependencies(clangDriver
-  ClangAttrList
+  DEPENDS
   ClangCC1AsOptions
-  ClangDiagnosticCommon
-  ClangDiagnosticDriver
   ClangDriverOptions
-  )
 
-target_link_libraries(clangDriver
+  LINK_LIBS
   clangBasic
-  LLVMOption
-  LLVMTransformUtils
   )
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index f077fd6..a20a2bd 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -70,8 +70,6 @@
 }
 
 bool Compilation::CleanupFile(const char *File, bool IssueErrors) const {
-  std::string P(File);
-
   // FIXME: Why are we trying to remove files that we have not created? For
   // example we should only try to remove a temporary assembly file if
   // "clang -cc1" succeed in writing it. Was this a workaround for when
@@ -137,7 +135,8 @@
     if (getDriver().CCPrintOptions && getDriver().CCPrintOptionsFilename) {
       std::string Error;
       OS = new llvm::raw_fd_ostream(getDriver().CCPrintOptionsFilename, Error,
-                                    llvm::sys::fs::F_Append);
+                                    llvm::sys::fs::F_Append |
+                                        llvm::sys::fs::F_Text);
       if (!Error.empty()) {
         getDriver().Diag(clang::diag::err_drv_cc_print_options_failure)
           << Error;
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index 5307910..317b822 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -19,15 +19,14 @@
 #include "clang/Driver/Tool.h"
 #include "clang/Driver/ToolChain.h"
 #include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/StringSet.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/Option/Arg.h"
 #include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptSpecifier.h"
 #include "llvm/Option/OptTable.h"
 #include "llvm/Option/Option.h"
-#include "llvm/Option/OptSpecifier.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/FileSystem.h"
@@ -36,6 +35,7 @@
 #include "llvm/Support/Program.h"
 #include "llvm/Support/raw_ostream.h"
 #include <map>
+#include <memory>
 
 // FIXME: It would prevent us from including llvm-config.h
 // if config.h were included before system_error.h.
@@ -77,10 +77,7 @@
 Driver::~Driver() {
   delete Opts;
 
-  for (llvm::StringMap<ToolChain *>::iterator I = ToolChains.begin(),
-                                              E = ToolChains.end();
-       I != E; ++I)
-    delete I->second;
+  llvm::DeleteContainerSeconds(ToolChains);
 }
 
 void Driver::ParseDriverMode(ArrayRef<const char *> Args) {
@@ -112,7 +109,7 @@
 
   unsigned IncludedFlagsBitmask;
   unsigned ExcludedFlagsBitmask;
-  llvm::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
+  std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
     getIncludeExcludeOptionFlagMasks();
 
   unsigned MissingArgIndex, MissingArgCount;
@@ -159,15 +156,17 @@
   Arg *PhaseArg = 0;
   phases::ID FinalPhase;
 
-  // -{E,M,MM} only run the preprocessor.
+  // -{E,M,MM} and /P only run the preprocessor.
   if (CCCIsCPP() ||
       (PhaseArg = DAL.getLastArg(options::OPT_E)) ||
-      (PhaseArg = DAL.getLastArg(options::OPT_M, options::OPT_MM))) {
+      (PhaseArg = DAL.getLastArg(options::OPT_M, options::OPT_MM)) ||
+      (PhaseArg = DAL.getLastArg(options::OPT__SLASH_P))) {
     FinalPhase = phases::Preprocess;
 
     // -{fsyntax-only,-analyze,emit-ast,S} only run up to the compiler.
   } else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
              (PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
+             (PhaseArg = DAL.getLastArg(options::OPT_verify_pch)) ||
              (PhaseArg = DAL.getLastArg(options::OPT_rewrite_objc)) ||
              (PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
              (PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
@@ -336,9 +335,10 @@
   // FIXME: DefaultTargetTriple is used by the target-prefixed calls to as/ld
   // and getToolChain is const.
   if (IsCLMode()) {
-    // clang-cl targets Win32.
+    // clang-cl targets MSVC-style Win32.
     llvm::Triple T(DefaultTargetTriple);
-    T.setOSName(llvm::Triple::getOSTypeName(llvm::Triple::Win32));
+    T.setOS(llvm::Triple::Win32);
+    T.setEnvironment(llvm::Triple::MSVC);
     DefaultTargetTriple = T.str();
   }
   if (const Arg *A = Args->getLastArg(options::OPT_target))
@@ -378,8 +378,8 @@
   BuildInputs(C->getDefaultToolChain(), *TranslatedArgs, Inputs);
 
   // Construct the list of abstract actions to perform for this compilation. On
-  // Darwin target OSes this uses the driver-driver and universal actions.
-  if (TC.getTriple().isOSDarwin())
+  // MachO targets this uses the driver-driver and universal actions.
+  if (TC.getTriple().isOSBinFormatMachO())
     BuildUniversalActions(C->getDefaultToolChain(), C->getArgs(),
                           Inputs, C->getActions());
   else
@@ -491,7 +491,7 @@
   // Construct the list of abstract actions to perform for this compilation. On
   // Darwin OSes this uses the driver-driver and builds universal actions.
   const ToolChain &TC = C.getDefaultToolChain();
-  if (TC.getTriple().isOSDarwin())
+  if (TC.getTriple().isOSBinFormatMachO())
     BuildUniversalActions(TC, C.getArgs(), Inputs, C.getActions());
   else
     BuildActions(TC, C.getArgs(), Inputs, C.getActions());
@@ -523,8 +523,7 @@
       std::string Err;
       std::string Script = StringRef(*it).rsplit('.').first;
       Script += ".sh";
-      llvm::raw_fd_ostream ScriptOS(
-          Script.c_str(), Err, llvm::sys::fs::F_Excl | llvm::sys::fs::F_Binary);
+      llvm::raw_fd_ostream ScriptOS(Script.c_str(), Err, llvm::sys::fs::F_Excl);
       if (!Err.empty()) {
         Diag(clang::diag::note_drv_command_failed_diag_msg)
           << "Error generating run script: " + Script + " " + Err;
@@ -621,7 +620,7 @@
 void Driver::PrintHelp(bool ShowHidden) const {
   unsigned IncludedFlagsBitmask;
   unsigned ExcludedFlagsBitmask;
-  llvm::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
+  std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
     getIncludeExcludeOptionFlagMasks();
 
   ExcludedFlagsBitmask |= options::NoDriverOption;
@@ -746,54 +745,37 @@
   }
 
   if (C.getArgs().hasArg(options::OPT_print_multi_lib)) {
-    // FIXME: We need tool chain support for this.
-    llvm::outs() << ".;\n";
+    const MultilibSet &Multilibs = TC.getMultilibs();
 
-    switch (C.getDefaultToolChain().getTriple().getArch()) {
-    default:
-      break;
-
-    case llvm::Triple::x86_64:
-      llvm::outs() << "x86_64;@m64" << "\n";
-      break;
-
-    case llvm::Triple::ppc64:
-      llvm::outs() << "ppc64;@m64" << "\n";
-      break;
-
-    case llvm::Triple::ppc64le:
-      llvm::outs() << "ppc64le;@m64" << "\n";
-      break;
+    for (MultilibSet::const_iterator I = Multilibs.begin(), E = Multilibs.end();
+         I != E; ++I) {
+      llvm::outs() << *I << "\n";
     }
     return false;
   }
 
-  // FIXME: What is the difference between print-multi-directory and
-  // print-multi-os-directory?
-  if (C.getArgs().hasArg(options::OPT_print_multi_directory) ||
-      C.getArgs().hasArg(options::OPT_print_multi_os_directory)) {
-    switch (C.getDefaultToolChain().getTriple().getArch()) {
-    default:
-    case llvm::Triple::x86:
-    case llvm::Triple::ppc:
-      llvm::outs() << "." << "\n";
-      break;
-
-    case llvm::Triple::x86_64:
-      llvm::outs() << "x86_64" << "\n";
-      break;
-
-    case llvm::Triple::ppc64:
-      llvm::outs() << "ppc64" << "\n";
-      break;
-
-    case llvm::Triple::ppc64le:
-      llvm::outs() << "ppc64le" << "\n";
-      break;
+  if (C.getArgs().hasArg(options::OPT_print_multi_directory)) {
+    const MultilibSet &Multilibs = TC.getMultilibs();
+    for (MultilibSet::const_iterator I = Multilibs.begin(), E = Multilibs.end();
+         I != E; ++I) {
+      if (I->gccSuffix().empty())
+        llvm::outs() << ".\n";
+      else {
+        StringRef Suffix(I->gccSuffix());
+        assert(Suffix.front() == '/');
+        llvm::outs() << Suffix.substr(1) << "\n";
+      }
     }
     return false;
   }
 
+  if (C.getArgs().hasArg(options::OPT_print_multi_os_directory)) {
+    // FIXME: This should print out "lib/../lib", "lib/../lib64", or
+    // "lib/../lib32" as appropriate for the toolchain. For now, print
+    // nothing because it's not supported yet.
+    return false;
+  }
+
   return true;
 }
 
@@ -867,7 +849,7 @@
       // Validate the option here; we don't save the type here because its
       // particular spelling may participate in other driver choices.
       llvm::Triple::ArchType Arch =
-        tools::darwin::getArchTypeForDarwinArchName(A->getValue());
+        tools::darwin::getArchTypeForMachOArchName(A->getValue());
       if (Arch == llvm::Triple::UnknownArch) {
         Diag(clang::diag::err_drv_invalid_arch_name)
           << A->getAsString(Args);
@@ -934,13 +916,12 @@
         Actions.push_back(new DsymutilJobAction(Inputs, types::TY_dSYM));
       }
 
-      // Verify the output (debug information only) if we passed '-verify'.
-      if (Args.hasArg(options::OPT_verify)) {
-        ActionList VerifyInputs;
-        VerifyInputs.push_back(Actions.back());
+      // Verify the debug info output.
+      if (Args.hasArg(options::OPT_verify_debug_info)) {
+        Action *VerifyInput = Actions.back();
         Actions.pop_back();
-        Actions.push_back(new VerifyJobAction(VerifyInputs,
-                                              types::TY_Nothing));
+        Actions.push_back(new VerifyDebugInfoJobAction(VerifyInput,
+                                                       types::TY_Nothing));
       }
     }
   }
@@ -948,7 +929,7 @@
 
 /// \brief Check that the file referenced by Value exists. If it doesn't,
 /// issue a diagnostic and return false.
-static bool DiagnoseInputExistance(const Driver &D, const DerivedArgList &Args,
+static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
                                    StringRef Value) {
   if (!D.getCheckInputsExist())
     return true;
@@ -1029,7 +1010,8 @@
           // Otherwise emit an error but still use a valid type to avoid
           // spurious errors (e.g., no inputs).
           if (!Args.hasArgNoClaim(options::OPT_E) && !CCCIsCPP())
-            Diag(clang::diag::err_drv_unknown_stdin_type);
+            Diag(IsCLMode() ? clang::diag::err_drv_unknown_stdin_type_clang_cl
+                            : clang::diag::err_drv_unknown_stdin_type);
           Ty = types::TY_C;
         } else {
           // Otherwise lookup by extension.
@@ -1075,19 +1057,19 @@
         Ty = InputType;
       }
 
-      if (DiagnoseInputExistance(*this, Args, Value))
+      if (DiagnoseInputExistence(*this, Args, Value))
         Inputs.push_back(std::make_pair(Ty, A));
 
     } else if (A->getOption().matches(options::OPT__SLASH_Tc)) {
       StringRef Value = A->getValue();
-      if (DiagnoseInputExistance(*this, Args, Value)) {
+      if (DiagnoseInputExistence(*this, Args, Value)) {
         Arg *InputArg = MakeInputArg(Args, Opts, A->getValue());
         Inputs.push_back(std::make_pair(types::TY_C, InputArg));
       }
       A->claim();
     } else if (A->getOption().matches(options::OPT__SLASH_Tp)) {
       StringRef Value = A->getValue();
-      if (DiagnoseInputExistance(*this, Args, Value)) {
+      if (DiagnoseInputExistence(*this, Args, Value)) {
         Arg *InputArg = MakeInputArg(Args, Opts, A->getValue());
         Inputs.push_back(std::make_pair(types::TY_CXX, InputArg));
       }
@@ -1177,7 +1159,7 @@
 
   // Construct the actions to perform.
   ActionList LinkerInputs;
-  ActionList SplitInputs;
+
   llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
   for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
     types::ID InputType = Inputs[i].first;
@@ -1211,18 +1193,18 @@
         Diag(clang::diag::warn_drv_preprocessed_input_file_unused)
           << InputArg->getAsString(Args)
           << !!FinalPhaseArg
-          << FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "";
+          << (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
       else
         Diag(clang::diag::warn_drv_input_file_unused)
           << InputArg->getAsString(Args)
           << getPhaseName(InitialPhase)
           << !!FinalPhaseArg
-          << FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "";
+          << (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
       continue;
     }
 
     // Build the pipeline for this file.
-    OwningPtr<Action> Current(new InputAction(*InputArg, InputType));
+    std::unique_ptr<Action> Current(new InputAction(*InputArg, InputType));
     for (SmallVectorImpl<phases::ID>::iterator
            i = PL.begin(), e = PL.end(); i != e; ++i) {
       phases::ID Phase = *i;
@@ -1234,7 +1216,7 @@
       // Queue linker inputs.
       if (Phase == phases::Link) {
         assert((i + 1) == e && "linking must be final compilation step.");
-        LinkerInputs.push_back(Current.take());
+        LinkerInputs.push_back(Current.release());
         break;
       }
 
@@ -1245,14 +1227,14 @@
         continue;
 
       // Otherwise construct the appropriate action.
-      Current.reset(ConstructPhaseAction(Args, Phase, Current.take()));
+      Current.reset(ConstructPhaseAction(Args, Phase, Current.release()));
       if (Current->getType() == types::TY_Nothing)
         break;
     }
 
     // If we ended with something, add to the output list.
     if (Current)
-      Actions.push_back(Current.take());
+      Actions.push_back(Current.release());
   }
 
   // Add a link action if necessary.
@@ -1314,6 +1296,8 @@
       return new CompileJobAction(Input, types::TY_AST);
     } else if (Args.hasArg(options::OPT_module_file_info)) {
       return new CompileJobAction(Input, types::TY_ModuleFile);
+    } else if (Args.hasArg(options::OPT_verify_pch)) {
+      return new VerifyPCHJobAction(Input, types::TY_Nothing);
     } else if (IsUsingLTO(Args)) {
       types::ID Output =
         Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
@@ -1362,7 +1346,7 @@
 
   // Collect the list of architectures.
   llvm::StringSet<> ArchNames;
-  if (C.getDefaultToolChain().getTriple().isOSDarwin()) {
+  if (C.getDefaultToolChain().getTriple().isOSBinFormatMachO()) {
     for (ArgList::const_iterator it = C.getArgs().begin(), ie = C.getArgs().end();
          it != ie; ++it) {
       Arg *A = *it;
@@ -1456,6 +1440,7 @@
 
   if (TC->useIntegratedAs() &&
       !C.getArgs().hasArg(options::OPT_save_temps) &&
+      !C.getArgs().hasArg(options::OPT_via_file_asm) &&
       !C.getArgs().hasArg(options::OPT__SLASH_FA) &&
       !C.getArgs().hasArg(options::OPT__SLASH_Fa) &&
       isa<AssembleJobAction>(JA) &&
@@ -1626,6 +1611,14 @@
       return C.addResultFile(FinalOutput->getValue(), &JA);
   }
 
+  // For /P, preprocess to a file named after BaseInput.
+  if (C.getArgs().hasArg(options::OPT__SLASH_P)) {
+    assert(AtTopLevel && isa<PreprocessJobAction>(JA));
+    StringRef BaseName = llvm::sys::path::filename(BaseInput);
+    return C.addResultFile(MakeCLOutputFilename(C.getArgs(), "", BaseName,
+                                                types::TY_PP_C), &JA);
+  }
+
   // Default to writing to stdout?
   if (AtTopLevel && !CCGenDiagnostics &&
       (isa<PreprocessJobAction>(JA) || JA.getType() == types::TY_ModuleFile))
@@ -1854,21 +1847,18 @@
 
   llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
 
-  // Handle Darwin-specific options available here.
-  if (Target.isOSDarwin()) {
+  // Handle Apple-specific options available here.
+  if (Target.isOSBinFormatMachO()) {
     // If an explict Darwin arch name is given, that trumps all.
     if (!DarwinArchName.empty()) {
-      Target.setArch(
-        tools::darwin::getArchTypeForDarwinArchName(DarwinArchName));
+      tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName);
       return Target;
     }
 
     // Handle the Darwin '-arch' flag.
     if (Arg *A = Args.getLastArg(options::OPT_arch)) {
-      llvm::Triple::ArchType DarwinArch
-        = tools::darwin::getArchTypeForDarwinArchName(A->getValue());
-      if (DarwinArch != llvm::Triple::UnknownArch)
-        Target.setArch(DarwinArch);
+      StringRef ArchName = A->getValue();
+      tools::darwin::setTripleTypeForMachOArchName(Target, ArchName);
     }
   }
 
@@ -1893,20 +1883,23 @@
       Target.getOS() == llvm::Triple::Minix)
     return Target;
 
-  // Handle pseudo-target flags '-m32' and '-m64'.
-  // FIXME: Should this information be in llvm::Triple?
-  if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
-    if (A->getOption().matches(options::OPT_m32)) {
-      if (Target.getArch() == llvm::Triple::x86_64)
-        Target.setArch(llvm::Triple::x86);
-      if (Target.getArch() == llvm::Triple::ppc64)
-        Target.setArch(llvm::Triple::ppc);
-    } else {
-      if (Target.getArch() == llvm::Triple::x86)
-        Target.setArch(llvm::Triple::x86_64);
-      if (Target.getArch() == llvm::Triple::ppc)
-        Target.setArch(llvm::Triple::ppc64);
+  // Handle pseudo-target flags '-m64', '-m32' and '-m16'.
+  if (Arg *A = Args.getLastArg(options::OPT_m64, options::OPT_m32,
+                               options::OPT_m16)) {
+    llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
+
+    if (A->getOption().matches(options::OPT_m64))
+      AT = Target.get64BitArchVariant().getArch();
+    else if (A->getOption().matches(options::OPT_m32))
+      AT = Target.get32BitArchVariant().getArch();
+    else if (A->getOption().matches(options::OPT_m16) &&
+             Target.get32BitArchVariant().getArch() == llvm::Triple::x86) {
+      AT = llvm::Triple::x86;
+      Target.setEnvironment(llvm::Triple::CODE16);
     }
+
+    if (AT != llvm::Triple::UnknownArch)
+      Target.setArch(AT);
   }
 
   return Target;
@@ -1926,13 +1919,7 @@
     case llvm::Triple::Darwin:
     case llvm::Triple::MacOSX:
     case llvm::Triple::IOS:
-      if (Target.getArch() == llvm::Triple::x86 ||
-          Target.getArch() == llvm::Triple::x86_64 ||
-          Target.getArch() == llvm::Triple::arm ||
-          Target.getArch() == llvm::Triple::thumb)
-        TC = new toolchains::DarwinClang(*this, Target, Args);
-      else
-        TC = new toolchains::Darwin_Generic_GCC(*this, Target, Args);
+      TC = new toolchains::DarwinClang(*this, Target, Args);
       break;
     case llvm::Triple::DragonFly:
       TC = new toolchains::DragonFly(*this, Target, Args);
@@ -1962,10 +1949,29 @@
       TC = new toolchains::Solaris(*this, Target, Args);
       break;
     case llvm::Triple::Win32:
-      TC = new toolchains::Windows(*this, Target, Args);
+      switch (Target.getEnvironment()) {
+      default:
+        if (Target.isOSBinFormatELF())
+          TC = new toolchains::Generic_ELF(*this, Target, Args);
+        else if (Target.isOSBinFormatMachO())
+          TC = new toolchains::MachO(*this, Target, Args);
+        else
+          TC = new toolchains::Generic_GCC(*this, Target, Args);
+        break;
+      case llvm::Triple::GNU:
+        // FIXME: We need a MinGW toolchain.  Use the default Generic_GCC
+        // toolchain for now, as the default case below would do otherwise.
+        if (Target.isOSBinFormatELF())
+          TC = new toolchains::Generic_ELF(*this, Target, Args);
+        else
+          TC = new toolchains::Generic_GCC(*this, Target, Args);
+        break;
+      case llvm::Triple::MSVC:
+      case llvm::Triple::UnknownEnvironment:
+        TC = new toolchains::Windows(*this, Target, Args);
+        break;
+      }
       break;
-    case llvm::Triple::MinGW32:
-      // FIXME: We need a MinGW toolchain. Fallthrough for now.
     default:
       // TCE is an OSless target
       if (Target.getArchName() == "tce") {
@@ -1981,6 +1987,14 @@
         TC = new toolchains::XCore(*this, Target, Args);
         break;
       }
+      if (Target.isOSBinFormatELF()) {
+        TC = new toolchains::Generic_ELF(*this, Target, Args);
+        break;
+      }
+      if (Target.getObjectFormat() == llvm::Triple::MachO) {
+        TC = new toolchains::MachO(*this, Target, Args);
+        break;
+      }
       TC = new toolchains::Generic_GCC(*this, Target, Args);
       break;
     }
@@ -2055,3 +2069,7 @@
 
   return std::make_pair(IncludedFlagsBitmask, ExcludedFlagsBitmask);
 }
+
+bool clang::driver::isOptimizationLevelFast(const llvm::opt::ArgList &Args) {
+  return Args.hasFlag(options::OPT_Ofast, options::OPT_O_Group, false);
+}
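
A minimal standalone sketch of the -m64/-m32/-m16 handling added above, using llvm::Triple from the same tree; the helper function and the main() driver are illustrative and not part of this patch:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Triple.h"
    #include "llvm/Support/raw_ostream.h"

    // Mirrors the pseudo-target flag handling above, without the ArgList
    // plumbing: derive the new arch from the triple's pointer-width variant.
    static llvm::Triple adjustTripleForSizeFlag(llvm::Triple T,
                                                llvm::StringRef Flag) {
      llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
      if (Flag == "-m64")
        AT = T.get64BitArchVariant().getArch();
      else if (Flag == "-m32")
        AT = T.get32BitArchVariant().getArch();
      else if (Flag == "-m16" &&
               T.get32BitArchVariant().getArch() == llvm::Triple::x86) {
        AT = llvm::Triple::x86;                  // -m16 is x86-only
        T.setEnvironment(llvm::Triple::CODE16);  // 16-bit code on a 32-bit arch
      }
      if (AT != llvm::Triple::UnknownArch)
        T.setArch(AT);
      return T;
    }

    int main() {
      llvm::Triple T("x86_64-unknown-linux-gnu");
      // Prints "i386-unknown-linux-gnu".
      llvm::outs() << adjustTripleForSizeFlag(T, "-m32").str() << "\n";
      return 0;
    }
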
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
index ee68e6f..2775d89 100644
--- a/lib/Driver/Job.cpp
+++ b/lib/Driver/Job.cpp
@@ -7,7 +7,11 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
 #include "clang/Driver/Job.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
@@ -159,6 +163,9 @@
   if (ExecutionFailed)
     *ExecutionFailed = false;
 
+  const Driver &D = getCreator().getToolChain().getDriver();
+  D.Diag(diag::warn_drv_invoking_fallback) << Fallback->getExecutable();
+
   int SecondaryStatus = Fallback->Execute(Redirects, ErrMsg, ExecutionFailed);
   return SecondaryStatus;
 }
diff --git a/lib/Driver/Multilib.cpp b/lib/Driver/Multilib.cpp
new file mode 100644
index 0000000..910e638
--- /dev/null
+++ b/lib/Driver/Multilib.cpp
@@ -0,0 +1,351 @@
+//===--- Multilib.cpp - Multilib Implementation ---------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Multilib.h"
+#include "Tools.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+using namespace clang::driver;
+using namespace clang;
+using namespace llvm::opt;
+using namespace llvm::sys;
+
+/// normalize Segment to "/foo/bar" or "".
+static void normalizePathSegment(std::string &Segment) {
+  StringRef seg = Segment;
+
+  // Prune trailing "/" or "./"
+  while (1) {
+    StringRef last = *--path::end(seg);
+    if (last != ".")
+      break;
+    seg = path::parent_path(seg);
+  }
+
+  if (seg.empty() || seg == "/") {
+    Segment = "";
+    return;
+  }
+
+  // Add leading '/'
+  if (seg.front() != '/') {
+    Segment = "/" + seg.str();
+  } else {
+    Segment = seg;
+  }
+}
+
+Multilib::Multilib(StringRef GCCSuffix, StringRef OSSuffix,
+                   StringRef IncludeSuffix)
+    : GCCSuffix(GCCSuffix), OSSuffix(OSSuffix), IncludeSuffix(IncludeSuffix) {
+  normalizePathSegment(this->GCCSuffix);
+  normalizePathSegment(this->OSSuffix);
+  normalizePathSegment(this->IncludeSuffix);
+}
+
+Multilib &Multilib::gccSuffix(StringRef S) {
+  GCCSuffix = S;
+  normalizePathSegment(GCCSuffix);
+  return *this;
+}
+
+Multilib &Multilib::osSuffix(StringRef S) {
+  OSSuffix = S;
+  normalizePathSegment(OSSuffix);
+  return *this;
+}
+
+Multilib &Multilib::includeSuffix(StringRef S) {
+  IncludeSuffix = S;
+  normalizePathSegment(IncludeSuffix);
+  return *this;
+}
+
+void Multilib::print(raw_ostream &OS) const {
+  assert(GCCSuffix.empty() || (StringRef(GCCSuffix).front() == '/'));
+  if (GCCSuffix.empty())
+    OS << ".";
+  else {
+    OS << StringRef(GCCSuffix).drop_front();
+  }
+  OS << ";";
+  for (flags_list::const_iterator I = Flags.begin(), E = Flags.end(); I != E;
+       ++I) {
+    if (StringRef(*I).front() == '+')
+      OS << "@" << I->substr(1);
+  }
+}
+
+bool Multilib::isValid() const {
+  llvm::StringMap<int> FlagSet;
+  for (unsigned I = 0, N = Flags.size(); I != N; ++I) {
+    StringRef Flag(Flags[I]);
+    llvm::StringMap<int>::iterator SI = FlagSet.find(Flag.substr(1));
+
+    assert(StringRef(Flag).front() == '+' || StringRef(Flag).front() == '-');
+
+    if (SI == FlagSet.end())
+      FlagSet[Flag.substr(1)] = I;
+    else if (Flags[I] != Flags[SI->getValue()])
+      return false;
+  }
+  return true;
+}
+
+bool Multilib::operator==(const Multilib &Other) const {
+  // Check whether the flag sets match, allowing the comparison to be
+  // order-invariant.
+  llvm::StringSet<> MyFlags;
+  for (flags_list::const_iterator I = Flags.begin(), E = Flags.end(); I != E;
+       ++I) {
+    MyFlags.insert(*I);
+  }
+  for (flags_list::const_iterator I = Other.Flags.begin(),
+                                  E = Other.Flags.end();
+       I != E; ++I) {
+    if (MyFlags.find(*I) == MyFlags.end())
+      return false;
+  }
+
+  if (osSuffix() != Other.osSuffix())
+    return false;
+
+  if (gccSuffix() != Other.gccSuffix())
+    return false;
+
+  if (includeSuffix() != Other.includeSuffix())
+    return false;
+
+  return true;
+}
+
+raw_ostream &clang::driver::operator<<(raw_ostream &OS, const Multilib &M) {
+  M.print(OS);
+  return OS;
+}
+
+MultilibSet &MultilibSet::Maybe(const Multilib &M) {
+  Multilib Opposite;
+  // Negate any '+' flags
+  for (Multilib::flags_list::const_iterator I = M.flags().begin(),
+                                            E = M.flags().end();
+       I != E; ++I) {
+    StringRef Flag(*I);
+    if (Flag.front() == '+')
+      Opposite.flags().push_back(("-" + Flag.substr(1)).str());
+  }
+  return Either(M, Opposite);
+}
+
+MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2) {
+  std::vector<Multilib> Ms;
+  Ms.push_back(M1);
+  Ms.push_back(M2);
+  return Either(Ms);
+}
+
+MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
+                                 const Multilib &M3) {
+  std::vector<Multilib> Ms;
+  Ms.push_back(M1);
+  Ms.push_back(M2);
+  Ms.push_back(M3);
+  return Either(Ms);
+}
+
+MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
+                                 const Multilib &M3, const Multilib &M4) {
+  std::vector<Multilib> Ms;
+  Ms.push_back(M1);
+  Ms.push_back(M2);
+  Ms.push_back(M3);
+  Ms.push_back(M4);
+  return Either(Ms);
+}
+
+MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
+                                 const Multilib &M3, const Multilib &M4,
+                                 const Multilib &M5) {
+  std::vector<Multilib> Ms;
+  Ms.push_back(M1);
+  Ms.push_back(M2);
+  Ms.push_back(M3);
+  Ms.push_back(M4);
+  Ms.push_back(M5);
+  return Either(Ms);
+}
+
+static Multilib compose(const Multilib &Base, const Multilib &New) {
+  SmallString<128> GCCSuffix;
+  llvm::sys::path::append(GCCSuffix, "/", Base.gccSuffix(), New.gccSuffix());
+  SmallString<128> OSSuffix;
+  llvm::sys::path::append(OSSuffix, "/", Base.osSuffix(), New.osSuffix());
+  SmallString<128> IncludeSuffix;
+  llvm::sys::path::append(IncludeSuffix, "/", Base.includeSuffix(),
+                          New.includeSuffix());
+
+  Multilib Composed(GCCSuffix.str(), OSSuffix.str(), IncludeSuffix.str());
+
+  Multilib::flags_list &Flags = Composed.flags();
+
+  Flags.insert(Flags.end(), Base.flags().begin(), Base.flags().end());
+  Flags.insert(Flags.end(), New.flags().begin(), New.flags().end());
+
+  return Composed;
+}
+
+MultilibSet &
+MultilibSet::Either(const std::vector<Multilib> &MultilibSegments) {
+  multilib_list Composed;
+
+  if (Multilibs.empty())
+    Multilibs.insert(Multilibs.end(), MultilibSegments.begin(),
+                     MultilibSegments.end());
+  else {
+    for (std::vector<Multilib>::const_iterator NewI = MultilibSegments.begin(),
+                                               NewE = MultilibSegments.end();
+         NewI != NewE; ++NewI) {
+      for (const_iterator BaseI = begin(), BaseE = end(); BaseI != BaseE;
+           ++BaseI) {
+        Multilib MO = compose(*BaseI, *NewI);
+        if (MO.isValid())
+          Composed.push_back(MO);
+      }
+    }
+
+    Multilibs = Composed;
+  }
+
+  return *this;
+}
+
+MultilibSet &MultilibSet::FilterOut(const MultilibSet::FilterCallback &F) {
+  filterInPlace(F, Multilibs);
+  return *this;
+}
+
+MultilibSet &MultilibSet::FilterOut(std::string Regex) {
+  class REFilter : public MultilibSet::FilterCallback {
+    mutable llvm::Regex R;
+
+  public:
+    REFilter(std::string Regex) : R(Regex) {}
+    bool operator()(const Multilib &M) const override {
+      std::string Error;
+      if (!R.isValid(Error)) {
+        llvm::errs() << Error;
+        assert(false);
+        return false;
+      }
+      return R.match(M.gccSuffix());
+    }
+  };
+
+  REFilter REF(Regex);
+  filterInPlace(REF, Multilibs);
+  return *this;
+}
+
+void MultilibSet::push_back(const Multilib &M) { Multilibs.push_back(M); }
+
+void MultilibSet::combineWith(const MultilibSet &Other) {
+  Multilibs.insert(Multilibs.end(), Other.begin(), Other.end());
+}
+
+bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const {
+  class FilterFlagsMismatch : public MultilibSet::FilterCallback {
+    llvm::StringMap<bool> FlagSet;
+
+  public:
+    FilterFlagsMismatch(const std::vector<std::string> &Flags) {
+      // Stuff all of the flags into the FlagSet such that a true mapping
+      // indicates the flag was enabled, and a false mapping indicates the
+      // flag was disabled.
+      for (Multilib::flags_list::const_iterator I = Flags.begin(),
+                                                E = Flags.end();
+           I != E; ++I) {
+        FlagSet[StringRef(*I).substr(1)] = isFlagEnabled(*I);
+      }
+    }
+    bool operator()(const Multilib &M) const override {
+      for (Multilib::flags_list::const_iterator I = M.flags().begin(),
+                                                E = M.flags().end();
+           I != E; ++I) {
+        StringRef Flag(*I);
+        llvm::StringMap<bool>::const_iterator SI = FlagSet.find(Flag.substr(1));
+        if (SI != FlagSet.end())
+          if ((*SI).getValue() != isFlagEnabled(Flag))
+            return true;
+      }
+      return false;
+    }
+  private:
+    bool isFlagEnabled(StringRef Flag) const {
+      char Indicator = Flag.front();
+      assert(Indicator == '+' || Indicator == '-');
+      return Indicator == '+';
+    }
+  };
+
+  FilterFlagsMismatch FlagsMismatch(Flags);
+
+  multilib_list Filtered = filterCopy(FlagsMismatch, Multilibs);
+
+  if (Filtered.size() == 0) {
+    return false;
+  } else if (Filtered.size() == 1) {
+    M = Filtered[0];
+    return true;
+  }
+
+  // TODO: pick the "best" multilib when more than one is suitable.
+  assert(false);
+
+  return false;
+}
+
+void MultilibSet::print(raw_ostream &OS) const {
+  for (const_iterator I = begin(), E = end(); I != E; ++I)
+    OS << *I << "\n";
+}
+
+MultilibSet::multilib_list
+MultilibSet::filterCopy(const MultilibSet::FilterCallback &F,
+                        const multilib_list &Ms) {
+  multilib_list Copy(Ms);
+  filterInPlace(F, Copy);
+  return Copy;
+}
+
+void MultilibSet::filterInPlace(const MultilibSet::FilterCallback &F,
+                                multilib_list &Ms) {
+  Ms.erase(std::remove_if(Ms.begin(), Ms.end(),
+                          [&F](const Multilib &M) { return F(M); }),
+           Ms.end());
+}
+
+raw_ostream &clang::driver::operator<<(raw_ostream &OS, const MultilibSet &MS) {
+  MS.print(OS);
+  return OS;
+}
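
A rough usage sketch of the Multilib/MultilibSet interface introduced above (header path as in the includes; that MultilibSet is default-constructible is assumed, and the driver normally builds these sets inside the toolchains rather than in a standalone program):

    #include "clang/Driver/Multilib.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang::driver;

    int main() {
      // A 64-bit variant under a "/64" GCC suffix, guarded by the "+m64" flag.
      Multilib M64 = Multilib().gccSuffix("/64").includeSuffix("/64");
      M64.flags().push_back("+m64");

      // Maybe() pairs M64 with its negation ("-m64", empty suffixes), so the
      // set describes both the default layout and the 64-bit one.
      MultilibSet Libs;
      Libs.Maybe(M64);

      // select() keeps the members whose flags do not contradict the requested
      // ones and succeeds when exactly one remains.
      Multilib::flags_list Flags;
      Flags.push_back("+m64");
      Multilib Selected;
      if (Libs.select(Flags, Selected))
        llvm::outs() << Selected << "\n"; // "64;@m64", per Multilib::print above
      return 0;
    }
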
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index 43209f0..78c8d18 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -7,16 +7,16 @@
 //
 //===----------------------------------------------------------------------===//
 #include "clang/Driver/SanitizerArgs.h"
-
 #include "clang/Driver/Driver.h"
 #include "clang/Driver/DriverDiagnostic.h"
 #include "clang/Driver/Options.h"
 #include "clang/Driver/ToolChain.h"
-#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Transforms/Utils/SpecialCaseList.h"
+#include <memory>
 
 using namespace clang::driver;
 using namespace llvm::opt;
@@ -24,9 +24,10 @@
 void SanitizerArgs::clear() {
   Kind = 0;
   BlacklistFile = "";
-  MsanTrackOrigins = false;
+  MsanTrackOrigins = 0;
   AsanZeroBaseShadow = false;
   UbsanTrapOnError = false;
+  AsanSharedRuntime = false;
 }
 
 SanitizerArgs::SanitizerArgs() {
@@ -72,30 +73,14 @@
   }
 
   UbsanTrapOnError =
-    Args.hasArg(options::OPT_fcatch_undefined_behavior) ||
     Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error,
                  options::OPT_fno_sanitize_undefined_trap_on_error, false);
 
-  if (Args.hasArg(options::OPT_fcatch_undefined_behavior) &&
-      !Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error,
-                    options::OPT_fno_sanitize_undefined_trap_on_error, true)) {
-    D.Diag(diag::err_drv_argument_not_allowed_with)
-      << "-fcatch-undefined-behavior"
-      << "-fno-sanitize-undefined-trap-on-error";
-  }
-
   // Warn about undefined sanitizer options that require runtime support.
   if (UbsanTrapOnError && notAllowedWithTrap()) {
-    if (Args.hasArg(options::OPT_fcatch_undefined_behavior))
-      D.Diag(diag::err_drv_argument_not_allowed_with)
-        << lastArgumentForKind(D, Args, NotAllowedWithTrap)
-        << "-fcatch-undefined-behavior";
-    else if (Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error,
-                          options::OPT_fno_sanitize_undefined_trap_on_error,
-                          false))
-      D.Diag(diag::err_drv_argument_not_allowed_with)
-        << lastArgumentForKind(D, Args, NotAllowedWithTrap)
-        << "-fsanitize-undefined-trap-on-error";
+    D.Diag(diag::err_drv_argument_not_allowed_with)
+      << lastArgumentForKind(D, Args, NotAllowedWithTrap)
+      << "-fsanitize-undefined-trap-on-error";
   }
 
   // Only one runtime library can be used at once.
@@ -123,7 +108,7 @@
     D.Diag(diag::err_drv_argument_not_allowed_with)
       << lastArgumentForKind(D, Args, NeedsLeakDetection)
       << lastArgumentForKind(D, Args, NeedsMsanRt);
-  // FIXME: Currenly -fsanitize=leak is silently ignored in the presence of
+  // FIXME: Currently -fsanitize=leak is silently ignored in the presence of
   // -fsanitize=address. Perhaps it should print an error, or perhaps
   // -f(-no)sanitize=leak should change whether leak detection is enabled by
   // default in ASan?
@@ -144,7 +129,7 @@
       if (llvm::sys::fs::exists(BLPath)) {
         // Validate the blacklist format.
         std::string BLError;
-        llvm::OwningPtr<llvm::SpecialCaseList> SCL(
+        std::unique_ptr<llvm::SpecialCaseList> SCL(
             llvm::SpecialCaseList::create(BLPath, BLError));
         if (!SCL.get())
           D.Diag(diag::err_drv_malformed_sanitizer_blacklist) << BLError;
@@ -163,28 +148,34 @@
       BlacklistFile = BLPath;
   }
 
-  // Parse -f(no-)sanitize-memory-track-origins options.
-  if (NeedsMsan)
-    MsanTrackOrigins =
-      Args.hasFlag(options::OPT_fsanitize_memory_track_origins,
-                   options::OPT_fno_sanitize_memory_track_origins,
-                   /* Default */false);
-
-  // Parse -f(no-)sanitize-address-zero-base-shadow options.
-  if (NeedsAsan) {
-    bool IsAndroid = (TC.getTriple().getEnvironment() == llvm::Triple::Android);
-    bool ZeroBaseShadowDefault = IsAndroid;
-    AsanZeroBaseShadow =
-        Args.hasFlag(options::OPT_fsanitize_address_zero_base_shadow,
-                     options::OPT_fno_sanitize_address_zero_base_shadow,
-                     ZeroBaseShadowDefault);
-    // Zero-base shadow is a requirement on Android.
-    if (IsAndroid && !AsanZeroBaseShadow) {
-      D.Diag(diag::err_drv_argument_not_allowed_with)
-          << "-fno-sanitize-address-zero-base-shadow"
-          << lastArgumentForKind(D, Args, Address);
+  // Parse -f[no-]sanitize-memory-track-origins[=level] options.
+  if (NeedsMsan) {
+    if (Arg *A =
+            Args.getLastArg(options::OPT_fsanitize_memory_track_origins_EQ,
+                            options::OPT_fsanitize_memory_track_origins,
+                            options::OPT_fno_sanitize_memory_track_origins)) {
+      if (A->getOption().matches(options::OPT_fsanitize_memory_track_origins)) {
+        MsanTrackOrigins = 1;
+      } else if (A->getOption().matches(
+                     options::OPT_fno_sanitize_memory_track_origins)) {
+        MsanTrackOrigins = 0;
+      } else {
+        StringRef S = A->getValue();
+        if (S.getAsInteger(0, MsanTrackOrigins) || MsanTrackOrigins < 0 ||
+            MsanTrackOrigins > 2) {
+          D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+        }
+      }
     }
   }
+
+  if (NeedsAsan) {
+    AsanSharedRuntime =
+      (TC.getTriple().getEnvironment() == llvm::Triple::Android) ||
+      Args.hasArg(options::OPT_shared_libasan);
+    AsanZeroBaseShadow =
+        (TC.getTriple().getEnvironment() == llvm::Triple::Android);
+  }
 }
 
 void SanitizerArgs::addArgs(const llvm::opt::ArgList &Args,
@@ -205,11 +196,8 @@
   }
 
   if (MsanTrackOrigins)
-    CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-track-origins"));
-
-  if (AsanZeroBaseShadow)
-    CmdArgs.push_back(
-        Args.MakeArgString("-fsanitize-address-zero-base-shadow"));
+    CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-track-origins=" +
+                                         llvm::utostr(MsanTrackOrigins)));
 
   // Workaround for PR16386.
   if (needsMsanRt())
@@ -296,28 +284,7 @@
                           unsigned &Remove, bool DiagnoseErrors) {
   Add = 0;
   Remove = 0;
-  const char *DeprecatedReplacement = 0;
-  if (A->getOption().matches(options::OPT_faddress_sanitizer)) {
-    Add = Address;
-    DeprecatedReplacement = "-fsanitize=address";
-  } else if (A->getOption().matches(options::OPT_fno_address_sanitizer)) {
-    Remove = Address;
-    DeprecatedReplacement = "-fno-sanitize=address";
-  } else if (A->getOption().matches(options::OPT_fthread_sanitizer)) {
-    Add = Thread;
-    DeprecatedReplacement = "-fsanitize=thread";
-  } else if (A->getOption().matches(options::OPT_fno_thread_sanitizer)) {
-    Remove = Thread;
-    DeprecatedReplacement = "-fno-sanitize=thread";
-  } else if (A->getOption().matches(options::OPT_fcatch_undefined_behavior)) {
-    Add = UndefinedTrap;
-    DeprecatedReplacement =
-      "-fsanitize=undefined-trap -fsanitize-undefined-trap-on-error";
-  } else if (A->getOption().matches(options::OPT_fbounds_checking) ||
-             A->getOption().matches(options::OPT_fbounds_checking_EQ)) {
-    Add = LocalBounds;
-    DeprecatedReplacement = "-fsanitize=local-bounds";
-  } else if (A->getOption().matches(options::OPT_fsanitize_EQ)) {
+  if (A->getOption().matches(options::OPT_fsanitize_EQ)) {
     Add = parse(D, A, DiagnoseErrors);
   } else if (A->getOption().matches(options::OPT_fno_sanitize_EQ)) {
     Remove = parse(D, A, DiagnoseErrors);
@@ -325,11 +292,6 @@
     // Flag is not relevant to sanitizers.
     return false;
   }
-  // If this is a deprecated synonym, produce a warning directing users
-  // towards the new spelling.
-  if (DeprecatedReplacement && DiagnoseErrors)
-    D.Diag(diag::warn_drv_deprecated_arg)
-      << A->getAsString(Args) << DeprecatedReplacement;
   return true;
 }
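
With -fsanitize-memory-track-origins now taking an optional level, the accepted values are: the bare flag means level 1, "=N" accepts 0 through 2, and anything else is diagnosed via err_drv_invalid_value. A self-contained sketch of that validation (the free function is illustrative, not driver API):

    #include "llvm/ADT/StringRef.h"
    #include <cassert>

    // Illustrative check mirroring the hunk above: empty value = bare flag
    // (level 1); "=N" must parse as an integer in [0, 2].
    static bool parseTrackOriginsLevel(llvm::StringRef Value, int &Level) {
      if (Value.empty()) {
        Level = 1;
        return true;
      }
      // StringRef::getAsInteger returns true on parse failure.
      return !Value.getAsInteger(0, Level) && Level >= 0 && Level <= 2;
    }

    int main() {
      int L = -1;
      assert(parseTrackOriginsLevel("", L) && L == 1);
      assert(parseTrackOriginsLevel("2", L) && L == 2);
      assert(!parseTrackOriginsLevel("7", L)); // out of range, driver errors
      assert(!parseTrackOriginsLevel("x", L)); // not an integer
      return 0;
    }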
 
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index efd3945..d164edd 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -38,8 +38,8 @@
 }
 
 bool ToolChain::useIntegratedAs() const {
-  return Args.hasFlag(options::OPT_integrated_as,
-                      options::OPT_no_integrated_as,
+  return Args.hasFlag(options::OPT_fintegrated_as,
+                      options::OPT_fno_integrated_as,
                       IsIntegratedAssemblerDefault());
 }
 
@@ -114,7 +114,7 @@
   case Action::BindArchClass:
   case Action::LipoJobClass:
   case Action::DsymutilJobClass:
-  case Action::VerifyJobClass:
+  case Action::VerifyDebugInfoJobClass:
     llvm_unreachable("Invalid tool kind.");
 
   case Action::CompileJobClass:
@@ -122,6 +122,7 @@
   case Action::PreprocessJobClass:
   case Action::AnalyzeJobClass:
   case Action::MigrateJobClass:
+  case Action::VerifyPCHJobClass:
     return getClang();
   }
 
@@ -154,109 +155,27 @@
   return false;
 }
 
+bool ToolChain::isCrossCompiling() const {
+  llvm::Triple HostTriple(LLVM_HOST_TRIPLE);
+  switch (HostTriple.getArch()) {
+  // The A32/T32/T16 instruction sets are not separate architectures in this
+  // context.
+  case llvm::Triple::arm:
+  case llvm::Triple::armeb:
+  case llvm::Triple::thumb:
+  case llvm::Triple::thumbeb:
+    return getArch() != llvm::Triple::arm && getArch() != llvm::Triple::thumb &&
+           getArch() != llvm::Triple::armeb && getArch() != llvm::Triple::thumbeb;
+  default:
+    return HostTriple.getArch() != getArch();
+  }
+}
+
 ObjCRuntime ToolChain::getDefaultObjCRuntime(bool isNonFragile) const {
   return ObjCRuntime(isNonFragile ? ObjCRuntime::GNUstep : ObjCRuntime::GCC,
                      VersionTuple());
 }
 
-/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
-//
-// FIXME: tblgen this.
-static const char *getARMTargetCPU(const ArgList &Args,
-                                   const llvm::Triple &Triple) {
-  // For Darwin targets, the -arch option (which is translated to a
-  // corresponding -march option) should determine the architecture
-  // (and the Mach-O slice) regardless of any -mcpu options.
-  if (!Triple.isOSDarwin()) {
-    // FIXME: Warn on inconsistent use of -mcpu and -march.
-    // If we have -mcpu=, use that.
-    if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
-      return A->getValue();
-  }
-
-  StringRef MArch;
-  if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
-    // Otherwise, if we have -march= choose the base CPU for that arch.
-    MArch = A->getValue();
-  } else {
-    // Otherwise, use the Arch from the triple.
-    MArch = Triple.getArchName();
-  }
-
-  if (Triple.getOS() == llvm::Triple::NetBSD) {
-    if (MArch == "armv6")
-      return "arm1176jzf-s";
-  }
-
-  const char *result = llvm::StringSwitch<const char *>(MArch)
-    .Cases("armv2", "armv2a","arm2")
-    .Case("armv3", "arm6")
-    .Case("armv3m", "arm7m")
-    .Case("armv4", "strongarm")
-    .Case("armv4t", "arm7tdmi")
-    .Cases("armv5", "armv5t", "arm10tdmi")
-    .Cases("armv5e", "armv5te", "arm1026ejs")
-    .Case("armv5tej", "arm926ej-s")
-    .Cases("armv6", "armv6k", "arm1136jf-s")
-    .Case("armv6j", "arm1136j-s")
-    .Cases("armv6z", "armv6zk", "arm1176jzf-s")
-    .Case("armv6t2", "arm1156t2-s")
-    .Cases("armv6m", "armv6-m", "cortex-m0")
-    .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
-    .Cases("armv7l", "armv7-l", "cortex-a8")
-    .Cases("armv7f", "armv7-f", "cortex-a9-mp")
-    .Cases("armv7s", "armv7-s", "swift")
-    .Cases("armv7r", "armv7-r", "cortex-r4")
-    .Cases("armv7m", "armv7-m", "cortex-m3")
-    .Cases("armv7em", "armv7e-m", "cortex-m4")
-    .Cases("armv8", "armv8a", "armv8-a", "cortex-a53")
-    .Case("ep9312", "ep9312")
-    .Case("iwmmxt", "iwmmxt")
-    .Case("xscale", "xscale")
-    // If all else failed, return the most base CPU with thumb interworking
-    // supported by LLVM.
-    .Default(0);
-
-  if (result)
-    return result;
-
-  return
-    Triple.getEnvironment() == llvm::Triple::GNUEABIHF
-      ? "arm1176jzf-s"
-      : "arm7tdmi";
-}
-
-/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
-/// CPU.
-//
-// FIXME: This is redundant with -mcpu, why does LLVM use this.
-// FIXME: tblgen this, or kill it!
-static const char *getLLVMArchSuffixForARM(StringRef CPU) {
-  return llvm::StringSwitch<const char *>(CPU)
-    .Case("strongarm", "v4")
-    .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
-    .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
-    .Cases("arm920", "arm920t", "arm922t", "v4t")
-    .Cases("arm940t", "ep9312","v4t")
-    .Cases("arm10tdmi",  "arm1020t", "v5")
-    .Cases("arm9e",  "arm926ej-s",  "arm946e-s", "v5e")
-    .Cases("arm966e-s",  "arm968e-s",  "arm10e", "v5e")
-    .Cases("arm1020e",  "arm1022e",  "xscale", "iwmmxt", "v5e")
-    .Cases("arm1136j-s",  "arm1136jf-s",  "arm1176jz-s", "v6")
-    .Cases("arm1176jzf-s",  "mpcorenovfp",  "mpcore", "v6")
-    .Cases("arm1156t2-s",  "arm1156t2f-s", "v6t2")
-    .Cases("cortex-a5", "cortex-a7", "cortex-a8", "v7")
-    .Cases("cortex-a9", "cortex-a12", "cortex-a15", "v7")
-    .Cases("cortex-r4", "cortex-r5", "v7r")
-    .Case("cortex-m0", "v6m")
-    .Case("cortex-m3", "v7m")
-    .Case("cortex-m4", "v7em")
-    .Case("cortex-a9-mp", "v7f")
-    .Case("swift", "v7s")
-    .Cases("cortex-a53", "cortex-a57", "v8")
-    .Default("");
-}
-
 std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
                                          types::ID InputType) const {
   switch (getTriple().getArch()) {
@@ -265,7 +184,7 @@
 
   case llvm::Triple::x86_64: {
     llvm::Triple Triple = getTriple();
-    if (!Triple.isOSDarwin())
+    if (!Triple.isOSBinFormatMachO())
       return getTripleString();
 
     if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
@@ -278,23 +197,38 @@
     return Triple.getTriple();
   }
   case llvm::Triple::arm:
-  case llvm::Triple::thumb: {
+  case llvm::Triple::armeb:
+  case llvm::Triple::thumb:
+  case llvm::Triple::thumbeb: {
     // FIXME: Factor into subclasses.
     llvm::Triple Triple = getTriple();
+    bool IsBigEndian = getTriple().getArch() == llvm::Triple::armeb ||
+                       getTriple().getArch() == llvm::Triple::thumbeb;
 
     // Thumb2 is the default for V7 on Darwin.
     //
     // FIXME: Thumb should just be another -target-feaure, not in the triple.
-    StringRef Suffix =
-      getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
-    bool ThumbDefault = Suffix.startswith("v6m") ||
-      (Suffix.startswith("v7") && getTriple().isOSDarwin());
-    std::string ArchName = "arm";
+    StringRef Suffix = Triple.isOSBinFormatMachO()
+      ? tools::arm::getLLVMArchSuffixForARM(tools::arm::getARMCPUForMArch(Args, Triple))
+      : tools::arm::getLLVMArchSuffixForARM(tools::arm::getARMTargetCPU(Args, Triple));
+    bool ThumbDefault = Suffix.startswith("v6m") || Suffix.startswith("v7m") ||
+      Suffix.startswith("v7em") ||
+      (Suffix.startswith("v7") && getTriple().isOSBinFormatMachO());
+    std::string ArchName;
+    if (IsBigEndian)
+      ArchName = "armeb";
+    else
+      ArchName = "arm";
 
     // Assembly files should start in ARM mode.
     if (InputType != types::TY_PP_Asm &&
         Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb, ThumbDefault))
-      ArchName = "thumb";
+    {
+      if (IsBigEndian)
+        ArchName = "thumbeb";
+      else
+        ArchName = "thumb";
+    }
     Triple.setArchName(ArchName + Suffix.str());
 
     return Triple.getTriple();
@@ -304,13 +238,6 @@
 
 std::string ToolChain::ComputeEffectiveClangTriple(const ArgList &Args, 
                                                    types::ID InputType) const {
-  // Diagnose use of Darwin OS deployment target arguments on non-Darwin.
-  if (Arg *A = Args.getLastArg(options::OPT_mmacosx_version_min_EQ,
-                               options::OPT_miphoneos_version_min_EQ,
-                               options::OPT_mios_simulator_version_min_EQ))
-    getDriver().Diag(diag::err_drv_clang_unsupported)
-      << A->getAsString(Args);
-
   return ComputeLLVMTriple(Args, InputType);
 }
 
@@ -323,6 +250,8 @@
                                       ArgStringList &CC1Args) const {
 }
 
+void ToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {}
+
 ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
   const ArgList &Args) const
 {
@@ -430,16 +359,19 @@
 
 bool ToolChain::AddFastMathRuntimeIfAvailable(const ArgList &Args,
                                               ArgStringList &CmdArgs) const {
-  // Check if -ffast-math or -funsafe-math is enabled.
-  Arg *A = Args.getLastArg(options::OPT_ffast_math,
-                           options::OPT_fno_fast_math,
-                           options::OPT_funsafe_math_optimizations,
-                           options::OPT_fno_unsafe_math_optimizations);
+  // Do not check for -fno-fast-math or -fno-unsafe-math when -Ofast is passed
+  // (to keep the linker options consistent with gcc and clang itself).
+  if (!isOptimizationLevelFast(Args)) {
+    // Check if -ffast-math or -funsafe-math is enabled.
+    Arg *A =
+        Args.getLastArg(options::OPT_ffast_math, options::OPT_fno_fast_math,
+                        options::OPT_funsafe_math_optimizations,
+                        options::OPT_fno_unsafe_math_optimizations);
 
-  if (!A || A->getOption().getID() == options::OPT_fno_fast_math ||
-      A->getOption().getID() == options::OPT_fno_unsafe_math_optimizations)
-    return false;
-
+    if (!A || A->getOption().getID() == options::OPT_fno_fast_math ||
+        A->getOption().getID() == options::OPT_fno_unsafe_math_optimizations)
+      return false;
+  }
   // If crtfastmath.o exists add it to the arguments.
   std::string Path = GetFilePath("crtfastmath.o");
   if (Path == "crtfastmath.o") // Not found.
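
ToolChain::isCrossCompiling(), added above, compares the configured host triple against the toolchain's target arch while folding ARM/Thumb (and their big-endian variants) into one family. A standalone restatement, with both triples taken as parameters for illustration:

    #include "llvm/ADT/Triple.h"
    #include <cassert>

    // A32/T32/T16 count as the same architecture for this purpose.
    static bool isArmFamily(llvm::Triple::ArchType A) {
      return A == llvm::Triple::arm || A == llvm::Triple::armeb ||
             A == llvm::Triple::thumb || A == llvm::Triple::thumbeb;
    }

    static bool isCrossCompiling(const llvm::Triple &Host,
                                 const llvm::Triple &Target) {
      if (isArmFamily(Host.getArch()))
        return !isArmFamily(Target.getArch());
      return Host.getArch() != Target.getArch();
    }

    int main() {
      // An ARM host building Thumb code is not cross-compiling.
      assert(!isCrossCompiling(llvm::Triple("armv7-none-eabi"),
                               llvm::Triple("thumbv7m-none-eabi")));
      // An x86-64 host building for ARM is.
      assert(isCrossCompiling(llvm::Triple("x86_64-unknown-linux-gnu"),
                              llvm::Triple("armv7-none-eabi")));
      return 0;
    }
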
diff --git a/lib/Driver/ToolChains.cpp b/lib/Driver/ToolChains.cpp
index e5528f0..c1f7ccc 100644
--- a/lib/Driver/ToolChains.cpp
+++ b/lib/Driver/ToolChains.cpp
@@ -27,9 +27,9 @@
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/system_error.h"
-#include "llvm/Support/Program.h"
 
 // FIXME: This needs to be listed last until we fix the broken include guards
 // in these files and the LLVM config.h files.
@@ -42,11 +42,15 @@
 using namespace clang;
 using namespace llvm::opt;
 
-/// Darwin - Darwin tool chain for i386 and x86_64.
+MachO::MachO(const Driver &D, const llvm::Triple &Triple,
+                       const ArgList &Args)
+  : ToolChain(D, Triple, Args) {
+}
 
-Darwin::Darwin(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
-  : ToolChain(D, Triple, Args), TargetInitialized(false)
-{
+/// Darwin - Darwin tool chain for i386 and x86_64.
+Darwin::Darwin(const Driver &D, const llvm::Triple &Triple,
+               const ArgList &Args)
+  : MachO(D, Triple, Args), TargetInitialized(false) {
   // Compute the initial Darwin version from the triple
   unsigned Major, Minor, Micro;
   if (!Triple.getMacOSXVersion(Major, Minor, Micro))
@@ -67,7 +71,7 @@
     << Major << '.' << Minor << '.' << Micro;
 }
 
-types::ID Darwin::LookupTypeForExtension(const char *Ext) const {
+types::ID MachO::LookupTypeForExtension(const char *Ext) const {
   types::ID Ty = types::lookupTypeForExtension(Ext);
 
   // Darwin always preprocesses assembly files (unless -x is used explicitly).
@@ -77,13 +81,13 @@
   return Ty;
 }
 
-bool Darwin::HasNativeLLVMSupport() const {
+bool MachO::HasNativeLLVMSupport() const {
   return true;
 }
 
 /// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
 ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
-  if (isTargetIPhoneOS())
+  if (isTargetIOSBased())
     return ObjCRuntime(ObjCRuntime::iOS, TargetVersion);
   if (isNonFragile)
     return ObjCRuntime(ObjCRuntime::MacOSX, TargetVersion);
@@ -92,10 +96,12 @@
 
 /// Darwin provides a blocks runtime starting in MacOS X 10.6 and iOS 3.2.
 bool Darwin::hasBlocksRuntime() const {
-  if (isTargetIPhoneOS())
+  if (isTargetIOSBased())
     return !isIPhoneOSVersionLT(3, 2);
-  else
+  else {
+    assert(isTargetMacOS() && "unexpected darwin target");
     return !isMacosxVersionLT(10, 6);
+  }
 }
 
 static const char *GetArmArchForMArch(StringRef Value) {
@@ -109,7 +115,6 @@
     .Cases("armv7a", "armv7-a", "armv7")
     .Cases("armv7r", "armv7-r", "armv7")
     .Cases("armv7em", "armv7e-m", "armv7em")
-    .Cases("armv7f", "armv7-f", "armv7f")
     .Cases("armv7k", "armv7-k", "armv7k")
     .Cases("armv7m", "armv7-m", "armv7m")
     .Cases("armv7s", "armv7-s", "armv7s")
@@ -124,17 +129,27 @@
     .Case("xscale", "xscale")
     .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "arm1176jzf-s", "armv6")
     .Case("cortex-m0", "armv6m")
-    .Cases("cortex-a5", "cortex-a7", "cortex-a8", "armv7")
-    .Cases("cortex-a9", "cortex-a12", "cortex-a15", "armv7")
+    .Cases("cortex-a5", "cortex-a7", "cortex-a8", "cortex-a9-mp", "armv7")
+    .Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "armv7")
     .Cases("cortex-r4", "cortex-r5", "armv7r")
-    .Case("cortex-a9-mp", "armv7f")
     .Case("cortex-m3", "armv7m")
     .Case("cortex-m4", "armv7em")
     .Case("swift", "armv7s")
     .Default(0);
 }
 
-StringRef Darwin::getDarwinArchName(const ArgList &Args) const {
+static bool isSoftFloatABI(const ArgList &Args) {
+  Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+                           options::OPT_mfloat_abi_EQ);
+  if (!A)
+    return false;
+
+  return A->getOption().matches(options::OPT_msoft_float) ||
+         (A->getOption().matches(options::OPT_mfloat_abi_EQ) &&
+          A->getValue() == StringRef("soft"));
+}
+
+StringRef MachO::getMachOArchName(const ArgList &Args) const {
   switch (getTriple().getArch()) {
   default:
     return getArchName();
@@ -157,6 +172,17 @@
 Darwin::~Darwin() {
 }
 
+MachO::~MachO() {
+}
+
+
+std::string MachO::ComputeEffectiveClangTriple(const ArgList &Args,
+                                                    types::ID InputType) const {
+  llvm::Triple Triple(ComputeLLVMTriple(Args, InputType));
+
+  return Triple.getTriple();
+}
+
 std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
                                                 types::ID InputType) const {
   llvm::Triple Triple(ComputeLLVMTriple(Args, InputType));
@@ -166,25 +192,17 @@
   if (!isTargetInitialized())
     return Triple.getTriple();
 
-  if (Triple.getArchName() == "thumbv6m" ||
-      Triple.getArchName() == "thumbv7m" ||
-      Triple.getArchName() == "thumbv7em") {
-    // OS is ios or macosx unless it's the v6m or v7m.
-    Triple.setOS(llvm::Triple::Darwin);
-    Triple.setEnvironment(llvm::Triple::EABI);
-  } else {
-    SmallString<16> Str;
-    Str += isTargetIPhoneOS() ? "ios" : "macosx";
-    Str += getTargetVersion().getAsString();
-    Triple.setOSName(Str);
-  }
+  SmallString<16> Str;
+  Str += isTargetIOSBased() ? "ios" : "macosx";
+  Str += getTargetVersion().getAsString();
+  Triple.setOSName(Str);
 
   return Triple.getTriple();
 }
 
 void Generic_ELF::anchor() {}
 
-Tool *Darwin::getTool(Action::ActionClass AC) const {
+Tool *MachO::getTool(Action::ActionClass AC) const {
   switch (AC) {
   case Action::LipoJobClass:
     if (!Lipo)
@@ -194,7 +212,7 @@
     if (!Dsymutil)
       Dsymutil.reset(new tools::darwin::Dsymutil(*this));
     return Dsymutil.get();
-  case Action::VerifyJobClass:
+  case Action::VerifyDebugInfoJobClass:
     if (!VerifyDebug)
       VerifyDebug.reset(new tools::darwin::VerifyDebug(*this));
     return VerifyDebug.get();
@@ -203,18 +221,17 @@
   }
 }
 
-Tool *Darwin::buildLinker() const {
+Tool *MachO::buildLinker() const {
   return new tools::darwin::Link(*this);
 }
 
-Tool *Darwin::buildAssembler() const {
+Tool *MachO::buildAssembler() const {
   return new tools::darwin::Assemble(*this);
 }
 
 DarwinClang::DarwinClang(const Driver &D, const llvm::Triple& Triple,
                          const ArgList &Args)
-  : Darwin(D, Triple, Args)
-{
+  : Darwin(D, Triple, Args) {
   getProgramPaths().push_back(getDriver().getInstalledDir());
   if (getDriver().getInstalledDir() != getDriver().Dir)
     getProgramPaths().push_back(getDriver().Dir);
@@ -225,8 +242,37 @@
     getProgramPaths().push_back(getDriver().Dir);
 }
 
+void DarwinClang::addClangWarningOptions(ArgStringList &CC1Args) const {
+  // For iOS, 64-bit, promote certain warnings to errors.
+  if (!isTargetMacOS() && getTriple().isArch64Bit()) {
+    // Always enable -Wdeprecated-objc-isa-usage and promote it
+    // to an error.
+    CC1Args.push_back("-Wdeprecated-objc-isa-usage");
+    CC1Args.push_back("-Werror=deprecated-objc-isa-usage");
+
+    // Also error about implicit function declarations, as that
+    // can impact calling conventions.
+    CC1Args.push_back("-Werror=implicit-function-declaration");
+  }
+}
+
+/// \brief Determine whether Objective-C automated reference counting is
+/// enabled.
+static bool isObjCAutoRefCount(const ArgList &Args) {
+  return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false);
+}
+
 void DarwinClang::AddLinkARCArgs(const ArgList &Args,
                                  ArgStringList &CmdArgs) const {
+  // Avoid linking compatibility stubs on i386 mac.
+  if (isTargetMacOS() && getArch() == llvm::Triple::x86)
+    return;
+
+  ObjCRuntime runtime = getDefaultObjCRuntime(/*nonfragile*/ true);
+
+  if ((runtime.hasNativeARC() || !isObjCAutoRefCount(Args)) &&
+      runtime.hasSubscripting())
+    return;
 
   CmdArgs.push_back("-force_load");
   SmallString<128> P(getDriver().ClangExecutable);
@@ -245,12 +291,12 @@
   CmdArgs.push_back(Args.MakeArgString(P));
 }
 
-void DarwinClang::AddLinkRuntimeLib(const ArgList &Args,
-                                    ArgStringList &CmdArgs,
-                                    const char *DarwinStaticLib,
-                                    bool AlwaysLink) const {
+void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
+                              StringRef DarwinStaticLib, bool AlwaysLink,
+                              bool IsEmbedded) const {
   SmallString<128> P(getDriver().ResourceDir);
-  llvm::sys::path::append(P, "lib", "darwin", DarwinStaticLib);
+  llvm::sys::path::append(P, "lib", IsEmbedded ? "macho_embedded" : "darwin",
+                          DarwinStaticLib);
 
   // For now, allow missing resource libraries to support developers who may
   // not have compiler-rt checked out or integrated into their build (unless
@@ -290,14 +336,14 @@
   // If we are building profile support, link that library in.
   if (Args.hasArg(options::OPT_fprofile_arcs) ||
       Args.hasArg(options::OPT_fprofile_generate) ||
+      Args.hasArg(options::OPT_fprofile_instr_generate) ||
       Args.hasArg(options::OPT_fcreate_profile) ||
       Args.hasArg(options::OPT_coverage)) {
     // Select the appropriate runtime library for the target.
-    if (isTargetIPhoneOS()) {
+    if (isTargetIOSBased())
       AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_ios.a");
-    } else {
+    else
       AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_osx.a");
-    }
   }
 
   const SanitizerArgs &Sanitize = getSanitizerArgs();
@@ -305,10 +351,11 @@
   // Add Ubsan runtime library, if required.
   if (Sanitize.needsUbsanRt()) {
     // FIXME: Move this check to SanitizerArgs::filterUnsupportedKinds.
-    if (isTargetIPhoneOS()) {
+    if (isTargetIOSBased()) {
       getDriver().Diag(diag::err_drv_clang_unsupported_per_platform)
         << "-fsanitize=undefined";
     } else {
+      assert(isTargetMacOS() && "unexpected non OS X target");
       AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.ubsan_osx.a", true);
 
       // The Ubsan runtime library requires C++.
@@ -320,7 +367,7 @@
   // should not be linked with the runtime library.
   if (Sanitize.needsAsanRt()) {
     // FIXME: Move this check to SanitizerArgs::filterUnsupportedKinds.
-    if (isTargetIPhoneOS() && !isTargetIOSSimulator()) {
+    if (isTargetIPhoneOS()) {
       getDriver().Diag(diag::err_drv_clang_unsupported_per_platform)
         << "-fsanitize=address";
     } else {
@@ -348,16 +395,18 @@
   CmdArgs.push_back("-lSystem");
 
   // Select the dynamic runtime library and the target specific static library.
-  if (isTargetIPhoneOS()) {
+  if (isTargetIOSBased()) {
     // If we are compiling as iOS / simulator, don't attempt to link libgcc_s.1,
     // it never went into the SDK.
     // Linking against libgcc_s.1 isn't needed for iOS 5.0+
-    if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator())
+    if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator() &&
+        getTriple().getArch() != llvm::Triple::arm64)
       CmdArgs.push_back("-lgcc_s.1");
 
     // We currently always need a static runtime library for iOS.
     AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.ios.a");
   } else {
+    assert(isTargetMacOS() && "unexpected non MacOS platform");
     // The dynamic runtime library was merged with libSystem for 10.6 and
     // beyond; only 10.4 and 10.5 need an additional runtime library.
     if (isMacosxVersionLT(10, 5))
@@ -440,7 +489,7 @@
       if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
         StringRef first, second;
         StringRef isysroot = A->getValue();
-        llvm::tie(first, second) = isysroot.split(StringRef("SDKs/iPhoneOS"));
+        std::tie(first, second) = isysroot.split(StringRef("SDKs/iPhoneOS"));
         if (second != "")
           iOSTarget = second.substr(0,3);
       }
@@ -448,9 +497,10 @@
 
     // If no OSX or iOS target has been specified and we're compiling for armv7,
     // go ahead as assume we're targeting iOS.
+    StringRef MachOArchName = getMachOArchName(Args);
     if (OSXTarget.empty() && iOSTarget.empty() &&
-        (getDarwinArchName(Args) == "armv7" ||
-         getDarwinArchName(Args) == "armv7s"))
+        (MachOArchName == "armv7" || MachOArchName == "armv7s" ||
+         MachOArchName == "arm64"))
         iOSTarget = iOSVersionMin;
 
     // Handle conflicting deployment targets
@@ -469,6 +519,7 @@
     // default platform.
     if (!OSXTarget.empty() && !iOSTarget.empty()) {
       if (getTriple().getArch() == llvm::Triple::arm ||
+          getTriple().getArch() == llvm::Triple::arm64 ||
           getTriple().getArch() == llvm::Triple::thumb)
         OSXTarget = "";
       else
@@ -488,7 +539,8 @@
         options::OPT_mios_simulator_version_min_EQ);
       iOSSimVersion = Args.MakeJoinedArg(0, O, iOSSimTarget);
       Args.append(iOSSimVersion);
-    } else {
+    } else if (MachOArchName != "armv6m" && MachOArchName != "armv7m" &&
+               MachOArchName != "armv7em") {
       // Otherwise, assume we are targeting OS X.
       const Option O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
       OSXVersion = Args.MakeJoinedArg(0, O, MacosxVersionMin);
@@ -496,6 +548,16 @@
     }
   }
 
+  DarwinPlatformKind Platform;
+  if (OSXVersion)
+    Platform = MacOS;
+  else if (iOSVersion)
+    Platform = IPhoneOS;
+  else if (iOSSimVersion)
+    Platform = IPhoneOSSimulator;
+  else
+    llvm_unreachable("Unable to infer Darwin variant");
+
   // Reject invalid architecture combinations.
   if (iOSSimVersion && (getTriple().getArch() != llvm::Triple::x86 &&
                         getTriple().getArch() != llvm::Triple::x86_64)) {
@@ -506,14 +568,14 @@
   // Set the tool chain target information.
   unsigned Major, Minor, Micro;
   bool HadExtra;
-  if (OSXVersion) {
+  if (Platform == MacOS) {
     assert((!iOSVersion && !iOSSimVersion) && "Unknown target platform!");
     if (!Driver::GetReleaseVersion(OSXVersion->getValue(), Major, Minor,
                                    Micro, HadExtra) || HadExtra ||
         Major != 10 || Minor >= 100 || Micro >= 100)
       getDriver().Diag(diag::err_drv_invalid_version_number)
         << OSXVersion->getAsString(Args);
-  } else {
+  } else if (Platform == IPhoneOS || Platform == IPhoneOSSimulator) {
     const Arg *Version = iOSVersion ? iOSVersion : iOSSimVersion;
     assert(Version && "Unknown target platform!");
     if (!Driver::GetReleaseVersion(Version->getValue(), Major, Minor,
@@ -521,9 +583,8 @@
         Major >= 10 || Minor >= 100 || Micro >= 100)
       getDriver().Diag(diag::err_drv_invalid_version_number)
         << Version->getAsString(Args);
-  }
-
-  bool IsIOSSim = bool(iOSSimVersion);
+  } else
+    llvm_unreachable("unknown kind of Darwin platform");
 
   // In GCC, the simulator historically was treated as being OS X in some
   // contexts, like determining the link logic, despite generally being called
@@ -531,9 +592,9 @@
   // simulator as iOS + x86, and treat it differently in a few contexts.
   if (iOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
                      getTriple().getArch() == llvm::Triple::x86_64))
-    IsIOSSim = true;
+    Platform = IPhoneOSSimulator;
 
-  setTarget(/*IsIPhoneOS=*/ !OSXVersion, Major, Minor, Micro, IsIOSSim);
+  setTarget(Platform, Major, Minor, Micro);
 }
 
 void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
@@ -594,6 +655,7 @@
 
   // Use the newer cc_kext for iOS ARM after 6.0.
   if (!isTargetIPhoneOS() || isTargetIOSSimulator() ||
+      getTriple().getArch() == llvm::Triple::arm64 ||
       !isIPhoneOSVersionLT(6, 0)) {
     llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
   } else {
@@ -606,8 +668,8 @@
     CmdArgs.push_back(Args.MakeArgString(P.str()));
 }
 
-DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
-                                      const char *BoundArch) const {
+DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
+                                     const char *BoundArch) const {
   DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
   const OptTable &Opts = getDriver().getOpts();
 
@@ -626,10 +688,10 @@
       // Skip this argument unless the architecture matches either the toolchain
       // triple arch, or the arch being bound.
       llvm::Triple::ArchType XarchArch =
-        tools::darwin::getArchTypeForDarwinArchName(A->getValue(0));
+        tools::darwin::getArchTypeForMachOArchName(A->getValue(0));
       if (!(XarchArch == getArch()  ||
             (BoundArch && XarchArch ==
-             tools::darwin::getArchTypeForDarwinArchName(BoundArch))))
+             tools::darwin::getArchTypeForMachOArchName(BoundArch))))
         continue;
 
       Arg *OriginalArg = A;
@@ -814,8 +876,6 @@
       DAL->AddJoinedArg(0, MArch, "armv7a");
     else if (Name == "armv7em")
       DAL->AddJoinedArg(0, MArch, "armv7em");
-    else if (Name == "armv7f")
-      DAL->AddJoinedArg(0, MArch, "armv7f");
     else if (Name == "armv7k")
       DAL->AddJoinedArg(0, MArch, "armv7k");
     else if (Name == "armv7m")
@@ -823,21 +883,52 @@
     else if (Name == "armv7s")
       DAL->AddJoinedArg(0, MArch, "armv7s");
 
-    else
-      llvm_unreachable("invalid Darwin arch");
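+    // "armv8" is accepted as an alias for the arm64 slice.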
+    else if (Name == "arm64")
+      DAL->AddJoinedArg(0, MArch, "arm64");
+    else if (Name == "armv8")
+      DAL->AddJoinedArg(0, MArch, "arm64");
   }
 
+  return DAL;
+}
+
+void MachO::AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
+                                  llvm::opt::ArgStringList &CmdArgs) const {
+  // Embedded targets are simple at the moment: they do not support
+  // sanitizers, and they use a different library for each member of the
+  // product { static, PIC } x { hard-float, soft-float }.
+  llvm::SmallString<32> CompilerRT = StringRef("libclang_rt.");
+  CompilerRT +=
+      tools::arm::getARMFloatABI(getDriver(), Args, getTriple()) == "hard"
+          ? "hard"
+          : "soft";
+  CompilerRT += Args.hasArg(options::OPT_fPIC) ? "_pic.a" : "_static.a";
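+  // The resulting archive name is e.g. "libclang_rt.hard_static.a" or
+  // "libclang_rt.soft_pic.a".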
+
+  AddLinkRuntimeLib(Args, CmdArgs, CompilerRT, false, true);
+}
+
+
+DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
+                                      const char *BoundArch) const {
+  // First get the generic Apple args, before moving onto Darwin-specific ones.
+  DerivedArgList *DAL = MachO::TranslateArgs(Args, BoundArch);
+  const OptTable &Opts = getDriver().getOpts();
+
+  // If no architecture is bound, none of the translations here are relevant.
+  if (!BoundArch)
+    return DAL;
+
   // Add an explicit version min argument for the deployment target. We do this
   // after argument translation because -Xarch_ arguments may add a version min
   // argument.
-  if (BoundArch)
-    AddDeploymentTarget(*DAL);
+  AddDeploymentTarget(*DAL);
 
   // For iOS 6, undo the translation to add -static for -mkernel/-fapple-kext.
   // FIXME: It would be far better to avoid inserting those -static arguments,
   // but we can't check the deployment target in the translation code until
   // it is set here.
-  if (isTargetIPhoneOS() && !isIPhoneOSVersionLT(6, 0)) {
+  if (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0) &&
+      getTriple().getArch() != llvm::Triple::arm64) {
     for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
       Arg *A = *it;
       ++it;
@@ -854,7 +945,7 @@
 
   // Default to use libc++ on OS X 10.9+ and iOS 7+.
   if (((isTargetMacOS() && !isMacosxVersionLT(10, 9)) ||
-       (isTargetIPhoneOS() && !isIPhoneOSVersionLT(7, 0))) &&
+       (isTargetIOSBased() && !isIPhoneOSVersionLT(7, 0))) &&
       !Args.getLastArg(options::OPT_stdlib_EQ))
     DAL->AddJoinedArg(0, Opts.getOption(options::OPT_stdlib_EQ), "libc++");
 
@@ -864,8 +955,8 @@
     // Check whether the target provides libc++.
     StringRef where;
 
-    // Complain about targetting iOS < 5.0 in any way.
-    if (isTargetIPhoneOS() && isIPhoneOSVersionLT(5, 0))
+    // Complain about targeting iOS < 5.0 in any way.
+    if (isTargetIOSBased() && isIPhoneOSVersionLT(5, 0))
       where = "iOS 5.0";
 
     if (where != StringRef()) {
@@ -877,11 +968,11 @@
   return DAL;
 }
 
-bool Darwin::IsUnwindTablesDefault() const {
+bool MachO::IsUnwindTablesDefault() const {
   return getArch() == llvm::Triple::x86_64;
 }
 
-bool Darwin::UseDwarfDebugFlags() const {
+bool MachO::UseDwarfDebugFlags() const {
   if (const char *S = ::getenv("RC_DEBUG_OPTIONS"))
     return S[0] != '\0';
   return false;
@@ -893,40 +984,143 @@
           getTriple().getArch() == llvm::Triple::thumb);
 }
 
-bool Darwin::isPICDefault() const {
+bool MachO::isPICDefault() const {
   return true;
 }
 
-bool Darwin::isPIEDefault() const {
+bool MachO::isPIEDefault() const {
   return false;
 }
 
-bool Darwin::isPICDefaultForced() const {
-  return getArch() == llvm::Triple::x86_64;
+bool MachO::isPICDefaultForced() const {
+  return (getArch() == llvm::Triple::x86_64 ||
+          getArch() == llvm::Triple::arm64);
 }
 
-bool Darwin::SupportsProfiling() const {
+bool MachO::SupportsProfiling() const {
   // Profiling instrumentation is only supported on x86.
   return getArch() == llvm::Triple::x86 || getArch() == llvm::Triple::x86_64;
 }
 
+void Darwin::addMinVersionArgs(const llvm::opt::ArgList &Args,
+                               llvm::opt::ArgStringList &CmdArgs) const {
+  VersionTuple TargetVersion = getTargetVersion();
+
+  // If we had an explicit -mios-simulator-version-min argument, honor that,
+  // otherwise use the traditional deployment targets. We can't just check the
+  // is-sim attribute because existing code follows this path, and the linker
+  // may not handle the argument.
+  //
+  // FIXME: We may be able to remove this, once we can verify no one depends on
+  // it.
+  if (Args.hasArg(options::OPT_mios_simulator_version_min_EQ))
+    CmdArgs.push_back("-ios_simulator_version_min");
+  else if (isTargetIOSBased())
+    CmdArgs.push_back("-iphoneos_version_min");
+  else {
+    assert(isTargetMacOS() && "unexpected target");
+    CmdArgs.push_back("-macosx_version_min");
+  }
+
+  CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+}
+
+void Darwin::addStartObjectFileArgs(const llvm::opt::ArgList &Args,
+                                    llvm::opt::ArgStringList &CmdArgs) const {
+  // Derived from startfile spec.
+  if (Args.hasArg(options::OPT_dynamiclib)) {
+    // Derived from darwin_dylib1 spec.
+    if (isTargetIOSSimulator()) {
+      ; // iOS simulator does not need dylib1.o.
+    } else if (isTargetIPhoneOS()) {
+      if (isIPhoneOSVersionLT(3, 1))
+        CmdArgs.push_back("-ldylib1.o");
+    } else {
+      if (isMacosxVersionLT(10, 5))
+        CmdArgs.push_back("-ldylib1.o");
+      else if (isMacosxVersionLT(10, 6))
+        CmdArgs.push_back("-ldylib1.10.5.o");
+    }
+  } else {
+    if (Args.hasArg(options::OPT_bundle)) {
+      if (!Args.hasArg(options::OPT_static)) {
+        // Derived from darwin_bundle1 spec.
+        if (isTargetIOSSimulator()) {
+          ; // iOS simulator does not need bundle1.o.
+        } else if (isTargetIPhoneOS()) {
+          if (isIPhoneOSVersionLT(3, 1))
+            CmdArgs.push_back("-lbundle1.o");
+        } else {
+          if (isMacosxVersionLT(10, 6))
+            CmdArgs.push_back("-lbundle1.o");
+        }
+      }
+    } else {
+      if (Args.hasArg(options::OPT_pg) && SupportsProfiling()) {
+        if (Args.hasArg(options::OPT_static) ||
+            Args.hasArg(options::OPT_object) ||
+            Args.hasArg(options::OPT_preload)) {
+          CmdArgs.push_back("-lgcrt0.o");
+        } else {
+          CmdArgs.push_back("-lgcrt1.o");
+
+          // darwin_crt2 spec is empty.
+        }
+        // By default on OS X 10.8 and later, we don't link with a crt1.o
+        // file and the linker knows to use _main as the entry point.  But,
+        // when compiling with -pg, we need to link with the gcrt1.o file,
+        // so pass the -no_new_main option to tell the linker to use the
+        // "start" symbol as the entry point.
+        if (isTargetMacOS() && !isMacosxVersionLT(10, 8))
+          CmdArgs.push_back("-no_new_main");
+      } else {
+        if (Args.hasArg(options::OPT_static) ||
+            Args.hasArg(options::OPT_object) ||
+            Args.hasArg(options::OPT_preload)) {
+          CmdArgs.push_back("-lcrt0.o");
+        } else {
+          // Derived from darwin_crt1 spec.
+          if (isTargetIOSSimulator()) {
+            ; // iOS simulator does not need crt1.o.
+          } else if (isTargetIPhoneOS()) {
+            if (getArch() == llvm::Triple::arm64)
+              ; // iOS does not need any crt1 files for arm64
+            else if (isIPhoneOSVersionLT(3, 1))
+              CmdArgs.push_back("-lcrt1.o");
+            else if (isIPhoneOSVersionLT(6, 0))
+              CmdArgs.push_back("-lcrt1.3.1.o");
+          } else {
+            if (isMacosxVersionLT(10, 5))
+              CmdArgs.push_back("-lcrt1.o");
+            else if (isMacosxVersionLT(10, 6))
+              CmdArgs.push_back("-lcrt1.10.5.o");
+            else if (isMacosxVersionLT(10, 8))
+              CmdArgs.push_back("-lcrt1.10.6.o");
+
+            // darwin_crt2 spec is empty.
+          }
+        }
+      }
+    }
+  }
+
+  if (!isTargetIPhoneOS() && Args.hasArg(options::OPT_shared_libgcc) &&
+      isMacosxVersionLT(10, 5)) {
+    const char *Str = Args.MakeArgString(GetFilePath("crt3.o"));
+    CmdArgs.push_back(Str);
+  }
+}
+
 bool Darwin::SupportsObjCGC() const {
-  // Garbage collection is supported everywhere except on iPhone OS.
-  return !isTargetIPhoneOS();
+  return isTargetMacOS();
 }
 
 void Darwin::CheckObjCARC() const {
-  if (isTargetIPhoneOS() || !isMacosxVersionLT(10, 6))
+  if (isTargetIOSBased() || (isTargetMacOS() && !isMacosxVersionLT(10, 6)))
     return;
   getDriver().Diag(diag::err_arc_unsupported_on_toolchain);
 }
 
-std::string
-Darwin_Generic_GCC::ComputeEffectiveClangTriple(const ArgList &Args,
-                                                types::ID InputType) const {
-  return ComputeLLVMTriple(Args, InputType);
-}
-
 /// Generic_GCC - A tool chain using the 'gcc' command to perform
 /// all subcommands; this relies on gcc translating the majority of
 /// command line options.
@@ -1007,14 +1201,14 @@
   return false;
 }
 
-static StringRef getGCCToolchainDir(const ArgList &Args) {
+static llvm::StringRef getGCCToolchainDir(const ArgList &Args) {
   const Arg *A = Args.getLastArg(options::OPT_gcc_toolchain);
   if (A)
     return A->getValue();
   return GCC_INSTALL_PREFIX;
 }
 
-/// \brief Construct a GCCInstallationDetector from the driver.
+/// \brief Initialize a GCCInstallationDetector from the driver.
 ///
 /// This performs all of the autodetection and sets up the various paths.
 /// Once constructed, a GCCInstallationDetector is essentially immutable.
@@ -1023,13 +1217,12 @@
 /// should instead pull the target out of the driver. This is currently
 /// necessary because the driver doesn't store the final version of the target
 /// triple.
-Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
-    const Driver &D, const llvm::Triple &TargetTriple, const ArgList &Args)
-    : IsValid(false), D(D) {
+void
+Generic_GCC::GCCInstallationDetector::init(
+    const Driver &D, const llvm::Triple &TargetTriple, const ArgList &Args) {
   llvm::Triple BiarchVariantTriple =
       TargetTriple.isArch32Bit() ? TargetTriple.get64BitArchVariant()
                                  : TargetTriple.get32BitArchVariant();
-  llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
   // The library directories which may contain GCC installations.
   SmallVector<StringRef, 4> CandidateLibDirs, CandidateBiarchLibDirs;
   // The compatible GCC triples for this particular architecture.
@@ -1075,7 +1268,7 @@
       if (!llvm::sys::fs::exists(LibDir))
         continue;
       for (unsigned k = 0, ke = CandidateTripleAliases.size(); k < ke; ++k)
-        ScanLibDirForGCCTriple(TargetArch, Args, LibDir,
+        ScanLibDirForGCCTriple(TargetTriple, Args, LibDir,
                                CandidateTripleAliases[k]);
     }
     for (unsigned j = 0, je = CandidateBiarchLibDirs.size(); j < je; ++j) {
@@ -1084,7 +1277,7 @@
         continue;
       for (unsigned k = 0, ke = CandidateBiarchTripleAliases.size(); k < ke;
            ++k)
-        ScanLibDirForGCCTriple(TargetArch, Args, LibDir,
+        ScanLibDirForGCCTriple(TargetTriple, Args, LibDir,
                                CandidateBiarchTripleAliases[k],
                                /*NeedsBiarchSuffix=*/ true);
     }
@@ -1098,7 +1291,24 @@
        I != E; ++I)
     OS << "Found candidate GCC installation: " << *I << "\n";
 
-  OS << "Selected GCC installation: " << GCCInstallPath << "\n";
+  if (!GCCInstallPath.empty())
+    OS << "Selected GCC installation: " << GCCInstallPath << "\n";
+
+  for (MultilibSet::const_iterator I = Multilibs.begin(), E = Multilibs.end();
+       I != E; ++I) {
+    OS << "Candidate multilib: " << *I << "\n";
+  }
+
+  if (Multilibs.size() != 0 || !SelectedMultilib.isDefault())
+    OS << "Selected multilib: " << SelectedMultilib << "\n";
+}
+
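+// If a biarch sibling multilib was recorded during detection (for example the
+// default multilib alongside a selected /32 or /64 variant), copy it into M
+// and return true.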
+bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
+  if (BiarchSibling.hasValue()) {
+    M = BiarchSibling.getValue();
+    return true;
+  }
+  return false;
 }
 
 /*static*/ void Generic_GCC::GCCInstallationDetector::CollectLibDirsAndTriples(
@@ -1113,25 +1323,34 @@
   static const char *const AArch64LibDirs[] = { "/lib" };
   static const char *const AArch64Triples[] = { "aarch64-none-linux-gnu",
                                                 "aarch64-linux-gnu" };
+  static const char *const AArch64beLibDirs[] = { "/lib" };
+  static const char *const AArch64beTriples[] = { "aarch64_be-none-linux-gnu",
+                                                  "aarch64_be-linux-gnu" };
 
   static const char *const ARMLibDirs[] = { "/lib" };
   static const char *const ARMTriples[] = { "arm-linux-gnueabi",
                                             "arm-linux-androideabi" };
   static const char *const ARMHFTriples[] = { "arm-linux-gnueabihf",
                                               "armv7hl-redhat-linux-gnueabi" };
+  static const char *const ARMebLibDirs[] = { "/lib" };
+  static const char *const ARMebTriples[] = { "armeb-linux-gnueabi",
+                                              "armeb-linux-androideabi" };
+  static const char *const ARMebHFTriples[] = { "armeb-linux-gnueabihf",
+                                                "armebv7hl-redhat-linux-gnueabi" };
 
   static const char *const X86_64LibDirs[] = { "/lib64", "/lib" };
   static const char *const X86_64Triples[] = {
     "x86_64-linux-gnu", "x86_64-unknown-linux-gnu", "x86_64-pc-linux-gnu",
     "x86_64-redhat-linux6E", "x86_64-redhat-linux", "x86_64-suse-linux",
-    "x86_64-manbo-linux-gnu", "x86_64-linux-gnu", "x86_64-slackware-linux"
+    "x86_64-manbo-linux-gnu", "x86_64-linux-gnu", "x86_64-slackware-linux",
+    "x86_64-linux-android"
   };
   static const char *const X86LibDirs[] = { "/lib32", "/lib" };
   static const char *const X86Triples[] = {
     "i686-linux-gnu", "i686-pc-linux-gnu", "i486-linux-gnu", "i386-linux-gnu",
     "i386-redhat-linux6E", "i686-redhat-linux", "i586-redhat-linux",
     "i386-redhat-linux", "i586-suse-linux", "i486-slackware-linux",
-    "i686-montavista-linux"
+    "i686-montavista-linux", "i686-linux-android"
   };
 
   static const char *const MIPSLibDirs[] = { "/lib" };
@@ -1146,7 +1365,8 @@
                                                "mips-mti-linux-gnu" };
   static const char *const MIPS64ELLibDirs[] = { "/lib64", "/lib" };
   static const char *const MIPS64ELTriples[] = { "mips64el-linux-gnu",
-                                                 "mips-mti-linux-gnu" };
+                                                 "mips-mti-linux-gnu",
+                                                 "mips64el-linux-android" };
 
   static const char *const PPCLibDirs[] = { "/lib32", "/lib" };
   static const char *const PPCTriples[] = {
@@ -1164,6 +1384,13 @@
                                                 "powerpc64le-suse-linux",
                                                 "ppc64le-redhat-linux" };
 
+  static const char *const SPARCv8LibDirs[] = { "/lib32", "/lib" };
+  static const char *const SPARCv8Triples[] = { "sparc-linux-gnu",
+                                                "sparcv8-linux-gnu" };
+  static const char *const SPARCv9LibDirs[] = { "/lib64", "/lib" };
+  static const char *const SPARCv9Triples[] = { "sparc64-linux-gnu",
+                                                "sparcv9-linux-gnu" };
+
   static const char *const SystemZLibDirs[] = { "/lib64", "/lib" };
   static const char *const SystemZTriples[] = {
     "s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
@@ -1171,6 +1398,7 @@
   };
 
   switch (TargetTriple.getArch()) {
+  case llvm::Triple::arm64:
   case llvm::Triple::aarch64:
     LibDirs.append(AArch64LibDirs,
                    AArch64LibDirs + llvm::array_lengthof(AArch64LibDirs));
@@ -1181,6 +1409,16 @@
     BiarchTripleAliases.append(
         AArch64Triples, AArch64Triples + llvm::array_lengthof(AArch64Triples));
     break;
+  case llvm::Triple::aarch64_be:
+    LibDirs.append(AArch64beLibDirs,
+                   AArch64beLibDirs + llvm::array_lengthof(AArch64beLibDirs));
+    TripleAliases.append(AArch64beTriples,
+                         AArch64beTriples + llvm::array_lengthof(AArch64beTriples));
+    BiarchLibDirs.append(AArch64beLibDirs,
+                         AArch64beLibDirs + llvm::array_lengthof(AArch64beLibDirs));
+    BiarchTripleAliases.append(
+        AArch64beTriples, AArch64beTriples + llvm::array_lengthof(AArch64beTriples));
+    break;
   case llvm::Triple::arm:
   case llvm::Triple::thumb:
     LibDirs.append(ARMLibDirs, ARMLibDirs + llvm::array_lengthof(ARMLibDirs));
@@ -1192,6 +1430,17 @@
                            ARMTriples + llvm::array_lengthof(ARMTriples));
     }
     break;
+  case llvm::Triple::armeb:
+  case llvm::Triple::thumbeb:
+    LibDirs.append(ARMebLibDirs, ARMebLibDirs + llvm::array_lengthof(ARMebLibDirs));
+    if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
+      TripleAliases.append(ARMebHFTriples,
+                           ARMebHFTriples + llvm::array_lengthof(ARMebHFTriples));
+    } else {
+      TripleAliases.append(ARMebTriples,
+                           ARMebTriples + llvm::array_lengthof(ARMebTriples));
+    }
+    break;
   case llvm::Triple::x86_64:
     LibDirs.append(X86_64LibDirs,
                    X86_64LibDirs + llvm::array_lengthof(X86_64LibDirs));
@@ -1283,6 +1532,26 @@
     TripleAliases.append(PPC64LETriples,
                          PPC64LETriples + llvm::array_lengthof(PPC64LETriples));
     break;
+  case llvm::Triple::sparc:
+    LibDirs.append(SPARCv8LibDirs,
+                   SPARCv8LibDirs + llvm::array_lengthof(SPARCv8LibDirs));
+    TripleAliases.append(SPARCv8Triples,
+                         SPARCv8Triples + llvm::array_lengthof(SPARCv8Triples));
+    BiarchLibDirs.append(SPARCv9LibDirs,
+                         SPARCv9LibDirs + llvm::array_lengthof(SPARCv9LibDirs));
+    BiarchTripleAliases.append(
+        SPARCv9Triples, SPARCv9Triples + llvm::array_lengthof(SPARCv9Triples));
+    break;
+  case llvm::Triple::sparcv9:
+    LibDirs.append(SPARCv9LibDirs,
+                   SPARCv9LibDirs + llvm::array_lengthof(SPARCv9LibDirs));
+    TripleAliases.append(SPARCv9Triples,
+                         SPARCv9Triples + llvm::array_lengthof(SPARCv9Triples));
+    BiarchLibDirs.append(SPARCv8LibDirs,
+                         SPARCv8LibDirs + llvm::array_lengthof(SPARCv8LibDirs));
+    BiarchTripleAliases.append(
+        SPARCv8Triples, SPARCv8Triples + llvm::array_lengthof(SPARCv8Triples));
+    break;
   case llvm::Triple::systemz:
     LibDirs.append(SystemZLibDirs,
                    SystemZLibDirs + llvm::array_lengthof(SystemZLibDirs));
@@ -1305,22 +1574,45 @@
     BiarchTripleAliases.push_back(BiarchTriple.str());
 }
 
-static bool isSoftFloatABI(const ArgList &Args) {
-  Arg *A = Args.getLastArg(options::OPT_msoft_float,
-                           options::OPT_mhard_float,
-                           options::OPT_mfloat_abi_EQ);
-  if (!A) return false;
+namespace {
+// Filter to remove Multilibs that don't exist as a suffix to Path
+class FilterNonExistent : public MultilibSet::FilterCallback {
+  std::string Base;
+public:
+  FilterNonExistent(std::string Base) : Base(Base) {}
+  bool operator()(const Multilib &M) const override {
+    return !llvm::sys::fs::exists(Base + M.gccSuffix() + "/crtbegin.o");
+  }
+};
+} // end anonymous namespace
 
-  return A->getOption().matches(options::OPT_msoft_float) ||
-         (A->getOption().matches(options::OPT_mfloat_abi_EQ) &&
-          A->getValue() == StringRef("soft"));
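+// Record a feature as "+Flag" when enabled and "-Flag" when disabled; this is
+// the convention MultilibSet::select uses to match flags against a Multilib.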
+static void addMultilibFlag(bool Enabled, const char *const Flag,
+                            std::vector<std::string> &Flags) {
+  if (Enabled)
+    Flags.push_back(std::string("+") + Flag);
+  else
+    Flags.push_back(std::string("-") + Flag);
 }
 
 static bool isMipsArch(llvm::Triple::ArchType Arch) {
-  return Arch == llvm::Triple::mips ||
-         Arch == llvm::Triple::mipsel ||
-         Arch == llvm::Triple::mips64 ||
-         Arch == llvm::Triple::mips64el;
+  return Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel ||
+         Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el;
+}
+
+static bool isMips32(llvm::Triple::ArchType Arch) {
+  return Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel;
+}
+
+static bool isMips64(llvm::Triple::ArchType Arch) {
+  return Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el;
+}
+
+static bool isMipsEL(llvm::Triple::ArchType Arch) {
+  return Arch == llvm::Triple::mipsel || Arch == llvm::Triple::mips64el;
+}
+
+static bool isMipsEB(llvm::Triple::ArchType Arch) {
+  return Arch == llvm::Triple::mips || Arch == llvm::Triple::mips64;
 }
 
 static bool isMips16(const ArgList &Args) {
@@ -1359,42 +1651,9 @@
   return A && A->getValue() == StringRef("2008");
 }
 
-// FIXME: There is the same routine in the Tools.cpp.
-static bool hasMipsN32ABIArg(const ArgList &Args) {
-  Arg *A = Args.getLastArg(options::OPT_mabi_EQ);
-  return A && (A->getValue() == StringRef("n32"));
-}
-
-static bool hasCrtBeginObj(Twine Path) {
-  return llvm::sys::fs::exists(Path + "/crtbegin.o");
-}
-
-static bool findTargetBiarchSuffix(std::string &Suffix, StringRef Path,
-                                   llvm::Triple::ArchType TargetArch,
-                                   const ArgList &Args) {
-  // FIXME: This routine was only intended to model bi-arch toolchains which
-  // use -m32 and -m64 to swap between variants of a target. It shouldn't be
-  // doing ABI-based builtin location for MIPS.
-  if (hasMipsN32ABIArg(Args))
-    Suffix = "/n32";
-  else if (TargetArch == llvm::Triple::x86_64 ||
-           TargetArch == llvm::Triple::ppc64 ||
-           TargetArch == llvm::Triple::systemz ||
-           TargetArch == llvm::Triple::mips64 ||
-           TargetArch == llvm::Triple::mips64el)
-    Suffix = "/64";
-  else
-    Suffix = "/32";
-
-  return hasCrtBeginObj(Path + Suffix);
-}
-
-void Generic_GCC::GCCInstallationDetector::findMIPSABIDirSuffix(
-    std::string &Suffix, llvm::Triple::ArchType TargetArch, StringRef Path,
+bool Generic_GCC::GCCInstallationDetector::findMIPSMultilibs(
+    const llvm::Triple &TargetTriple, StringRef Path,
     const llvm::opt::ArgList &Args) {
-  if (!isMipsArch(TargetArch))
-    return;
-
   // Some MIPS toolchains put libraries and object files compiled
   // using different options into sub-directories whose names
   // reflect the flags used for compilation. For example sysroot
@@ -1419,71 +1678,302 @@
   // /mips32
   //     /usr
   //       /lib  <= crt*.o files compiled with '-mips32'
-  //
-  // Unfortunately different toolchains use different and partially
-  // overlapped naming schemes. So we have to make a trick for detection
-  // of using toolchain. We lookup a path which unique for each toolchains.
 
-  bool IsMentorToolChain = hasCrtBeginObj(Path + "/mips16/soft-float");
-  bool IsFSFToolChain = hasCrtBeginObj(Path + "/mips32/mips16/sof");
+  FilterNonExistent NonExistent(Path);
 
-  if (IsMentorToolChain && IsFSFToolChain)
-    D.Diag(diag::err_drv_unknown_toolchain);
+  // Check for FSF toolchain multilibs
+  MultilibSet FSFMipsMultilibs;
+  {
+    Multilib MArchMips32 = Multilib()
+      .gccSuffix("/mips32")
+      .osSuffix("/mips32")
+      .includeSuffix("/mips32")
+      .flag("+m32").flag("-m64").flag("-mmicromips").flag("-march=mips32r2");
 
-  if (IsMentorToolChain) {
-    if (isMips16(Args))
-      Suffix += "/mips16";
-    else if (isMicroMips(Args))
-      Suffix += "/micromips";
+    Multilib MArchMicroMips = Multilib()
+      .gccSuffix("/micromips")
+      .osSuffix("/micromips")
+      .includeSuffix("/micromips")
+      .flag("+m32").flag("-m64").flag("+mmicromips");
 
-    if (isSoftFloatABI(Args))
-      Suffix += "/soft-float";
+    Multilib MArchMips64r2 = Multilib()
+      .gccSuffix("/mips64r2")
+      .osSuffix("/mips64r2")
+      .includeSuffix("/mips64r2")
+      .flag("-m32").flag("+m64").flag("+march=mips64r2");
 
-    if (TargetArch == llvm::Triple::mipsel ||
-        TargetArch == llvm::Triple::mips64el)
-      Suffix += "/el";
-  } else if (IsFSFToolChain) {
-    if (TargetArch == llvm::Triple::mips ||
-        TargetArch == llvm::Triple::mipsel) {
-      if (isMicroMips(Args))
-        Suffix += "/micromips";
-      else if (isMips32r2(Args))
-        Suffix += "";
-      else
-        Suffix += "/mips32";
+    Multilib MArchMips64 = Multilib()
+      .gccSuffix("/mips64")
+      .osSuffix("/mips64")
+      .includeSuffix("/mips64")
+      .flag("-m32").flag("+m64").flag("-march=mips64r2");
 
-      if (isMips16(Args))
-        Suffix += "/mips16";
-    } else {
-      if (isMips64r2(Args))
-        Suffix += hasMipsN32ABIArg(Args) ? "/mips64r2" : "/mips64r2/64";
-      else
-        Suffix += hasMipsN32ABIArg(Args) ? "/mips64" : "/mips64/64";
-    }
+    Multilib MArchDefault = Multilib()
+      .flag("+m32").flag("-m64").flag("+march=mips32r2");
 
-    if (TargetArch == llvm::Triple::mipsel ||
-        TargetArch == llvm::Triple::mips64el)
-      Suffix += "/el";
+    Multilib Mips16 = Multilib()
+      .gccSuffix("/mips16")
+      .osSuffix("/mips16")
+      .includeSuffix("/mips16")
+      .flag("+mips16");
 
-    if (isSoftFloatABI(Args))
-      Suffix += "/sof";
-    else {
-      if (isMipsFP64(Args))
-        Suffix += "/fp64";
+    Multilib MAbi64 = Multilib()
+      .gccSuffix("/64")
+      .osSuffix("/64")
+      .includeSuffix("/64")
+      .flag("+mabi=64").flag("-mabi=n32").flag("-m32");
 
-      if (isMipsNan2008(Args))
-        Suffix += "/nan2008";
+    Multilib BigEndian = Multilib()
+      .flag("+EB").flag("-EL");
+
+    Multilib LittleEndian = Multilib()
+      .gccSuffix("/el")
+      .osSuffix("/el")
+      .includeSuffix("/el")
+      .flag("+EL").flag("-EB");
+
+    Multilib SoftFloat = Multilib()
+      .gccSuffix("/sof")
+      .osSuffix("/sof")
+      .includeSuffix("/sof")
+      .flag("+msoft-float");
+
+    Multilib FP64 = Multilib()
+      .gccSuffix("/fp64")
+      .osSuffix("/fp64")
+      .includeSuffix("/fp64")
+      .flag("+mfp64");
+
+    Multilib Nan2008 = Multilib()
+      .gccSuffix("/nan2008")
+      .osSuffix("/nan2008")
+      .includeSuffix("/nan2008")
+      .flag("+mnan=2008");
+
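+    // Combine the axes: exactly one -march variant and endianness, plus
+    // optional mips16, 64-bit ABI and floating-point variants, pruning
+    // combinations the FSF toolchain layout does not provide.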
+    FSFMipsMultilibs = MultilibSet()
+      .Either(MArchMips32, MArchMicroMips, 
+              MArchMips64r2, MArchMips64, MArchDefault)
+      .Maybe(Mips16)
+      .FilterOut("/mips64/mips16")
+      .FilterOut("/mips64r2/mips16")
+      .FilterOut("/micromips/mips16")
+      .Maybe(MAbi64)
+      .FilterOut("/micromips/64")
+      .FilterOut("/mips32/64")
+      .FilterOut("^/64")
+      .FilterOut("/mips16/64")
+      .Either(BigEndian, LittleEndian)
+      .Maybe(SoftFloat)
+      .Maybe(FP64)
+      .Maybe(Nan2008)
+      .FilterOut(".*sof/nan2008")
+      .FilterOut(".*sof/fp64")
+      .FilterOut(NonExistent);
+  }
+
+  // Check for Code Sourcery toolchain multilibs
+  MultilibSet CSMipsMultilibs;
+  {
+    Multilib MArchMips16 = Multilib()
+      .gccSuffix("/mips16")
+      .osSuffix("/mips16")
+      .includeSuffix("/mips16")
+      .flag("+m32").flag("+mips16");
+
+    Multilib MArchMicroMips = Multilib()
+      .gccSuffix("/micromips")
+      .osSuffix("/micromips")
+      .includeSuffix("/micromips")
+      .flag("+m32").flag("+mmicromips");
+
+    Multilib MArchDefault = Multilib()
+      .flag("-mips16").flag("-mmicromips");
+
+    Multilib SoftFloat = Multilib()
+      .gccSuffix("/soft-float")
+      .osSuffix("/soft-float")
+      .includeSuffix("/soft-float")
+      .flag("+msoft-float");
+
+    Multilib Nan2008 = Multilib()
+      .gccSuffix("/nan2008")
+      .osSuffix("/nan2008")
+      .includeSuffix("/nan2008")
+      .flag("+mnan=2008");
+
+    Multilib DefaultFloat = Multilib()
+      .flag("-msoft-float").flag("-mnan=2008");
+
+    Multilib BigEndian = Multilib()
+      .flag("+EB").flag("-EL");
+
+    Multilib LittleEndian = Multilib()
+      .gccSuffix("/el")
+      .osSuffix("/el")
+      .includeSuffix("/el")
+      .flag("+EL").flag("-EB");
+
+    // Note that this one's osSuffix is ""
+    Multilib MAbi64 = Multilib()
+      .gccSuffix("/64")
+      .includeSuffix("/64")
+      .flag("+mabi=64").flag("-mabi=n32").flag("-m32");
+
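+    // Combine: one of mips16 / micromips / default, one floating-point
+    // variant, the endianness, and an optional 64-bit ABI, pruning
+    // combinations the CodeSourcery layout does not provide.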
+    CSMipsMultilibs = MultilibSet()
+      .Either(MArchMips16, MArchMicroMips, MArchDefault)
+      .Either(SoftFloat, Nan2008, DefaultFloat)
+      .FilterOut("/micromips/nan2008")
+      .FilterOut("/mips16/nan2008")
+      .Either(BigEndian, LittleEndian)
+      .Maybe(MAbi64)
+      .FilterOut("/mips16.*/64")
+      .FilterOut("/micromips.*/64")
+      .FilterOut(NonExistent);
+  }
+
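+  // The Android MIPS toolchain optionally provides a single mips32r2 variant
+  // in a "/mips-r2" subdirectory.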
+  MultilibSet AndroidMipsMultilibs = MultilibSet()
+    .Maybe(Multilib("/mips-r2").flag("+march=mips32r2"))
+    .FilterOut(NonExistent);
+
+  MultilibSet DebianMipsMultilibs;
+  {
+    Multilib MAbiN32 = Multilib()
+      .gccSuffix("/n32")
+      .includeSuffix("/n32")
+      .flag("+mabi=n32");
+
+    Multilib M64 = Multilib()
+      .gccSuffix("/64")
+      .includeSuffix("/64")
+      .flag("+m64").flag("-m32").flag("-mabi=n32");
+
+    Multilib M32 = Multilib()
+      .flag("-m64").flag("+m32").flag("-mabi=n32");
+
+    DebianMipsMultilibs = MultilibSet()
+      .Either(M32, M64, MAbiN32)
+      .FilterOut(NonExistent);
+  }
+
+  llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
+
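+  // Describe the requested configuration as a list of enabled (+) and
+  // disabled (-) flags; MultilibSet::select matches these against the
+  // candidate multilibs.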
+  Multilib::flags_list Flags;
+  addMultilibFlag(isMips32(TargetArch), "m32", Flags);
+  addMultilibFlag(isMips64(TargetArch), "m64", Flags);
+  addMultilibFlag(isMips16(Args), "mips16", Flags);
+  addMultilibFlag(isMips32r2(Args), "march=mips32r2", Flags);
+  addMultilibFlag(isMips64r2(Args), "march=mips64r2", Flags);
+  addMultilibFlag(isMicroMips(Args), "mmicromips", Flags);
+  addMultilibFlag(isMipsFP64(Args), "mfp64", Flags);
+  addMultilibFlag(!isMipsFP64(Args), "mfp32", Flags);
+  addMultilibFlag(isMipsNan2008(Args), "mnan=2008", Flags);
+  addMultilibFlag(tools::mips::hasMipsAbiArg(Args, "n32"), "mabi=n32", Flags);
+  // Default is to assume mabi=64
+  bool IsMABI64 =
+      tools::mips::hasMipsAbiArg(Args, "64") ||
+      (!tools::mips::hasMipsAbiArg(Args, "n32") && isMips64(TargetArch));
+  addMultilibFlag(IsMABI64, "mabi=64", Flags);
+  addMultilibFlag(isSoftFloatABI(Args), "msoft-float", Flags);
+  addMultilibFlag(isSoftFloatABI(Args), "mfloat-abi=soft", Flags);
+  addMultilibFlag(!isSoftFloatABI(Args), "mhard-float", Flags);
+  addMultilibFlag(!isSoftFloatABI(Args), "mfloat-abi=hard", Flags);
+  addMultilibFlag(isMipsEL(TargetArch), "EL", Flags);
+  addMultilibFlag(isMipsEB(TargetArch), "EB", Flags);
+
+  if (TargetTriple.getEnvironment() == llvm::Triple::Android) {
+    // Select Android toolchain. It's the only choice in that case.
+    Multilibs.clear();
+    Multilibs.combineWith(AndroidMipsMultilibs);
+    return Multilibs.select(Flags, SelectedMultilib);
+  }
+
+  // Sort the candidates: the toolchain whose multilib set best matches the
+  // on-disk directory layout goes first. Then select the first candidate
+  // whose multilibs match the command line flags.
+  MultilibSet *candidates[] = { &DebianMipsMultilibs, &FSFMipsMultilibs,
+                                &CSMipsMultilibs };
+  std::sort(
+      std::begin(candidates), std::end(candidates),
+      [](MultilibSet *a, MultilibSet *b) { return a->size() > b->size(); });
+  for (const auto &candidate : candidates) {
+    Multilibs.clear();
+    Multilibs.combineWith(*candidate);
+    if (Multilibs.select(Flags, SelectedMultilib)) {
+      if (candidate == &DebianMipsMultilibs)
+        BiarchSibling = Multilib();
+      return true;
     }
   }
 
-  if (!hasCrtBeginObj(Path + Suffix))
-    Suffix.clear();
+  return false;
+}
+
+bool Generic_GCC::GCCInstallationDetector::findBiarchMultilibs(
+    const llvm::Triple &TargetTriple, StringRef Path, const ArgList &Args,
+    bool NeedsBiarchSuffix) {
+
+  // Some versions of SUSE and Fedora on ppc64 put 32-bit libs
+  // in what would normally be GCCInstallPath and put the 64-bit
+  // libs in a subdirectory named 64. The simple logic we follow is that
+  // *if* there is a subdirectory of the right name with crtbegin.o in it,
+  // we use that. If not, and if not a biarch triple alias, we look for
+  // crtbegin.o without the subdirectory.
+
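+  // "Default" is the multilib living directly in the GCC install directory;
+  // Alt64 and Alt32 model the "/64" and "/32" sibling directories.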
+  Multilib Default;
+  Multilib Alt64 = Multilib()
+    .gccSuffix("/64")
+    .includeSuffix("/64")
+    .flag("-m32").flag("+m64");
+  Multilib Alt32 = Multilib()
+    .gccSuffix("/32")
+    .includeSuffix("/32")
+    .flag("+m32").flag("-m64");
+
+  FilterNonExistent NonExistent(Path);
+
+  // Decide whether the default multilib is 32-bit, correcting for the case
+  // where the default multilib and the alternate appear backwards.
+  bool DefaultIs32Bit;
+  if (TargetTriple.isArch32Bit() && !NonExistent(Alt32))
+    DefaultIs32Bit = false;
+  else if (TargetTriple.isArch64Bit() && !NonExistent(Alt64))
+    DefaultIs32Bit = true;
+  else {
+    if (NeedsBiarchSuffix)
+      DefaultIs32Bit = TargetTriple.isArch64Bit();
+    else
+      DefaultIs32Bit = TargetTriple.isArch32Bit();
+  }
+
+  if (DefaultIs32Bit)
+    Default.flag("+m32").flag("-m64");
+  else
+    Default.flag("-m32").flag("+m64");
+
+  Multilibs.push_back(Default);
+  Multilibs.push_back(Alt64);
+  Multilibs.push_back(Alt32);
+
+  Multilibs.FilterOut(NonExistent);
+
+  Multilib::flags_list Flags;
+  addMultilibFlag(TargetTriple.isArch64Bit(), "m64", Flags);
+  addMultilibFlag(TargetTriple.isArch32Bit(), "m32", Flags);
+
+  if (!Multilibs.select(Flags, SelectedMultilib))
+    return false;
+
+  if (SelectedMultilib == Alt64 || SelectedMultilib == Alt32) {
+    BiarchSibling = Default;
+  }
+
+  return true;
 }
 
 void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
-    llvm::Triple::ArchType TargetArch, const ArgList &Args,
+    const llvm::Triple &TargetTriple, const ArgList &Args,
     const std::string &LibDir, StringRef CandidateTriple,
     bool NeedsBiarchSuffix) {
+  llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
   // There are various different suffixes involving the triple we
   // check for. We also record what is necessary to walk from each back
   // up to the lib directory.
@@ -1528,27 +2018,18 @@
       if (CandidateVersion <= Version)
         continue;
 
-      std::string MIPSABIDirSuffix;
-      findMIPSABIDirSuffix(MIPSABIDirSuffix, TargetArch, LI->path(), Args);
+      Multilibs.clear();
+      SelectedMultilib = Multilib();
+      BiarchSibling.reset();
 
-      // Some versions of SUSE and Fedora on ppc64 put 32-bit libs
-      // in what would normally be GCCInstallPath and put the 64-bit
-      // libs in a subdirectory named 64. The simple logic we follow is that
-      // *if* there is a subdirectory of the right name with crtbegin.o in it,
-      // we use that. If not, and if not a biarch triple alias, we look for
-      // crtbegin.o without the subdirectory.
-
-      std::string BiarchSuffix;
-      if (findTargetBiarchSuffix(BiarchSuffix,
-                                 LI->path() + MIPSABIDirSuffix,
-                                 TargetArch, Args)) {
-        GCCBiarchSuffix = BiarchSuffix;
-      } else if (NeedsBiarchSuffix ||
-                 !hasCrtBeginObj(LI->path() + MIPSABIDirSuffix)) {
+      // Debian mips multilibs behave more like the rest of the biarch ones,
+      // so handle them there
+      if (isMipsArch(TargetArch)) {
+        if (!findMIPSMultilibs(TargetTriple, LI->path(), Args))
+          continue;
+      } else if (!findBiarchMultilibs(TargetTriple, LI->path(), Args,
+                                      NeedsBiarchSuffix))
         continue;
-      } else {
-        GCCBiarchSuffix.clear();
-      }
 
       Version = CandidateVersion;
       GCCTriple.setTriple(CandidateTriple);
@@ -1557,7 +2038,6 @@
       // Linux.
       GCCInstallPath = LibDir + LibSuffixes[i] + "/" + VersionText.str();
       GCCParentLibPath = GCCInstallPath + InstallSuffixes[i];
-      GCCMIPSABIDirSuffix = MIPSABIDirSuffix;
       IsValid = true;
     }
   }
@@ -1565,7 +2045,7 @@
 
 Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple& Triple,
                          const ArgList &Args)
-  : ToolChain(D, Triple, Args), GCCInstallation(getDriver(), Triple, Args) {
+  : ToolChain(D, Triple, Args), GCCInstallation() {
   getProgramPaths().push_back(getDriver().getInstalledDir());
   if (getDriver().getInstalledDir() != getDriver().Dir)
     getProgramPaths().push_back(getDriver().Dir);
@@ -1580,10 +2060,6 @@
     if (!Preprocess)
       Preprocess.reset(new tools::gcc::Preprocess(*this));
     return Preprocess.get();
-  case Action::PrecompileJobClass:
-    if (!Precompile)
-      Precompile.reset(new tools::gcc::Precompile(*this));
-    return Precompile.get();
   case Action::CompileJobClass:
     if (!Compile)
       Compile.reset(new tools::gcc::Compile(*this));
@@ -1594,7 +2070,7 @@
 }
 
 Tool *Generic_GCC::buildAssembler() const {
-  return new tools::gcc::Assemble(*this);
+  return new tools::gnutools::Assemble(*this);
 }
 
 Tool *Generic_GCC::buildLinker() const {
@@ -1622,6 +2098,35 @@
   return false;
 }
 
+bool Generic_GCC::IsIntegratedAssemblerDefault() const {
+  return getTriple().getArch() == llvm::Triple::x86 ||
+         getTriple().getArch() == llvm::Triple::x86_64 ||
+         getTriple().getArch() == llvm::Triple::aarch64 ||
+         getTriple().getArch() == llvm::Triple::aarch64_be ||
+         getTriple().getArch() == llvm::Triple::arm64 ||
+         getTriple().getArch() == llvm::Triple::arm ||
+         getTriple().getArch() == llvm::Triple::armeb ||
+         getTriple().getArch() == llvm::Triple::thumb ||
+         getTriple().getArch() == llvm::Triple::thumbeb;
+}
+
+void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
+                                        ArgStringList &CC1Args) const {
+  const Generic_GCC::GCCVersion &V = GCCInstallation.getVersion();
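+  // Default to .init_array on AArch64/arm64, and on Linux when the detected
+  // GCC is at least 4.7 or the environment is Android.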
+  bool UseInitArrayDefault =
+      getTriple().getArch() == llvm::Triple::aarch64 ||
+      getTriple().getArch() == llvm::Triple::aarch64_be ||
+      getTriple().getArch() == llvm::Triple::arm64 ||
+      (getTriple().getOS() == llvm::Triple::Linux &&
+       (!V.isOlderThan(4, 7, 0) ||
+        getTriple().getEnvironment() == llvm::Triple::Android));
+
+  if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
+                         options::OPT_fno_use_init_array,
+                         UseInitArrayDefault))
+    CC1Args.push_back("-fuse-init-array");
+}
+
 /// Hexagon Toolchain
 
 std::string Hexagon_TC::GetGnuDir(const std::string &InstalledDir) {
@@ -2006,6 +2511,7 @@
 bool FreeBSD::UseSjLjExceptions() const {
   // FreeBSD uses SjLj exceptions on ARM oabi.
   switch (getTriple().getEnvironment()) {
+  case llvm::Triple::GNUEABIHF:
   case llvm::Triple::GNUEABI:
   case llvm::Triple::EABI:
     return false;
@@ -2016,6 +2522,14 @@
   }
 }
 
+bool FreeBSD::HasNativeLLVMSupport() const {
+  return true;
+}
+
+bool FreeBSD::isPIEDefault() const {
+  return getSanitizerArgs().hasZeroBaseShadow();
+}
+
 /// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
 
 NetBSD::NetBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
@@ -2027,8 +2541,34 @@
     // doesn't work.
     // FIXME: It'd be nicer to test if this directory exists, but I'm not sure
     // what all logic is needed to emulate the '=' prefix here.
-    if (Triple.getArch() == llvm::Triple::x86)
+    switch (Triple.getArch()) {
+    case llvm::Triple::x86:
       getFilePaths().push_back("=/usr/lib/i386");
+      break;
+    case llvm::Triple::arm:
+    case llvm::Triple::thumb:
+      switch (Triple.getEnvironment()) {
+      case llvm::Triple::EABI:
+      case llvm::Triple::EABIHF:
+      case llvm::Triple::GNUEABI:
+      case llvm::Triple::GNUEABIHF:
+        getFilePaths().push_back("=/usr/lib/eabi");
+        break;
+      default:
+        getFilePaths().push_back("=/usr/lib/oabi");
+        break;
+      }
+      break;
+    case llvm::Triple::mips64:
+    case llvm::Triple::mips64el:
+      if (tools::mips::hasMipsAbiArg(Args, "o32"))
+        getFilePaths().push_back("=/usr/lib/o32");
+      else if (tools::mips::hasMipsAbiArg(Args, "64"))
+        getFilePaths().push_back("=/usr/lib/64");
+      break;
+    default:
+      break;
+    }
 
     getFilePaths().push_back("=/usr/lib");
   }
@@ -2058,8 +2598,13 @@
   unsigned Major, Minor, Micro;
   getTriple().getOSVersion(Major, Minor, Micro);
   if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 23) || Major == 0) {
-    if (getArch() == llvm::Triple::x86 || getArch() == llvm::Triple::x86_64)
+    switch (getArch()) {
+    case llvm::Triple::x86:
+    case llvm::Triple::x86_64:
       return ToolChain::CST_Libcxx;
+    default:
+      break;
+    }
   }
   return ToolChain::CST_Libstdcxx;
 }
@@ -2195,7 +2740,7 @@
 }
 
 static Distro DetectDistro(llvm::Triple::ArchType Arch) {
-  OwningPtr<llvm::MemoryBuffer> File;
+  std::unique_ptr<llvm::MemoryBuffer> File;
   if (!llvm::MemoryBuffer::getFile("/etc/lsb-release", File)) {
     StringRef Data = File.get()->getBuffer();
     SmallVector<StringRef, 8> Lines;
@@ -2270,7 +2815,7 @@
 /// a target-triple directory in the library and header search paths.
 /// Unfortunately, this triple does not align with the vanilla target triple,
 /// so we provide a rough mapping here.
-static std::string getMultiarchTriple(const llvm::Triple TargetTriple,
+static std::string getMultiarchTriple(const llvm::Triple &TargetTriple,
                                       StringRef SysRoot) {
   // For most architectures, just use whatever we have rather than trying to be
   // clever.
@@ -2292,6 +2837,16 @@
         return "arm-linux-gnueabi";
     }
     return TargetTriple.str();
+  case llvm::Triple::armeb:
+  case llvm::Triple::thumbeb:
+    if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
+      if (llvm::sys::fs::exists(SysRoot + "/lib/armeb-linux-gnueabihf"))
+        return "armeb-linux-gnueabihf";
+    } else {
+      if (llvm::sys::fs::exists(SysRoot + "/lib/armeb-linux-gnueabi"))
+        return "armeb-linux-gnueabi";
+    }
+    return TargetTriple.str();
   case llvm::Triple::x86:
     if (llvm::sys::fs::exists(SysRoot + "/lib/i386-linux-gnu"))
       return "i386-linux-gnu";
@@ -2300,10 +2855,15 @@
     if (llvm::sys::fs::exists(SysRoot + "/lib/x86_64-linux-gnu"))
       return "x86_64-linux-gnu";
     return TargetTriple.str();
+  case llvm::Triple::arm64:
   case llvm::Triple::aarch64:
     if (llvm::sys::fs::exists(SysRoot + "/lib/aarch64-linux-gnu"))
       return "aarch64-linux-gnu";
     return TargetTriple.str();
+  case llvm::Triple::aarch64_be:
+    if (llvm::sys::fs::exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
+      return "aarch64_be-linux-gnu";
+    return TargetTriple.str();
   case llvm::Triple::mips:
     if (llvm::sys::fs::exists(SysRoot + "/lib/mips-linux-gnu"))
       return "mips-linux-gnu";
@@ -2332,25 +2892,24 @@
   if (llvm::sys::fs::exists(Path)) Paths.push_back(Path.str());
 }
 
-static StringRef getMultilibDir(const llvm::Triple &Triple,
-                                const ArgList &Args) {
+static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
   if (isMipsArch(Triple.getArch())) {
     // lib32 directory has a special meaning on MIPS targets.
     // It contains N32 ABI binaries. Use this folder if produce
     // code for N32 ABI only.
-    if (hasMipsN32ABIArg(Args))
+    if (tools::mips::hasMipsAbiArg(Args, "n32"))
       return "lib32";
     return Triple.isArch32Bit() ? "lib" : "lib64";
   }
 
-  // It happens that only x86 and PPC use the 'lib32' variant of multilib, and
+  // It happens that only x86 and PPC use the 'lib32' variant of oslibdir, and
   // using that variant while targeting other architectures causes problems
   // because the libraries are laid out in shared system roots that can't cope
-  // with a 'lib32' multilib search path being considered. So we only enable
+  // with a 'lib32' library search path being considered. So we only enable
   // them when we know we may need it.
   //
   // FIXME: This is a bit of a hack. We should really unify this code for
-  // reasoning about multilib spellings with the lib dir spellings in the
+  // reasoning about oslibdir spellings with the lib dir spellings in the
   // GCCInstallationDetector, but that is a more significant refactoring.
   if (Triple.getArch() == llvm::Triple::x86 ||
       Triple.getArch() == llvm::Triple::ppc)
@@ -2361,6 +2920,8 @@
 
 Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
   : Generic_ELF(D, Triple, Args) {
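+  // The GCC installation is detected here rather than in the Generic_GCC
+  // constructor, and its multilib set is inherited by the toolchain.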
+  GCCInstallation.init(D, Triple, Args);
+  Multilibs = GCCInstallation.getMultilibs();
   llvm::Triple::ArchType Arch = Triple.getArch();
   std::string SysRoot = computeSysRoot();
 
@@ -2428,29 +2989,20 @@
   // to the link paths.
   path_list &Paths = getFilePaths();
 
-  const std::string Multilib = getMultilibDir(Triple, Args);
+  const std::string OSLibDir = getOSLibDir(Triple, Args);
   const std::string MultiarchTriple = getMultiarchTriple(Triple, SysRoot);
 
   // Add the multilib suffixed paths where they are available.
   if (GCCInstallation.isValid()) {
     const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
     const std::string &LibPath = GCCInstallation.getParentLibPath();
+    const Multilib &Multilib = GCCInstallation.getMultilib();
 
     // Sourcery CodeBench MIPS toolchain holds some libraries under
     // a biarch-like suffix of the GCC installation.
-    //
-    // FIXME: It would be cleaner to model this as a variant of bi-arch. IE,
-    // instead of a '64' biarch suffix it would be 'el' or something.
-    if (IsAndroid && IsMips && isMips32r2(Args)) {
-      assert(GCCInstallation.getBiarchSuffix().empty() &&
-             "Unexpected bi-arch suffix");
-      addPathIfExists(GCCInstallation.getInstallPath() + "/mips-r2", Paths);
-    } else {
-      addPathIfExists((GCCInstallation.getInstallPath() +
-                       GCCInstallation.getMIPSABIDirSuffix() +
-                       GCCInstallation.getBiarchSuffix()),
-                      Paths);
-    }
+    addPathIfExists((GCCInstallation.getInstallPath() +
+                     Multilib.gccSuffix()),
+                    Paths);
 
     // GCC cross compiling toolchains will install target libraries which ship
     // as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
@@ -2458,7 +3010,7 @@
     // <prefix>/<libdir>/gcc/<triple>/<version>. This decision is somewhat
     // debatable, but is the reality today. We need to search this tree even
     // when we have a sysroot somewhere else. It is the responsibility of
-    // whomever is doing the cross build targetting a sysroot using a GCC
+    // whomever is doing the cross build targeting a sysroot using a GCC
     // installation that is *not* within the system root to ensure two things:
     //
     //  1) Any DSOs that are linked in from this tree or from the install path
@@ -2470,8 +3022,8 @@
     //
     // Note that this matches the GCC behavior. See the below comment for where
     // Clang diverges from GCC's behavior.
-    addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib/../" + Multilib +
-                    GCCInstallation.getMIPSABIDirSuffix(),
+    addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib/../" + OSLibDir +
+                    Multilib.osSuffix(),
                     Paths);
 
     // If the GCC installation we found is inside of the sysroot, we want to
@@ -2485,45 +3037,64 @@
     // a bug.
     if (StringRef(LibPath).startswith(SysRoot)) {
       addPathIfExists(LibPath + "/" + MultiarchTriple, Paths);
-      addPathIfExists(LibPath + "/../" + Multilib, Paths);
+      addPathIfExists(LibPath + "/../" + OSLibDir, Paths);
     }
   }
+
+  // Similar to the logic for GCC above, if we are currently running Clang
+  // inside of the requested system root, add its parent library paths to
+  // those searched.
+  // FIXME: It's not clear whether we should use the driver's installed
+  // directory ('Dir' below) or the ResourceDir.
+  if (StringRef(D.Dir).startswith(SysRoot)) {
+    addPathIfExists(D.Dir + "/../lib/" + MultiarchTriple, Paths);
+    addPathIfExists(D.Dir + "/../" + OSLibDir, Paths);
+  }
+
   addPathIfExists(SysRoot + "/lib/" + MultiarchTriple, Paths);
-  addPathIfExists(SysRoot + "/lib/../" + Multilib, Paths);
+  addPathIfExists(SysRoot + "/lib/../" + OSLibDir, Paths);
   addPathIfExists(SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
-  addPathIfExists(SysRoot + "/usr/lib/../" + Multilib, Paths);
+  addPathIfExists(SysRoot + "/usr/lib/../" + OSLibDir, Paths);
 
   // Try walking via the GCC triple path in case of biarch or multiarch GCC
   // installations with strange symlinks.
   if (GCCInstallation.isValid()) {
     addPathIfExists(SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
-                    "/../../" + Multilib, Paths);
+                    "/../../" + OSLibDir, Paths);
 
-    // Add the non-multilib suffixed paths (if potentially different).
-    const std::string &LibPath = GCCInstallation.getParentLibPath();
-    const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
-    if (!GCCInstallation.getBiarchSuffix().empty())
+    // Add the 'other' biarch variant path
+    Multilib BiarchSibling;
+    if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
       addPathIfExists(GCCInstallation.getInstallPath() +
-                      GCCInstallation.getMIPSABIDirSuffix(), Paths);
+                      BiarchSibling.gccSuffix(), Paths);
+    }
 
     // See comments above on the multilib variant for details of why this is
     // included even from outside the sysroot.
+    const std::string &LibPath = GCCInstallation.getParentLibPath();
+    const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+    const Multilib &Multilib = GCCInstallation.getMultilib();
     addPathIfExists(LibPath + "/../" + GCCTriple.str() +
-                    "/lib" + GCCInstallation.getMIPSABIDirSuffix(), Paths);
+                    "/lib" + Multilib.osSuffix(), Paths);
 
     // See comments above on the multilib variant for details of why this is
     // only included from within the sysroot.
     if (StringRef(LibPath).startswith(SysRoot))
       addPathIfExists(LibPath, Paths);
   }
+
+  // Similar to the logic for GCC above, if we are currently running Clang
+  // inside of the requested system root, add its parent library path to those
+  // searched.
+  // FIXME: It's not clear whether we should use the driver's installed
+  // directory ('Dir' below) or the ResourceDir.
+  if (StringRef(D.Dir).startswith(SysRoot))
+    addPathIfExists(D.Dir + "/../lib", Paths);
+
   addPathIfExists(SysRoot + "/lib", Paths);
   addPathIfExists(SysRoot + "/usr/lib", Paths);
 }
 
-bool FreeBSD::HasNativeLLVMSupport() const {
-  return true;
-}
-
 bool Linux::HasNativeLLVMSupport() const {
   return true;
 }
@@ -2536,19 +3107,6 @@
   return new tools::gnutools::Assemble(*this);
 }
 
-void Linux::addClangTargetOptions(const ArgList &DriverArgs,
-                                  ArgStringList &CC1Args) const {
-  const Generic_GCC::GCCVersion &V = GCCInstallation.getVersion();
-  bool UseInitArrayDefault =
-      !V.isOlderThan(4, 7, 0) ||
-      getTriple().getArch() == llvm::Triple::aarch64 ||
-      getTriple().getEnvironment() == llvm::Triple::Android;
-  if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
-                         options::OPT_fno_use_init_array,
-                         UseInitArrayDefault))
-    CC1Args.push_back("-fuse-init-array");
-}
-
 std::string Linux::computeSysRoot() const {
   if (!getDriver().SysRoot.empty())
     return getDriver().SysRoot;
@@ -2562,15 +3120,15 @@
 
   const StringRef InstallDir = GCCInstallation.getInstallPath();
   const StringRef TripleStr = GCCInstallation.getTriple().str();
-  const StringRef MIPSABIDirSuffix = GCCInstallation.getMIPSABIDirSuffix();
+  const Multilib &Multilib = GCCInstallation.getMultilib();
 
   std::string Path = (InstallDir + "/../../../../" + TripleStr + "/libc" +
-                      MIPSABIDirSuffix).str();
+                      Multilib.osSuffix()).str();
 
   if (llvm::sys::fs::exists(Path))
     return Path;
 
-  Path = (InstallDir + "/../../../../sysroot" + MIPSABIDirSuffix).str();
+  Path = (InstallDir + "/../../../../sysroot" + Multilib.osSuffix()).str();
 
   if (llvm::sys::fs::exists(Path))
     return Path;
@@ -2674,7 +3232,9 @@
     MultiarchIncludeDirs = X86_64MultiarchIncludeDirs;
   } else if (getTriple().getArch() == llvm::Triple::x86) {
     MultiarchIncludeDirs = X86MultiarchIncludeDirs;
-  } else if (getTriple().getArch() == llvm::Triple::aarch64) {
+  } else if (getTriple().getArch() == llvm::Triple::aarch64 ||
+             getTriple().getArch() == llvm::Triple::aarch64_be ||
+             getTriple().getArch() == llvm::Triple::arm64) {
     MultiarchIncludeDirs = AArch64MultiarchIncludeDirs;
   } else if (getTriple().getArch() == llvm::Triple::arm) {
     if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
@@ -2726,17 +3286,16 @@
 /// libstdc++ installation.
 /*static*/ bool Linux::addLibStdCXXIncludePaths(Twine Base, Twine Suffix,
                                                 Twine TargetArchDir,
-                                                Twine BiarchSuffix,
-                                                Twine MIPSABIDirSuffix,
+                                                Twine IncludeSuffix,
                                                 const ArgList &DriverArgs,
                                                 ArgStringList &CC1Args) {
   if (!addLibStdCXXIncludePaths(Base + Suffix,
-                                TargetArchDir + MIPSABIDirSuffix + BiarchSuffix,
+                                TargetArchDir + IncludeSuffix,
                                 DriverArgs, CC1Args))
     return false;
 
   addSystemInclude(DriverArgs, CC1Args, Base + "/" + TargetArchDir + Suffix
-                   + MIPSABIDirSuffix + BiarchSuffix);
+                   + IncludeSuffix);
   return true;
 }
 
@@ -2748,9 +3307,24 @@
 
   // Check if libc++ has been enabled and provide its include paths if so.
   if (GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx) {
-    // libc++ is always installed at a fixed path on Linux currently.
-    addSystemInclude(DriverArgs, CC1Args,
-                     getDriver().SysRoot + "/usr/include/c++/v1");
+    const std::string LibCXXIncludePathCandidates[] = {
+      // The primary location is within the Clang installation.
+      // FIXME: We shouldn't hard code 'v1' here, so that Clang stays future
+      // proof against newer ABI versions.
+      getDriver().Dir + "/../include/c++/v1",
+
+      // We also check the system path, as for a long time this was the only place Clang looked.
+      // FIXME: We should really remove this. It doesn't make any sense.
+      getDriver().SysRoot + "/usr/include/c++/v1"
+    };
+    for (unsigned i = 0; i < llvm::array_lengthof(LibCXXIncludePathCandidates);
+         ++i) {
+      if (!llvm::sys::fs::exists(LibCXXIncludePathCandidates[i]))
+        continue;
+      // Add the first candidate that exists.
+      addSystemInclude(DriverArgs, CC1Args, LibCXXIncludePathCandidates[i]);
+      break;
+    }
     return;
   }
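
The new libc++ lookup above probes an ordered list of candidate include directories and takes the first one that exists on disk. The same first-hit-wins pattern in isolation, as a rough sketch (the candidate paths and the addSystemInclude call are only indicated in comments and are not the real driver API):

#include <filesystem>
#include <optional>
#include <string>
#include <vector>

// Return the first candidate directory that exists, if any.
static std::optional<std::string>
findFirstExisting(const std::vector<std::string> &Candidates) {
  for (const std::string &Dir : Candidates)
    if (std::filesystem::exists(Dir))
      return Dir;
  return std::nullopt;
}

// Usage: prefer the toolchain-relative copy, fall back to the sysroot one.
// std::vector<std::string> Candidates = {DriverDir + "/../include/c++/v1",
//                                        SysRoot + "/usr/include/c++/v1"};
// if (auto Dir = findFirstExisting(Candidates))
//   addSystemInclude(*Dir);   // hypothetical helper
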
 
@@ -2765,16 +3339,15 @@
   StringRef LibDir = GCCInstallation.getParentLibPath();
   StringRef InstallDir = GCCInstallation.getInstallPath();
   StringRef TripleStr = GCCInstallation.getTriple().str();
-  StringRef MIPSABIDirSuffix = GCCInstallation.getMIPSABIDirSuffix();
-  StringRef BiarchSuffix = GCCInstallation.getBiarchSuffix();
+  const Multilib &Multilib = GCCInstallation.getMultilib();
   const GCCVersion &Version = GCCInstallation.getVersion();
 
   if (addLibStdCXXIncludePaths(LibDir.str() + "/../include",
-                               "/c++/" + Version.Text, TripleStr, BiarchSuffix,
-                               MIPSABIDirSuffix, DriverArgs, CC1Args))
+                               "/c++/" + Version.Text, TripleStr,
+                               Multilib.includeSuffix(), DriverArgs, CC1Args))
     return;
 
-  const std::string IncludePathCandidates[] = {
+  const std::string LibStdCXXIncludePathCandidates[] = {
     // Gentoo is weird and places its headers inside the GCC install, so if the
     // first attempt to find the headers fails, try these patterns.
     InstallDir.str() + "/include/g++-v" + Version.MajorStr + "." +
@@ -2787,9 +3360,10 @@
     LibDir.str() + "/../include/c++",
   };
 
-  for (unsigned i = 0; i < llvm::array_lengthof(IncludePathCandidates); ++i) {
-    if (addLibStdCXXIncludePaths(IncludePathCandidates[i],
-                                 TripleStr + MIPSABIDirSuffix + BiarchSuffix,
+  for (unsigned i = 0; i < llvm::array_lengthof(LibStdCXXIncludePathCandidates);
+       ++i) {
+    if (addLibStdCXXIncludePaths(LibStdCXXIncludePathCandidates[i],
+                                 TripleStr + Multilib.includeSuffix(),
                                  DriverArgs, CC1Args))
       break;
   }
@@ -2860,7 +3434,6 @@
   return false;
 }
 
-
 void XCore::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
                                       ArgStringList &CC1Args) const {
   if (DriverArgs.hasArg(options::OPT_nostdinc) ||
diff --git a/lib/Driver/ToolChains.h b/lib/Driver/ToolChains.h
index 50d3700..ad4d4c1 100644
--- a/lib/Driver/ToolChains.h
+++ b/lib/Driver/ToolChains.h
@@ -13,11 +13,13 @@
 #include "Tools.h"
 #include "clang/Basic/VersionTuple.h"
 #include "clang/Driver/Action.h"
+#include "clang/Driver/Multilib.h"
 #include "clang/Driver/ToolChain.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/Support/Compiler.h"
-#include <vector>
 #include <set>
+#include <vector>
 
 namespace clang {
 namespace driver {
@@ -67,7 +69,6 @@
     bool operator>=(const GCCVersion &RHS) const { return !(*this < RHS); }
   };
 
-
   /// \brief This is a class to find a viable GCC installation for Clang to
   /// use.
   ///
@@ -76,14 +77,17 @@
   /// Driver, and has logic for fuzzing that where appropriate.
   class GCCInstallationDetector {
     bool IsValid;
-    const Driver &D;
     llvm::Triple GCCTriple;
 
     // FIXME: These might be better as path objects.
     std::string GCCInstallPath;
-    std::string GCCBiarchSuffix;
     std::string GCCParentLibPath;
-    std::string GCCMIPSABIDirSuffix;
+
+    /// The primary multilib appropriate for the given flags.
+    Multilib SelectedMultilib;
+    /// On Biarch systems, this corresponds to the default multilib when
+    /// targeting the non-default multilib. Otherwise, it is empty.
+    llvm::Optional<Multilib> BiarchSibling;
 
     GCCVersion Version;
 
@@ -91,8 +95,12 @@
     // order to print out detailed information in verbose mode.
     std::set<std::string> CandidateGCCInstallPaths;
 
+    /// The set of multilibs that the detected installation supports.
+    MultilibSet Multilibs;
+
   public:
-    GCCInstallationDetector(const Driver &D, const llvm::Triple &TargetTriple,
-                            const llvm::opt::ArgList &Args);
+    GCCInstallationDetector() : IsValid(false) {}
+    void init(const Driver &D, const llvm::Triple &TargetTriple,
+              const llvm::opt::ArgList &Args);
 
     /// \brief Check whether we detected a valid GCC install.
@@ -104,26 +112,18 @@
     /// \brief Get the detected GCC installation path.
     StringRef getInstallPath() const { return GCCInstallPath; }
 
-    /// \brief Get the detected GCC installation path suffix for the bi-arch
-    /// target variant.
-    StringRef getBiarchSuffix() const { return GCCBiarchSuffix; }
-
     /// \brief Get the detected GCC parent lib path.
     StringRef getParentLibPath() const { return GCCParentLibPath; }
 
-    /// \brief Get the detected GCC MIPS ABI directory suffix.
-    ///
-    /// This is used as a suffix both to the install directory of GCC and as
-    /// a suffix to its parent lib path in order to select a MIPS ABI-specific
-    /// subdirectory.
-    ///
-    /// This will always be empty for any non-MIPS target.
-    ///
-    // FIXME: This probably shouldn't exist at all, and should be factored
-    // into the multiarch and/or biarch support. Please don't add more uses of
-    // this interface, it is meant as a legacy crutch for the MIPS driver
-    // logic.
-    StringRef getMIPSABIDirSuffix() const { return GCCMIPSABIDirSuffix; }
+    /// \brief Get the detected Multilib
+    const Multilib &getMultilib() const { return SelectedMultilib; }
+
+    /// \brief Get the whole MultilibSet
+    const MultilibSet &getMultilibs() const { return Multilibs; }
+
+    /// Get the biarch sibling multilib (if it exists).
+    /// \return true iff such a sibling exists
+    bool getBiarchSibling(Multilib &M) const;
 
     /// \brief Get the detected GCC version string.
     const GCCVersion &getVersion() const { return Version; }
@@ -140,15 +140,18 @@
                              SmallVectorImpl<StringRef> &BiarchLibDirs,
                              SmallVectorImpl<StringRef> &BiarchTripleAliases);
 
-    void ScanLibDirForGCCTriple(llvm::Triple::ArchType TargetArch,
+    void ScanLibDirForGCCTriple(const llvm::Triple &TargetArch,
                                 const llvm::opt::ArgList &Args,
                                 const std::string &LibDir,
                                 StringRef CandidateTriple,
                                 bool NeedsBiarchSuffix = false);
 
-    void findMIPSABIDirSuffix(std::string &Suffix,
-                              llvm::Triple::ArchType TargetArch, StringRef Path,
-                              const llvm::opt::ArgList &Args);
+    bool findMIPSMultilibs(const llvm::Triple &TargetArch, StringRef Path,
+                           const llvm::opt::ArgList &Args);
+
+    bool findBiarchMultilibs(const llvm::Triple &TargetArch, StringRef Path,
+                             const llvm::opt::ArgList &Args,
+                             bool NeedsBiarchSuffix);
   };
 
   GCCInstallationDetector GCCInstallation;
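
The header changes above replace the ad-hoc biarch/MIPS directory-suffix strings with a selected Multilib plus an optional biarch sibling. As a rough, self-contained illustration of the idea (a toy model, not the real clang::driver::Multilib API), a multilib can be thought of as a set of path suffixes guarded by flags, and selection picks the first entry whose flags are all satisfied:

#include <algorithm>
#include <optional>
#include <string>
#include <vector>

// Toy model of a multilib variant: path suffixes plus the flags enabling it.
struct Multilib {
  std::string GCCSuffix;                   // appended to the GCC install dir
  std::string OSSuffix;                    // appended to OS library dirs
  std::string IncludeSuffix;               // appended to libstdc++ include dirs
  std::vector<std::string> RequiredFlags;  // e.g. {"+m64"}
};

// Pick the first multilib whose required flags are all present.
static std::optional<Multilib>
selectMultilib(const std::vector<Multilib> &Set,
               const std::vector<std::string> &Flags) {
  for (const Multilib &M : Set) {
    bool Ok = std::all_of(M.RequiredFlags.begin(), M.RequiredFlags.end(),
                          [&](const std::string &F) {
                            return std::find(Flags.begin(), Flags.end(), F) !=
                                   Flags.end();
                          });
    if (Ok)
      return M;
  }
  return std::nullopt;
}
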
@@ -158,17 +161,18 @@
               const llvm::opt::ArgList &Args);
   ~Generic_GCC();
 
-  virtual void printVerboseInfo(raw_ostream &OS) const;
+  void printVerboseInfo(raw_ostream &OS) const override;
 
-  virtual bool IsUnwindTablesDefault() const;
-  virtual bool isPICDefault() const;
-  virtual bool isPIEDefault() const;
-  virtual bool isPICDefaultForced() const;
+  bool IsUnwindTablesDefault() const override;
+  bool isPICDefault() const override;
+  bool isPIEDefault() const override;
+  bool isPICDefaultForced() const override;
+  bool IsIntegratedAssemblerDefault() const override;
 
 protected:
-  virtual Tool *getTool(Action::ActionClass AC) const;
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *getTool(Action::ActionClass AC) const override;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 
   /// \name ToolChain Implementation Helper Functions
   /// @{
@@ -182,27 +186,138 @@
   /// @}
 
 private:
-  mutable OwningPtr<tools::gcc::Preprocess> Preprocess;
-  mutable OwningPtr<tools::gcc::Precompile> Precompile;
-  mutable OwningPtr<tools::gcc::Compile> Compile;
+  mutable std::unique_ptr<tools::gcc::Preprocess> Preprocess;
+  mutable std::unique_ptr<tools::gcc::Compile> Compile;
+};
+
+class LLVM_LIBRARY_VISIBILITY MachO : public ToolChain {
+protected:
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
+  Tool *getTool(Action::ActionClass AC) const override;
+private:
+  mutable std::unique_ptr<tools::darwin::Lipo> Lipo;
+  mutable std::unique_ptr<tools::darwin::Dsymutil> Dsymutil;
+  mutable std::unique_ptr<tools::darwin::VerifyDebug> VerifyDebug;
+
+public:
+  MachO(const Driver &D, const llvm::Triple &Triple,
+             const llvm::opt::ArgList &Args);
+  ~MachO();
+
+  /// @name MachO specific toolchain API
+  /// {
+
+  /// Get the "MachO" arch name for a particular compiler invocation. For
+  /// example, Apple treats different ARM variations as distinct architectures.
+  StringRef getMachOArchName(const llvm::opt::ArgList &Args) const;
+
+
+  /// Add the linker arguments to link the ARC runtime library.
+  virtual void AddLinkARCArgs(const llvm::opt::ArgList &Args,
+                              llvm::opt::ArgStringList &CmdArgs) const {}
+
+  /// Add the linker arguments to link the compiler runtime library.
+  virtual void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
+                                     llvm::opt::ArgStringList &CmdArgs) const;
+
+  virtual void
+  addStartObjectFileArgs(const llvm::opt::ArgList &Args,
+                         llvm::opt::ArgStringList &CmdArgs) const {}
+
+  virtual void addMinVersionArgs(const llvm::opt::ArgList &Args,
+                                 llvm::opt::ArgStringList &CmdArgs) const {}
+
+  /// On some iOS platforms, kernel and kernel modules were built statically. Is
+  /// this such a target?
+  virtual bool isKernelStatic() const {
+    return false;
+  }
+
+  /// Is the target either iOS or an iOS simulator?
+  bool isTargetIOSBased() const {
+    return false;
+  }
+
+  void AddLinkRuntimeLib(const llvm::opt::ArgList &Args,
+                         llvm::opt::ArgStringList &CmdArgs,
+                         StringRef DarwinStaticLib,
+                         bool AlwaysLink = false,
+                         bool IsEmbedded = false) const;
+
+  /// }
+  /// @name ToolChain Implementation
+  /// {
+
+  std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
+                                          types::ID InputType) const override;
+
+  types::ID LookupTypeForExtension(const char *Ext) const override;
+
+  bool HasNativeLLVMSupport() const override;
+
+  llvm::opt::DerivedArgList *
+  TranslateArgs(const llvm::opt::DerivedArgList &Args,
+                const char *BoundArch) const override;
+
+  bool IsBlocksDefault() const override {
+    // Always allow blocks on Apple; users interested in versioning are
+    // expected to use /usr/include/Blocks.h.
+    return true;
+  }
+  bool IsIntegratedAssemblerDefault() const override {
+    // Default integrated assembler to on for Apple's MachO targets.
+    return true;
+  }
+
+  bool IsMathErrnoDefault() const override {
+    return false;
+  }
+
+  bool IsEncodeExtendedBlockSignatureDefault() const override {
+    return true;
+  }
+
+  bool IsObjCNonFragileABIDefault() const override {
+    // Non-fragile ABI is default for everything but i386.
+    return getTriple().getArch() != llvm::Triple::x86;
+  }
+
+  bool UseObjCMixedDispatch() const override {
+    return true;
+  }
+
+  bool IsUnwindTablesDefault() const override;
+
+  RuntimeLibType GetDefaultRuntimeLibType() const override {
+    return ToolChain::RLT_CompilerRT;
+  }
+
+  bool isPICDefault() const override;
+  bool isPIEDefault() const override;
+  bool isPICDefaultForced() const override;
+
+  bool SupportsProfiling() const override;
+
+  bool SupportsObjCGC() const override {
+    return false;
+  }
+
+  bool UseDwarfDebugFlags() const override;
+
+  bool UseSjLjExceptions() const override {
+    return false;
+  }
+
+  /// }
 };
 
   /// Darwin - The base Darwin tool chain.
-class LLVM_LIBRARY_VISIBILITY Darwin : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY Darwin : public MachO {
 public:
   /// The host version.
   unsigned DarwinVersion[3];
 
-protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
-  virtual Tool *getTool(Action::ActionClass AC) const;
-
-private:
-  mutable OwningPtr<tools::darwin::Lipo> Lipo;
-  mutable OwningPtr<tools::darwin::Dsymutil> Dsymutil;
-  mutable OwningPtr<tools::darwin::VerifyDebug> VerifyDebug;
-
   /// Whether the information on the target has been initialized.
   //
   // FIXME: This should be eliminated. What we want to do is make this part of
@@ -210,11 +325,13 @@
   // the argument translation business.
   mutable bool TargetInitialized;
 
-  /// Whether we are targeting iPhoneOS target.
-  mutable bool TargetIsIPhoneOS;
+  enum DarwinPlatformKind {
+    MacOS,
+    IPhoneOS,
+    IPhoneOSSimulator
+  };
 
-  /// Whether we are targeting the iPhoneOS simulator target.
-  mutable bool TargetIsIPhoneOSSimulator;
+  mutable DarwinPlatformKind TargetPlatform;
 
   /// The OS version we are targeting.
   mutable VersionTuple TargetVersion;
@@ -237,43 +354,62 @@
   ~Darwin();
 
   std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
-                                          types::ID InputType) const;
+                                          types::ID InputType) const override;
 
-  /// @name Darwin Specific Toolchain API
+  /// @name Apple Specific Toolchain Implementation
+  /// {
+
+  void
+  addMinVersionArgs(const llvm::opt::ArgList &Args,
+                    llvm::opt::ArgStringList &CmdArgs) const override;
+
+  void
+  addStartObjectFileArgs(const llvm::opt::ArgList &Args,
+                         llvm::opt::ArgStringList &CmdArgs) const override;
+
+  bool isKernelStatic() const override {
+    return !isTargetIPhoneOS() || isIPhoneOSVersionLT(6, 0) ||
+           getTriple().getArch() == llvm::Triple::arm64;
+  }
+
+protected:
+  /// }
+  /// @name Darwin specific Toolchain functions
   /// {
 
   // FIXME: Eliminate these ...Target functions and derive separate tool chains
   // for these targets and put version in constructor.
-  void setTarget(bool IsIPhoneOS, unsigned Major, unsigned Minor,
-                 unsigned Micro, bool IsIOSSim) const {
-    assert((!IsIOSSim || IsIPhoneOS) && "Unexpected deployment target!");
-
+  void setTarget(DarwinPlatformKind Platform, unsigned Major, unsigned Minor,
+                 unsigned Micro) const {
     // FIXME: For now, allow reinitialization as long as values don't
     // change. This will go away when we move away from argument translation.
-    if (TargetInitialized && TargetIsIPhoneOS == IsIPhoneOS &&
-        TargetIsIPhoneOSSimulator == IsIOSSim &&
+    if (TargetInitialized && TargetPlatform == Platform &&
         TargetVersion == VersionTuple(Major, Minor, Micro))
       return;
 
     assert(!TargetInitialized && "Target already initialized!");
     TargetInitialized = true;
-    TargetIsIPhoneOS = IsIPhoneOS;
-    TargetIsIPhoneOSSimulator = IsIOSSim;
+    TargetPlatform = Platform;
     TargetVersion = VersionTuple(Major, Minor, Micro);
   }
 
   bool isTargetIPhoneOS() const {
     assert(TargetInitialized && "Target not initialized!");
-    return TargetIsIPhoneOS;
+    return TargetPlatform == IPhoneOS;
   }
 
   bool isTargetIOSSimulator() const {
     assert(TargetInitialized && "Target not initialized!");
-    return TargetIsIPhoneOSSimulator;
+    return TargetPlatform == IPhoneOSSimulator;
+  }
+
+  bool isTargetIOSBased() const {
+    assert(TargetInitialized && "Target not initialized!");
+    return isTargetIPhoneOS() || isTargetIOSSimulator();
   }
 
   bool isTargetMacOS() const {
-    return !isTargetIOSSimulator() && !isTargetIPhoneOS();
+    return TargetPlatform == MacOS;
   }
 
   bool isTargetInitialized() const { return TargetInitialized; }
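
In the hunk above, the two mutable booleans for iPhoneOS / iPhoneOS-simulator are folded into a single DarwinPlatformKind enum, which turns the isTarget*() predicates into simple comparisons. A compressed sketch of the same state machine outside the toolchain class (names and assertion text are illustrative only):

#include <cassert>
#include <tuple>

enum class DarwinPlatformKind { MacOS, IPhoneOS, IPhoneOSSimulator };

struct TargetState {
  bool Initialized = false;
  DarwinPlatformKind Platform = DarwinPlatformKind::MacOS;
  std::tuple<unsigned, unsigned, unsigned> Version{0, 0, 0};

  void setTarget(DarwinPlatformKind P, unsigned Major, unsigned Minor,
                 unsigned Micro) {
    // Allow re-initialization only with identical values.
    if (Initialized && Platform == P &&
        Version == std::make_tuple(Major, Minor, Micro))
      return;
    assert(!Initialized && "Target already initialized!");
    Initialized = true;
    Platform = P;
    Version = {Major, Minor, Micro};
  }

  bool isIOSBased() const {
    assert(Initialized && "Target not initialized!");
    return Platform == DarwinPlatformKind::IPhoneOS ||
           Platform == DarwinPlatformKind::IPhoneOSSimulator;
  }
};
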
@@ -283,101 +419,53 @@
     return TargetVersion;
   }
 
-  /// getDarwinArchName - Get the "Darwin" arch name for a particular compiler
-  /// invocation. For example, Darwin treats different ARM variations as
-  /// distinct architectures.
-  StringRef getDarwinArchName(const llvm::opt::ArgList &Args) const;
-
   bool isIPhoneOSVersionLT(unsigned V0, unsigned V1=0, unsigned V2=0) const {
-    assert(isTargetIPhoneOS() && "Unexpected call for OS X target!");
+    assert(isTargetIOSBased() && "Unexpected call for non iOS target!");
     return TargetVersion < VersionTuple(V0, V1, V2);
   }
 
   bool isMacosxVersionLT(unsigned V0, unsigned V1=0, unsigned V2=0) const {
-    assert(!isTargetIPhoneOS() && "Unexpected call for iPhoneOS target!");
+    assert(isTargetMacOS() && "Unexpected call for non OS X target!");
     return TargetVersion < VersionTuple(V0, V1, V2);
   }
 
-  /// AddLinkARCArgs - Add the linker arguments to link the ARC runtime library.
-  virtual void AddLinkARCArgs(const llvm::opt::ArgList &Args,
-                              llvm::opt::ArgStringList &CmdArgs) const = 0;
-
-  /// AddLinkRuntimeLibArgs - Add the linker arguments to link the compiler
-  /// runtime library.
-  virtual void
-  AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
-                        llvm::opt::ArgStringList &CmdArgs) const = 0;
-
+public:
   /// }
   /// @name ToolChain Implementation
   /// {
 
-  virtual types::ID LookupTypeForExtension(const char *Ext) const;
-
-  virtual bool HasNativeLLVMSupport() const;
-
-  virtual ObjCRuntime getDefaultObjCRuntime(bool isNonFragile) const;
-  virtual bool hasBlocksRuntime() const;
-
-  virtual llvm::opt::DerivedArgList *
+  llvm::opt::DerivedArgList *
   TranslateArgs(const llvm::opt::DerivedArgList &Args,
-                const char *BoundArch) const;
+                const char *BoundArch) const override;
 
-  virtual bool IsBlocksDefault() const {
-    // Always allow blocks on Darwin; users interested in versioning are
-    // expected to use /usr/include/Blocks.h.
-    return true;
-  }
-  virtual bool IsIntegratedAssemblerDefault() const {
-    // Default integrated assembler to on for Darwin.
-    return true;
-  }
+  ObjCRuntime getDefaultObjCRuntime(bool isNonFragile) const override;
+  bool hasBlocksRuntime() const override;
 
-  virtual bool IsMathErrnoDefault() const {
-    return false;
-  }
-
-  virtual bool IsEncodeExtendedBlockSignatureDefault() const {
-    return true;
-  }
-  
-  virtual bool IsObjCNonFragileABIDefault() const {
-    // Non-fragile ABI is default for everything but i386.
-    return getTriple().getArch() != llvm::Triple::x86;
-  }
-
-  virtual bool UseObjCMixedDispatch() const {
+  bool UseObjCMixedDispatch() const override {
     // This is only used with the non-fragile ABI and non-legacy dispatch.
 
     // Mixed dispatch is used everywhere except OS X before 10.6.
-    return !(!isTargetIPhoneOS() && isMacosxVersionLT(10, 6));
+    return !(isTargetMacOS() && isMacosxVersionLT(10, 6));
   }
-  virtual bool IsUnwindTablesDefault() const;
-  virtual unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const {
+
+  unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
     // Stack protectors default to on for user code on 10.5,
     // and for everything in 10.6 and beyond
-    return isTargetIPhoneOS() ||
-      (!isMacosxVersionLT(10, 6) ||
-         (!isMacosxVersionLT(10, 5) && !KernelOrKext));
+    if (isTargetIOSBased())
+      return 1;
+    else if (isTargetMacOS() && !isMacosxVersionLT(10, 6))
+      return 1;
+    else if (isTargetMacOS() && !isMacosxVersionLT(10, 5) && !KernelOrKext)
+      return 1;
+
+    return 0;
   }
-  virtual RuntimeLibType GetDefaultRuntimeLibType() const {
-    return ToolChain::RLT_CompilerRT;
-  }
-  virtual bool isPICDefault() const;
-  virtual bool isPIEDefault() const;
-  virtual bool isPICDefaultForced() const;
 
-  virtual bool SupportsProfiling() const;
+  bool SupportsObjCGC() const override;
 
-  virtual bool SupportsObjCGC() const;
+  void CheckObjCARC() const override;
 
-  virtual void CheckObjCARC() const;
-
-  virtual bool UseDwarfDebugFlags() const;
-
-  virtual bool UseSjLjExceptions() const;
-
-  /// }
+  bool UseSjLjExceptions() const override;
 };
 
 /// DarwinClang - The Darwin toolchain used by Clang.
@@ -386,40 +474,29 @@
   DarwinClang(const Driver &D, const llvm::Triple &Triple,
               const llvm::opt::ArgList &Args);
 
-  /// @name Darwin ToolChain Implementation
+  /// @name Apple ToolChain Implementation
   /// {
 
-  virtual void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
-                                     llvm::opt::ArgStringList &CmdArgs) const;
-  void AddLinkRuntimeLib(const llvm::opt::ArgList &Args,
-                         llvm::opt::ArgStringList &CmdArgs,
-                         const char *DarwinStaticLib,
-                         bool AlwaysLink = false) const;
+  void
+  AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
+                        llvm::opt::ArgStringList &CmdArgs) const override;
 
-  virtual void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
-                                   llvm::opt::ArgStringList &CmdArgs) const;
+  void
+  AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+                      llvm::opt::ArgStringList &CmdArgs) const override;
 
-  virtual void AddCCKextLibArgs(const llvm::opt::ArgList &Args,
-                                llvm::opt::ArgStringList &CmdArgs) const;
+  void
+  AddCCKextLibArgs(const llvm::opt::ArgList &Args,
+                   llvm::opt::ArgStringList &CmdArgs) const override;
 
-  virtual void AddLinkARCArgs(const llvm::opt::ArgList &Args,
-                              llvm::opt::ArgStringList &CmdArgs) const;
+  virtual void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const;
+
+  void
+  AddLinkARCArgs(const llvm::opt::ArgList &Args,
+                 llvm::opt::ArgStringList &CmdArgs) const override;
   /// }
 };
 
-/// Darwin_Generic_GCC - Generic Darwin tool chain using gcc.
-class LLVM_LIBRARY_VISIBILITY Darwin_Generic_GCC : public Generic_GCC {
-public:
-  Darwin_Generic_GCC(const Driver &D, const llvm::Triple &Triple,
-                     const llvm::opt::ArgList &Args)
-      : Generic_GCC(D, Triple, Args) {}
-
-  std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
-                                          types::ID InputType) const;
-
-  virtual bool isPICDefault() const { return false; }
-};
-
 class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC {
   virtual void anchor();
 public:
@@ -427,12 +504,8 @@
               const llvm::opt::ArgList &Args)
       : Generic_GCC(D, Triple, Args) {}
 
-  virtual bool IsIntegratedAssemblerDefault() const {
-    // Default integrated assembler to on for x86.
-    return (getTriple().getArch() == llvm::Triple::aarch64 ||
-            getTriple().getArch() == llvm::Triple::x86 ||
-            getTriple().getArch() == llvm::Triple::x86_64);
-  }
+  void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+                             llvm::opt::ArgStringList &CC1Args) const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY AuroraUX : public Generic_GCC {
@@ -441,8 +514,8 @@
            const llvm::opt::ArgList &Args);
 
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY Solaris : public Generic_GCC {
@@ -450,10 +523,10 @@
   Solaris(const Driver &D, const llvm::Triple &Triple,
           const llvm::opt::ArgList &Args);
 
-  virtual bool IsIntegratedAssemblerDefault() const { return true; }
+  bool IsIntegratedAssemblerDefault() const override { return true; }
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 
 };
 
@@ -463,17 +536,17 @@
   OpenBSD(const Driver &D, const llvm::Triple &Triple,
           const llvm::opt::ArgList &Args);
 
-  virtual bool IsMathErrnoDefault() const { return false; }
-  virtual bool IsObjCNonFragileABIDefault() const { return true; }
-  virtual bool isPIEDefault() const { return true; }
+  bool IsMathErrnoDefault() const override { return false; }
+  bool IsObjCNonFragileABIDefault() const override { return true; }
+  bool isPIEDefault() const override { return true; }
 
-  virtual unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const {
+  unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
     return 1;
   }
 
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY Bitrig : public Generic_ELF {
@@ -481,43 +554,48 @@
   Bitrig(const Driver &D, const llvm::Triple &Triple,
          const llvm::opt::ArgList &Args);
 
-  virtual bool IsMathErrnoDefault() const { return false; }
-  virtual bool IsObjCNonFragileABIDefault() const { return true; }
-  virtual bool IsObjCLegacyDispatchDefault() const { return false; }
+  bool IsMathErrnoDefault() const override { return false; }
+  bool IsObjCNonFragileABIDefault() const override { return true; }
 
-  virtual void
+  void
   AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                               llvm::opt::ArgStringList &CC1Args) const;
-  virtual void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
-                                   llvm::opt::ArgStringList &CmdArgs) const;
-  virtual unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const {
+                              llvm::opt::ArgStringList &CC1Args) const override;
+  void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+                           llvm::opt::ArgStringList &CmdArgs) const override;
+  unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
      return 1;
   }
 
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY FreeBSD : public Generic_ELF {
 public:
   FreeBSD(const Driver &D, const llvm::Triple &Triple,
           const llvm::opt::ArgList &Args);
-  virtual bool HasNativeLLVMSupport() const;
+  bool HasNativeLLVMSupport() const override;
 
-  virtual bool IsMathErrnoDefault() const { return false; }
-  virtual bool IsObjCNonFragileABIDefault() const { return true; }
+  bool IsMathErrnoDefault() const override { return false; }
+  bool IsObjCNonFragileABIDefault() const override { return true; }
 
-  virtual CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const;
-  virtual void
+  CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+  void
   AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                               llvm::opt::ArgStringList &CC1Args) const;
+                               llvm::opt::ArgStringList &CC1Args) const override;
+  bool IsIntegratedAssemblerDefault() const override {
+    if (getTriple().getArch() == llvm::Triple::ppc ||
+        getTriple().getArch() == llvm::Triple::ppc64)
+      return true;
+    return Generic_ELF::IsIntegratedAssemblerDefault();
+  }
 
-
-  virtual bool UseSjLjExceptions() const;
+  bool UseSjLjExceptions() const override;
+  bool isPIEDefault() const override;
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY NetBSD : public Generic_ELF {
@@ -525,26 +603,26 @@
   NetBSD(const Driver &D, const llvm::Triple &Triple,
          const llvm::opt::ArgList &Args);
 
-  virtual bool IsMathErrnoDefault() const { return false; }
-  virtual bool IsObjCNonFragileABIDefault() const { return true; }
+  bool IsMathErrnoDefault() const override { return false; }
+  bool IsObjCNonFragileABIDefault() const override { return true; }
 
-  virtual CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const;
+  CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
 
-  virtual void
+  void
   AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                               llvm::opt::ArgStringList &CC1Args) const;
-  virtual bool IsUnwindTablesDefault() const {
+                              llvm::opt::ArgStringList &CC1Args) const override;
+  bool IsUnwindTablesDefault() const override {
     return true;
   }
-  virtual bool IsIntegratedAssemblerDefault() const {
+  bool IsIntegratedAssemblerDefault() const override {
     if (getTriple().getArch() == llvm::Triple::ppc)
       return true;
     return Generic_ELF::IsIntegratedAssemblerDefault();
   }
 
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY Minix : public Generic_ELF {
@@ -553,8 +631,8 @@
         const llvm::opt::ArgList &Args);
 
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_ELF {
@@ -562,11 +640,11 @@
   DragonFly(const Driver &D, const llvm::Triple &Triple,
             const llvm::opt::ArgList &Args);
 
-  virtual bool IsMathErrnoDefault() const { return false; }
+  bool IsMathErrnoDefault() const override { return false; }
 
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY Linux : public Generic_ELF {
@@ -574,30 +652,26 @@
   Linux(const Driver &D, const llvm::Triple &Triple,
         const llvm::opt::ArgList &Args);
 
-  virtual bool HasNativeLLVMSupport() const;
+  bool HasNativeLLVMSupport() const override;
 
-  virtual void
+  void
   AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                            llvm::opt::ArgStringList &CC1Args) const;
-  virtual void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
-                                     llvm::opt::ArgStringList &CC1Args) const;
-  virtual void
+                            llvm::opt::ArgStringList &CC1Args) const override;
+  void
   AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                               llvm::opt::ArgStringList &CC1Args) const;
-  virtual bool isPIEDefault() const;
+                               llvm::opt::ArgStringList &CC1Args) const override;
+  bool isPIEDefault() const override;
 
   std::string Linker;
   std::vector<std::string> ExtraOpts;
 
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 
 private:
   static bool addLibStdCXXIncludePaths(Twine Base, Twine Suffix,
-                                       Twine TargetArchDir,
-                                       Twine BiarchSuffix,
-                                       Twine MIPSABIDirSuffix,
+                                       Twine TargetArchDir, Twine IncludeSuffix,
                                        const llvm::opt::ArgList &DriverArgs,
                                        llvm::opt::ArgStringList &CC1Args);
   static bool addLibStdCXXIncludePaths(Twine Base, Twine TargetArchDir,
@@ -610,21 +684,21 @@
 class LLVM_LIBRARY_VISIBILITY Hexagon_TC : public Linux {
 protected:
   GCCVersion GCCLibAndIncVersion;
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 
 public:
   Hexagon_TC(const Driver &D, const llvm::Triple &Triple,
              const llvm::opt::ArgList &Args);
   ~Hexagon_TC();
 
-  virtual void
+  void
   AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                            llvm::opt::ArgStringList &CC1Args) const;
-  virtual void
+                            llvm::opt::ArgStringList &CC1Args) const override;
+  void
   AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                               llvm::opt::ArgStringList &CC1Args) const;
-  virtual CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const;
+                              llvm::opt::ArgStringList &CC1Args) const override;
+  CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
 
   StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
 
@@ -641,10 +715,10 @@
                const llvm::opt::ArgList &Args);
   ~TCEToolChain();
 
-  bool IsMathErrnoDefault() const;
-  bool isPICDefault() const;
-  bool isPIEDefault() const;
-  bool isPICDefaultForced() const;
+  bool IsMathErrnoDefault() const override;
+  bool isPICDefault() const override;
+  bool isPIEDefault() const override;
+  bool isPICDefaultForced() const override;
 };
 
 class LLVM_LIBRARY_VISIBILITY Windows : public ToolChain {
@@ -652,22 +726,22 @@
   Windows(const Driver &D, const llvm::Triple &Triple,
           const llvm::opt::ArgList &Args);
 
-  virtual bool IsIntegratedAssemblerDefault() const;
-  virtual bool IsUnwindTablesDefault() const;
-  virtual bool isPICDefault() const;
-  virtual bool isPIEDefault() const;
-  virtual bool isPICDefaultForced() const;
+  bool IsIntegratedAssemblerDefault() const override;
+  bool IsUnwindTablesDefault() const override;
+  bool isPICDefault() const override;
+  bool isPIEDefault() const override;
+  bool isPICDefaultForced() const override;
 
-  virtual void
+  void
   AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                            llvm::opt::ArgStringList &CC1Args) const;
-  virtual void
+                            llvm::opt::ArgStringList &CC1Args) const override;
+  void
   AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                               llvm::opt::ArgStringList &CC1Args) const;
+                               llvm::opt::ArgStringList &CC1Args) const override;
 
 protected:
-  virtual Tool *buildLinker() const;
-  virtual Tool *buildAssembler() const;
+  Tool *buildLinker() const override;
+  Tool *buildAssembler() const override;
 };
 
 
@@ -676,22 +750,22 @@
   XCore(const Driver &D, const llvm::Triple &Triple,
           const llvm::opt::ArgList &Args);
 protected:
-  virtual Tool *buildAssembler() const;
-  virtual Tool *buildLinker() const;
+  Tool *buildAssembler() const override;
+  Tool *buildLinker() const override;
 public:
-  virtual bool isPICDefault() const;
-  virtual bool isPIEDefault() const;
-  virtual bool isPICDefaultForced() const;
-  virtual bool SupportsProfiling() const;
-  virtual bool hasBlocksRuntime() const;
-  virtual void AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                            llvm::opt::ArgStringList &CC1Args) const;
-  virtual void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
-                                     llvm::opt::ArgStringList &CC1Args) const;
-  virtual void AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
-                               llvm::opt::ArgStringList &CC1Args) const;
-  virtual void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
-                                   llvm::opt::ArgStringList &CmdArgs) const;
+  bool isPICDefault() const override;
+  bool isPIEDefault() const override;
+  bool isPICDefaultForced() const override;
+  bool SupportsProfiling() const override;
+  bool hasBlocksRuntime() const override;
+  void AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+                    llvm::opt::ArgStringList &CC1Args) const override;
+  void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+                             llvm::opt::ArgStringList &CC1Args) const override;
+  void AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+                       llvm::opt::ArgStringList &CC1Args) const override;
+  void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+                           llvm::opt::ArgStringList &CmdArgs) const override;
 };
 
 } // end namespace toolchains
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 29713ed..6acc3d5 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -10,6 +10,7 @@
 #include "Tools.h"
 #include "InputInfo.h"
 #include "ToolChains.h"
+#include "clang/Basic/LangOptions.h"
 #include "clang/Basic/ObjCRuntime.h"
 #include "clang/Basic/Version.h"
 #include "clang/Driver/Action.h"
@@ -21,7 +22,6 @@
 #include "clang/Driver/SanitizerArgs.h"
 #include "clang/Driver/ToolChain.h"
 #include "clang/Driver/Util.h"
-#include "clang/Sema/SemaDiagnostic.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringSwitch.h"
@@ -29,13 +29,14 @@
 #include "llvm/Option/Arg.h"
 #include "llvm/Option/ArgList.h"
 #include "llvm/Option/Option.h"
+#include "llvm/Support/Compression.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/Format.h"
 #include "llvm/Support/Host.h"
 #include "llvm/Support/Path.h"
-#include "llvm/Support/Program.h"
 #include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
 #include "llvm/Support/raw_ostream.h"
 #include <sys/stat.h>
 
@@ -44,6 +45,21 @@
 using namespace clang;
 using namespace llvm::opt;
 
+static void addAssemblerKPIC(const ArgList &Args, ArgStringList &CmdArgs) {
+  Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
+                                    options::OPT_fpic, options::OPT_fno_pic,
+                                    options::OPT_fPIE, options::OPT_fno_PIE,
+                                    options::OPT_fpie, options::OPT_fno_pie);
+  if (!LastPICArg)
+    return;
+  if (LastPICArg->getOption().matches(options::OPT_fPIC) ||
+      LastPICArg->getOption().matches(options::OPT_fpic) ||
+      LastPICArg->getOption().matches(options::OPT_fPIE) ||
+      LastPICArg->getOption().matches(options::OPT_fpie)) {
+    CmdArgs.push_back("-KPIC");
+  }
+}
+
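
addAssemblerKPIC above follows the usual driver convention that the last PIC-related flag on the command line wins (via getLastArg), and only the positive forms cause -KPIC to be passed to the assembler. A standalone sketch of that scan over a plain argument vector, with the option table replaced by string sets for illustration:

#include <set>
#include <string>
#include <vector>

// Return true if the last flag from the PIC/PIE family is a positive one,
// i.e. the assembler should be given -KPIC.
static bool wantsAssemblerKPIC(const std::vector<std::string> &Args) {
  const std::set<std::string> Positive = {"-fPIC", "-fpic", "-fPIE", "-fpie"};
  const std::set<std::string> Negative = {"-fno-PIC", "-fno-pic", "-fno-PIE",
                                          "-fno-pie"};
  bool Result = false;
  for (const std::string &A : Args) {   // later flags override earlier ones
    if (Positive.count(A))
      Result = true;
    else if (Negative.count(A))
      Result = false;
  }
  return Result;
}

// wantsAssemblerKPIC({"-fPIC", "-fno-pic"}) == false
// wantsAssemblerKPIC({"-fno-pie", "-fpic"}) == true
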
 /// CheckPreprocessingOptions - Perform some validation of preprocessing
 /// arguments that is shared with gcc.
 static void CheckPreprocessingOptions(const Driver &D, const ArgList &Args) {
@@ -190,7 +206,9 @@
   }
 
   // LIBRARY_PATH - included following the user specified library paths.
-  addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
+  //                This is only supported on native toolchains.
+  if (!TC.isCrossCompiling())
+    addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
 }
 
 /// \brief Determine whether Objective-C automated reference counting is
@@ -208,25 +226,6 @@
   return Args.hasArg(options::OPT_fobjc_link_runtime);
 }
 
-static void addProfileRT(const ToolChain &TC, const ArgList &Args,
-                         ArgStringList &CmdArgs,
-                         llvm::Triple Triple) {
-  if (!(Args.hasArg(options::OPT_fprofile_arcs) ||
-        Args.hasArg(options::OPT_fprofile_generate) ||
-        Args.hasArg(options::OPT_fcreate_profile) ||
-        Args.hasArg(options::OPT_coverage)))
-    return;
-
-  // GCC links libgcov.a by adding -L<inst>/gcc/lib/gcc/<triple>/<ver> -lgcov to
-  // the link line. We cannot do the same thing because unlike gcov there is a
-  // libprofile_rt.so. We used to use the -l:libprofile_rt.a syntax, but that is
-  // not supported by old linkers.
-  std::string ProfileRT =
-    std::string(TC.getDriver().Dir) + "/../lib/libprofile_rt.a";
-
-  CmdArgs.push_back(Args.MakeArgString(ProfileRT));
-}
-
 static bool forwardToGCC(const Option &O) {
   // Don't forward inputs from the original command line.  They are added from
   // InputInfoList.
@@ -297,6 +296,9 @@
     if (A->getOption().matches(options::OPT_M) ||
         A->getOption().matches(options::OPT_MD))
       CmdArgs.push_back("-sys-header-deps");
+
+    if (isa<PrecompileJobAction>(JA))
+      CmdArgs.push_back("-module-file-deps");
   }
 
   if (Args.hasArg(options::OPT_MG)) {
@@ -442,109 +444,6 @@
   getToolChain().AddClangSystemIncludeArgs(Args, CmdArgs);
 }
 
-/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
-/// CPU.
-//
-// FIXME: This is redundant with -mcpu, why does LLVM use this.
-// FIXME: tblgen this, or kill it!
-static const char *getLLVMArchSuffixForARM(StringRef CPU) {
-  return llvm::StringSwitch<const char *>(CPU)
-    .Case("strongarm", "v4")
-    .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
-    .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
-    .Cases("arm920", "arm920t", "arm922t", "v4t")
-    .Cases("arm940t", "ep9312","v4t")
-    .Cases("arm10tdmi",  "arm1020t", "v5")
-    .Cases("arm9e",  "arm926ej-s",  "arm946e-s", "v5e")
-    .Cases("arm966e-s",  "arm968e-s",  "arm10e", "v5e")
-    .Cases("arm1020e",  "arm1022e",  "xscale", "iwmmxt", "v5e")
-    .Cases("arm1136j-s",  "arm1136jf-s",  "arm1176jz-s", "v6")
-    .Cases("arm1176jzf-s",  "mpcorenovfp",  "mpcore", "v6")
-    .Cases("arm1156t2-s",  "arm1156t2f-s", "v6t2")
-    .Cases("cortex-a5", "cortex-a7", "cortex-a8", "v7")
-    .Cases("cortex-a9", "cortex-a12", "cortex-a15", "v7")
-    .Cases("cortex-r4", "cortex-r5", "v7r")
-    .Case("cortex-m0", "v6m")
-    .Case("cortex-m3", "v7m")
-    .Case("cortex-m4", "v7em")
-    .Case("cortex-a9-mp", "v7f")
-    .Case("swift", "v7s")
-    .Cases("cortex-a53", "cortex-a57", "v8")
-    .Default("");
-}
-
-/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
-//
-// FIXME: tblgen this.
-static std::string getARMTargetCPU(const ArgList &Args,
-                                   const llvm::Triple &Triple) {
-  // FIXME: Warn on inconsistent use of -mcpu and -march.
-
-  // If we have -mcpu=, use that.
-  if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
-    StringRef MCPU = A->getValue();
-    // Handle -mcpu=native.
-    if (MCPU == "native")
-      return llvm::sys::getHostCPUName();
-    else
-      return MCPU;
-  }
-
-  StringRef MArch;
-  if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
-    // Otherwise, if we have -march= choose the base CPU for that arch.
-    MArch = A->getValue();
-  } else {
-    // Otherwise, use the Arch from the triple.
-    MArch = Triple.getArchName();
-  }
-
-  if (Triple.getOS() == llvm::Triple::NetBSD) {
-    if (MArch == "armv6")
-      return "arm1176jzf-s";
-  }
-
-  // Handle -march=native.
-  std::string NativeMArch;
-  if (MArch == "native") {
-    std::string CPU = llvm::sys::getHostCPUName();
-    if (CPU != "generic") {
-      // Translate the native cpu into the architecture. The switch below will
-      // then chose the minimum cpu for that arch.
-      NativeMArch = std::string("arm") + getLLVMArchSuffixForARM(CPU);
-      MArch = NativeMArch;
-    }
-  }
-
-  return llvm::StringSwitch<const char *>(MArch)
-    .Cases("armv2", "armv2a","arm2")
-    .Case("armv3", "arm6")
-    .Case("armv3m", "arm7m")
-    .Case("armv4", "strongarm")
-    .Case("armv4t", "arm7tdmi")
-    .Cases("armv5", "armv5t", "arm10tdmi")
-    .Cases("armv5e", "armv5te", "arm1022e")
-    .Case("armv5tej", "arm926ej-s")
-    .Cases("armv6", "armv6k", "arm1136jf-s")
-    .Case("armv6j", "arm1136j-s")
-    .Cases("armv6z", "armv6zk", "arm1176jzf-s")
-    .Case("armv6t2", "arm1156t2-s")
-    .Cases("armv6m", "armv6-m", "cortex-m0")
-    .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
-    .Cases("armv7em", "armv7e-m", "cortex-m4")
-    .Cases("armv7f", "armv7-f", "cortex-a9-mp")
-    .Cases("armv7s", "armv7-s", "swift")
-    .Cases("armv7r", "armv7-r", "cortex-r4")
-    .Cases("armv7m", "armv7-m", "cortex-m3")
-    .Cases("armv8", "armv8a", "armv8-a", "cortex-a53")
-    .Case("ep9312", "ep9312")
-    .Case("iwmmxt", "iwmmxt")
-    .Case("xscale", "xscale")
-    // If all else failed, return the most base CPU with thumb interworking
-    // supported by LLVM.
-    .Default("arm7tdmi");
-}
-
 /// getAArch64TargetCPU - Get the (LLVM) name of the AArch64 cpu we are targeting.
 //
 // FIXME: tblgen this.
@@ -572,7 +471,10 @@
     return true;
 
   case llvm::Triple::aarch64:
+  case llvm::Triple::aarch64_be:
+  case llvm::Triple::arm64:
   case llvm::Triple::arm:
+  case llvm::Triple::armeb:
   case llvm::Triple::ppc:
   case llvm::Triple::ppc64:
     if (Triple.isOSDarwin())
@@ -615,10 +517,6 @@
     Features.push_back("+crypto");
   } else if (FPU == "neon") {
     Features.push_back("+neon");
-  } else if (FPU == "none") {
-    Features.push_back("-fp-armv8");
-    Features.push_back("-crypto");
-    Features.push_back("-neon");
   } else
     D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
 }
@@ -659,16 +557,28 @@
     Features.push_back("-vfp2");
     Features.push_back("-vfp3");
     Features.push_back("-neon");
+  } else if (FPU == "vfp") {
+    Features.push_back("+vfp2");
+    Features.push_back("-neon");
   } else if (FPU == "vfp3-d16" || FPU == "vfpv3-d16") {
     Features.push_back("+vfp3");
     Features.push_back("+d16");
     Features.push_back("-neon");
-  } else if (FPU == "vfp") {
-    Features.push_back("+vfp2");
-    Features.push_back("-neon");
   } else if (FPU == "vfp3" || FPU == "vfpv3") {
     Features.push_back("+vfp3");
     Features.push_back("-neon");
+  } else if (FPU == "vfp4-d16" || FPU == "vfpv4-d16") {
+    Features.push_back("+vfp4");
+    Features.push_back("+d16");
+    Features.push_back("-neon");
+  } else if (FPU == "vfp4" || FPU == "vfpv4") {
+    Features.push_back("+vfp4");
+    Features.push_back("-neon");
+  } else if (FPU == "fp4-sp-d16" || FPU == "fpv4-sp-d16") {
+    Features.push_back("+vfp4");
+    Features.push_back("+d16");
+    Features.push_back("+fp-only-sp");
+    Features.push_back("-neon");
   } else if (FPU == "fp-armv8") {
     Features.push_back("+fp-armv8");
     Features.push_back("-neon");
@@ -696,9 +606,8 @@
 
 // Select the float ABI as determined by -msoft-float, -mhard-float, and
 // -mfloat-abi=.
-static StringRef getARMFloatABI(const Driver &D,
-                                const ArgList &Args,
-                                const llvm::Triple &Triple) {
+StringRef tools::arm::getARMFloatABI(const Driver &D, const ArgList &Args,
+                                     const llvm::Triple &Triple) {
   StringRef FloatABI;
   if (Arg *A = Args.getLastArg(options::OPT_msoft_float,
                                options::OPT_mhard_float,
@@ -727,7 +636,7 @@
       //
       // FIXME: Factor out an ARM class so we can cache the arch somewhere.
       std::string ArchName =
-        getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
+        arm::getLLVMArchSuffixForARM(arm::getARMTargetCPU(Args, Triple));
       if (StringRef(ArchName).startswith("v6") ||
           StringRef(ArchName).startswith("v7"))
         FloatABI = "softfp";
@@ -737,8 +646,15 @@
     }
 
     case llvm::Triple::FreeBSD:
-      // FreeBSD defaults to soft float
-      FloatABI = "soft";
+      switch(Triple.getEnvironment()) {
+      case llvm::Triple::GNUEABIHF:
+        FloatABI = "hard";
+        break;
+      default:
+        // FreeBSD defaults to soft float
+        FloatABI = "soft";
+        break;
+      }
       break;
 
     default:
@@ -749,13 +665,16 @@
       case llvm::Triple::GNUEABI:
         FloatABI = "softfp";
         break;
+      case llvm::Triple::EABIHF:
+        FloatABI = "hard";
+        break;
       case llvm::Triple::EABI:
         // EABI is always AAPCS, and if it was not marked 'hard', it's softfp
         FloatABI = "softfp";
         break;
       case llvm::Triple::Android: {
         std::string ArchName =
-          getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
+          arm::getLLVMArchSuffixForARM(arm::getARMTargetCPU(Args, Triple));
         if (StringRef(ArchName).startswith("v7"))
           FloatABI = "softfp";
         else
@@ -765,7 +684,9 @@
       default:
         // Assume "soft", but warn the user we are guessing.
         FloatABI = "soft";
-        D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
+        if (Triple.getOS() != llvm::Triple::UnknownOS ||
+            !Triple.isOSBinFormatMachO())
+          D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
         break;
       }
     }
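
The float-ABI fallback above now distinguishes FreeBSD's gnueabihf environment and the new EABIHF triple, and suppresses the "assuming soft" warning for bare MachO targets. The overall precedence (explicit flag first, then the OS/environment default, then a guess of "soft") can be sketched very roughly as follows; this is a simplification, and the OS/environment strings are stand-ins for the parsed triple components:

#include <string>

static std::string pickARMFloatABI(const std::string &Explicit,
                                   const std::string &OS,
                                   const std::string &Env) {
  if (!Explicit.empty())
    return Explicit;           // -mfloat-abi= / -mhard-float / -msoft-float
  if (OS == "freebsd")
    return Env == "gnueabihf" ? "hard" : "soft";
  if (Env == "gnueabihf" || Env == "eabihf")
    return "hard";
  if (Env == "gnueabi" || Env == "eabi")
    return "softfp";
  if (Env == "android")
    return "softfp";           // the real code also checks for ARMv7 here
  return "soft";               // last resort, possibly with a warning
}
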
@@ -776,18 +697,30 @@
 
 static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
                                  const ArgList &Args,
-                                 std::vector<const char *> &Features) {
-  StringRef FloatABI = getARMFloatABI(D, Args, Triple);
-  // FIXME: Note, this is a hack, the LLVM backend doesn't actually use these
-  // yet (it uses the -mfloat-abi and -msoft-float options), and it is
-  // stripped out by the ARM target.
-  // Use software floating point operations?
-  if (FloatABI == "soft")
-    Features.push_back("+soft-float");
+                                 std::vector<const char *> &Features,
+                                 bool ForAS) {
+  StringRef FloatABI = tools::arm::getARMFloatABI(D, Args, Triple);
+  if (!ForAS) {
+    // FIXME: Note, this is a hack, the LLVM backend doesn't actually use these
+    // yet (it uses the -mfloat-abi and -msoft-float options), and it is
+    // stripped out by the ARM target. We should probably pass this a new
+    // -target-option, which is handled by the -cc1/-cc1as invocation.
+    //
+    // FIXME2:  For consistency, it would be ideal if we set up the target
+    // machine state the same when using the frontend or the assembler. We don't
+    // currently do that for the assembler; we pass the options directly to the
+    // backend and never even instantiate the frontend TargetInfo. If we did,
+    // and used its handleTargetFeatures hook, then we could ensure the
+    // assembler and the frontend behave the same.
 
-  // Use software floating point argument passing?
-  if (FloatABI != "hard")
-    Features.push_back("+soft-float-abi");
+    // Use software floating point operations?
+    if (FloatABI == "soft")
+      Features.push_back("+soft-float");
+
+    // Use software floating point argument passing?
+    if (FloatABI != "hard")
+      Features.push_back("+soft-float-abi");
+  }
 
   // Honor -mfpu=.
   if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
@@ -797,8 +730,11 @@
 
   // Setting -msoft-float effectively disables NEON because of the GCC
   // implementation, although the same isn't true of VFP or VFP3.
-  if (FloatABI == "soft")
+  if (FloatABI == "soft") {
     Features.push_back("-neon");
+    // Also need to explicitly disable features which imply NEON.
+    Features.push_back("-crypto");
+  }
 
   // En/disable crc
   if (Arg *A = Args.getLastArg(options::OPT_mcrc,
@@ -817,7 +753,7 @@
   // Get the effective triple, which takes into account the deployment target.
   std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
   llvm::Triple Triple(TripleStr);
-  std::string CPUName = getARMTargetCPU(Args, Triple);
+  std::string CPUName = arm::getARMTargetCPU(Args, Triple);
 
   // Select the ABI to use.
   //
@@ -829,6 +765,8 @@
     // The backend is hardwired to assume AAPCS for M-class processors, ensure
     // the frontend matches that.
     if (Triple.getEnvironment() == llvm::Triple::EABI ||
+        (Triple.getOS() == llvm::Triple::UnknownOS &&
+         Triple.getObjectFormat() == llvm::Triple::MachO) ||
         StringRef(CPUName).startswith("cortex-m")) {
       ABIName = "aapcs";
     } else {
@@ -842,6 +780,7 @@
     case llvm::Triple::GNUEABIHF:
       ABIName = "aapcs-linux";
       break;
+    case llvm::Triple::EABIHF:
     case llvm::Triple::EABI:
       ABIName = "aapcs";
       break;
@@ -853,7 +792,7 @@
   CmdArgs.push_back(ABIName);
 
   // Determine floating point ABI from the options & target defaults.
-  StringRef FloatABI = getARMFloatABI(D, Args, Triple);
+  StringRef FloatABI = tools::arm::getARMFloatABI(D, Args, Triple);
   if (FloatABI == "soft") {
     // Floating point operations and argument passing are soft.
     //
@@ -900,13 +839,66 @@
                     true))
     CmdArgs.push_back("-no-implicit-float");
 
-    // llvm does not support reserving registers in general. There is support
-    // for reserving r9 on ARM though (defined as a platform-specific register
-    // in ARM EABI).
-    if (Args.hasArg(options::OPT_ffixed_r9)) {
-      CmdArgs.push_back("-backend-option");
-      CmdArgs.push_back("-arm-reserve-r9");
-    }
+  // llvm does not support reserving registers in general. There is support
+  // for reserving r9 on ARM though (defined as a platform-specific register
+  // in ARM EABI).
+  if (Args.hasArg(options::OPT_ffixed_r9)) {
+    CmdArgs.push_back("-backend-option");
+    CmdArgs.push_back("-arm-reserve-r9");
+  }
+}
+
+/// getARM64TargetCPU - Get the (LLVM) name of the ARM64 cpu we are targeting.
+static std::string getARM64TargetCPU(const ArgList &Args) {
+  // If we have -mcpu=, use that.
+  if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+    StringRef MCPU = A->getValue();
+    // Handle -mcpu=native.
+    if (MCPU == "native")
+      return llvm::sys::getHostCPUName();
+    else
+      return MCPU;
+  }
+
+  // At some point, we may need to check -march here, but for now we only
+  // support one arm64 architecture.
+
+  // Default to "cyclone" CPU.
+  return "cyclone";
+}
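
getARM64TargetCPU above applies the common -mcpu handling: an explicit -mcpu= wins, -mcpu=native resolves through host CPU detection, and otherwise the default ("cyclone") is used. Sketched here without the LLVM option library (HostCPU stands in for llvm::sys::getHostCPUName()):

#include <string>

static std::string getTargetCPU(const std::string &MCPUValue,
                                const std::string &HostCPU) {
  if (!MCPUValue.empty())
    return MCPUValue == "native" ? HostCPU : MCPUValue;
  return "cyclone";   // default CPU for arm64 in this patch
}
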
+
+void Clang::AddARM64TargetArgs(const ArgList &Args,
+                               ArgStringList &CmdArgs) const {
+  std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+  llvm::Triple Triple(TripleStr);
+
+  if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
+      Args.hasArg(options::OPT_mkernel) ||
+      Args.hasArg(options::OPT_fapple_kext))
+    CmdArgs.push_back("-disable-red-zone");
+
+  if (!Args.hasFlag(options::OPT_mimplicit_float,
+                    options::OPT_mno_implicit_float, true))
+    CmdArgs.push_back("-no-implicit-float");
+
+  const char *ABIName = 0;
+  if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
+    ABIName = A->getValue();
+  else if (Triple.isOSDarwin())
+    ABIName = "darwinpcs";
+  else
+    ABIName = "aapcs";
+
+  CmdArgs.push_back("-target-abi");
+  CmdArgs.push_back(ABIName);
+
+  CmdArgs.push_back("-target-cpu");
+  CmdArgs.push_back(Args.MakeArgString(getARM64TargetCPU(Args)));
+
+  if (Args.hasArg(options::OPT_mstrict_align)) {
+    CmdArgs.push_back("-backend-option");
+    CmdArgs.push_back("-arm64-strict-align");
+  }
 }
 
 // Get CPU and ABI names. They are not independent
@@ -915,8 +907,8 @@
                              const llvm::Triple &Triple,
                              StringRef &CPUName,
                              StringRef &ABIName) {
-  const char *DefMips32CPU = "mips32";
-  const char *DefMips64CPU = "mips64";
+  const char *DefMips32CPU = "mips32r2";
+  const char *DefMips64CPU = "mips64r2";
 
   if (Arg *A = Args.getLastArg(options::OPT_march_EQ,
                                options::OPT_mcpu_EQ))
@@ -1020,8 +1012,7 @@
 static void getMIPSTargetFeatures(const Driver &D, const ArgList &Args,
                                   std::vector<const char *> &Features) {
   StringRef FloatABI = getMipsFloatABI(D, Args);
-  bool IsMips16 = Args.getLastArg(options::OPT_mips16) != NULL;
-  if (FloatABI == "soft" || (FloatABI == "hard" && IsMips16)) {
+  if (FloatABI == "soft") {
     // FIXME: Note, this is a hack. We need to pass the selected float
     // mode to the MipsTargetInfoBase to define appropriate macros there.
     // Now it is the only method.
@@ -1062,18 +1053,11 @@
 
   StringRef FloatABI = getMipsFloatABI(D, Args);
 
-  bool IsMips16 = Args.getLastArg(options::OPT_mips16) != NULL;
-
-  if (FloatABI == "soft" || (FloatABI == "hard" && IsMips16)) {
+  if (FloatABI == "soft") {
     // Floating point operations and argument passing are soft.
     CmdArgs.push_back("-msoft-float");
     CmdArgs.push_back("-mfloat-abi");
     CmdArgs.push_back("soft");
-
-    if (FloatABI == "hard" && IsMips16) {
-      CmdArgs.push_back("-mllvm");
-      CmdArgs.push_back("-mips16-hard-float");
-    }
   }
   else {
     // Floating point operations and argument passing are hard.
@@ -1308,10 +1292,9 @@
     return Is64Bit ? "core2" : "yonah";
   }
 
-  // All x86 devices running Android have core2 as their common
-  // denominator. This makes a better choice than pentium4.
+  // On Android, use targets compatible with gcc.
   if (Triple.getEnvironment() == llvm::Triple::Android)
-    return "core2";
+    return Is64Bit ? "x86-64" : "i686";
 
   // Everything else goes to x86-64 in 64-bit mode.
   if (Is64Bit)
@@ -1338,11 +1321,14 @@
     return "";
 
   case llvm::Triple::aarch64:
+  case llvm::Triple::aarch64_be:
     return getAArch64TargetCPU(Args, T);
 
   case llvm::Triple::arm:
+  case llvm::Triple::armeb:
   case llvm::Triple::thumb:
-    return getARMTargetCPU(Args, T);
+  case llvm::Triple::thumbeb:
+    return arm::getARMTargetCPU(Args, T);
 
   case llvm::Triple::mips:
   case llvm::Triple::mipsel:
@@ -1373,7 +1359,8 @@
   }
 
   case llvm::Triple::sparc:
-    if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+  case llvm::Triple::sparcv9:
+    if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
       return A->getValue();
     return "";
 
@@ -1392,6 +1379,24 @@
   }
 }
 
+static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
+                          ArgStringList &CmdArgs) {
+  // Tell the linker to load the plugin. This has to come before AddLinkerInputs
+  // as gold requires -plugin to come before any -plugin-opt that -Wl might
+  // forward.
+  CmdArgs.push_back("-plugin");
+  std::string Plugin = ToolChain.getDriver().Dir + "/../lib/LLVMgold.so";
+  CmdArgs.push_back(Args.MakeArgString(Plugin));
+
+  // Try to pass driver level flags relevant to LTO code generation down to
+  // the plugin.
+
+  // Handle flags for selecting CPU variants.
+  std::string CPU = getCPUName(Args, ToolChain.getTriple());
+  if (!CPU.empty())
+    CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=mcpu=") + CPU));
+}
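
For illustration, a standalone sketch of the argument order AddGoldPlugin()
above produces (plain C++, not the real ToolChain/ArgList API; the driver
directory and CPU name are made up): -plugin and the LLVMgold.so path come
first, so any -plugin-opt forwarded later via -Wl, lands after them.

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> CmdArgs;
  std::string DriverDir = "/usr/local/llvm/bin";   // assumed driver directory
  CmdArgs.push_back("-plugin");                    // must precede any -plugin-opt
  CmdArgs.push_back(DriverDir + "/../lib/LLVMgold.so");
  std::string CPU = "cortex-a15";                  // would come from getCPUName()
  if (!CPU.empty())
    CmdArgs.push_back("-plugin-opt=mcpu=" + CPU);
  for (const std::string &A : CmdArgs)
    std::cout << A << '\n';
  return 0;
}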
+
 static void getX86TargetFeatures(const llvm::Triple &Triple,
                                  const ArgList &Args,
                                  std::vector<const char *> &Features) {
@@ -1406,6 +1411,11 @@
     Features.push_back("-fsgsbase");
   }
 
+  if (Triple.getEnvironment() == llvm::Triple::Android) {
+    // Add sse3 feature to comply with gcc on Android
+    Features.push_back("+sse3");
+  }
+
   // Now add any that the user explicitly requested on the command line,
   // which may override the defaults.
   for (arg_iterator it = Args.filtered_begin(options::OPT_m_x86_Features_Group),
@@ -1501,10 +1511,19 @@
   // Honor -mfpu=.
   if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
     getAArch64FPUFeatures(D, A, Args, Features);
+  else
+    Features.push_back("+neon");
+
+  if (Args.getLastArg(options::OPT_mgeneral_regs_only)) {
+    Features.push_back("-fp-armv8");
+    Features.push_back("-crypto");
+    Features.push_back("-neon");
+  }
 }
 
 static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
-                              const ArgList &Args, ArgStringList &CmdArgs) {
+                              const ArgList &Args, ArgStringList &CmdArgs,
+                              bool ForAS) {
   std::vector<const char *> Features;
   switch (Triple.getArch()) {
   default:
@@ -1517,8 +1536,10 @@
     break;
 
   case llvm::Triple::arm:
+  case llvm::Triple::armeb:
   case llvm::Triple::thumb:
-    getARMTargetFeatures(D, Triple, Args, Features);
+  case llvm::Triple::thumbeb:
+    getARMTargetFeatures(D, Triple, Args, Features, ForAS);
     break;
 
   case llvm::Triple::ppc:
@@ -1530,6 +1551,7 @@
     getSparcTargetFeatures(Args, Features);
     break;
   case llvm::Triple::aarch64:
+  case llvm::Triple::aarch64_be:
     getAArch64TargetFeatures(D, Args, Features);
     break;
   case llvm::Triple::x86:
@@ -1569,7 +1591,7 @@
   if (runtime.isNonFragile())
     return true;
 
-  if (!Triple.isOSDarwin())
+  if (!Triple.isMacOSX())
     return false;
 
   return (!Triple.isMacOSXVersionLT(10,5) &&
@@ -1577,6 +1599,44 @@
            Triple.getArch() == llvm::Triple::arm));
 }
 
+namespace {
+  struct ExceptionSettings {
+    bool ExceptionsEnabled;
+    bool ShouldUseExceptionTables;
+    ExceptionSettings() : ExceptionsEnabled(false),
+                          ShouldUseExceptionTables(false) {}
+  };
+} // end anonymous namespace.
+
+// exceptionSettings() exists to share the logic between -cc1 and linker
+// invocations.
+static ExceptionSettings exceptionSettings(const ArgList &Args,
+                                           const llvm::Triple &Triple) {
+  ExceptionSettings ES;
+
+  // Are exceptions enabled by default?
+  ES.ExceptionsEnabled = (Triple.getArch() != llvm::Triple::xcore);
+
+  // This keeps track of whether exceptions were explicitly turned on or off.
+  bool DidHaveExplicitExceptionFlag = false;
+
+  if (Arg *A = Args.getLastArg(options::OPT_fexceptions,
+                               options::OPT_fno_exceptions)) {
+    if (A->getOption().matches(options::OPT_fexceptions))
+      ES.ExceptionsEnabled = true;
+    else
+      ES.ExceptionsEnabled = false;
+
+    DidHaveExplicitExceptionFlag = true;
+  }
+
+  // Exception tables and cleanups can be enabled with -fexceptions even if the
+  // language itself doesn't support exceptions.
+  if (ES.ExceptionsEnabled && DidHaveExplicitExceptionFlag)
+    ES.ShouldUseExceptionTables = true;
+
+  return ES;
+}
+
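
For illustration, a standalone sketch of the precedence exceptionSettings()
above implements (plain C++; the string flags and exceptionSettingsSketch
helper are stand-ins for the driver's Arg/ArgList machinery): the last of
-fexceptions/-fno-exceptions wins, and exception tables are only requested
when exceptions were enabled explicitly.

#include <string>
#include <vector>

struct ExceptionSettingsSketch {
  bool ExceptionsEnabled = false;
  bool ShouldUseExceptionTables = false;
};

static ExceptionSettingsSketch
exceptionSettingsSketch(const std::vector<std::string> &Args, bool DefaultOn) {
  ExceptionSettingsSketch ES;
  ES.ExceptionsEnabled = DefaultOn;     // false only for XCore in the code above
  bool Explicit = false;
  for (const std::string &A : Args) {   // the last explicit flag wins
    if (A == "-fexceptions") {
      ES.ExceptionsEnabled = true;
      Explicit = true;
    } else if (A == "-fno-exceptions") {
      ES.ExceptionsEnabled = false;
      Explicit = true;
    }
  }
  if (ES.ExceptionsEnabled && Explicit)
    ES.ShouldUseExceptionTables = true;
  return ES;
}

int main() {
  ExceptionSettingsSketch ES =
      exceptionSettingsSketch({"-fno-exceptions", "-fexceptions"}, true);
  return (ES.ExceptionsEnabled && ES.ShouldUseExceptionTables) ? 0 : 1;
}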
 /// addExceptionArgs - Adds exception related arguments to the driver command
 /// arguments. There's a master flag, -fexceptions and also language specific
 /// flags to enable/disable C++ and Objective-C exceptions.
@@ -1599,28 +1659,8 @@
     return;
   }
 
-  // Exceptions are enabled by default.
-  bool ExceptionsEnabled = true;
-
-  // This keeps track of whether exceptions were explicitly turned on or off.
-  bool DidHaveExplicitExceptionFlag = false;
-
-  if (Arg *A = Args.getLastArg(options::OPT_fexceptions,
-                               options::OPT_fno_exceptions)) {
-    if (A->getOption().matches(options::OPT_fexceptions))
-      ExceptionsEnabled = true;
-    else
-      ExceptionsEnabled = false;
-
-    DidHaveExplicitExceptionFlag = true;
-  }
-
-  bool ShouldUseExceptionTables = false;
-
-  // Exception tables and cleanups can be enabled with -fexceptions even if the
-  // language itself doesn't support exceptions.
-  if (ExceptionsEnabled && DidHaveExplicitExceptionFlag)
-    ShouldUseExceptionTables = true;
+  // Gather the exception settings from the command line arguments.
+  ExceptionSettings ES = exceptionSettings(Args, Triple);
 
   // Obj-C exceptions are enabled by default, regardless of -fexceptions. This
   // is not necessarily sensible, but follows GCC.
@@ -1630,12 +1670,12 @@
                    true)) {
     CmdArgs.push_back("-fobjc-exceptions");
 
-    ShouldUseExceptionTables |=
+    ES.ShouldUseExceptionTables |=
       shouldUseExceptionTablesForObjCExceptions(objcRuntime, Triple);
   }
 
   if (types::isCXX(InputType)) {
-    bool CXXExceptionsEnabled = ExceptionsEnabled;
+    bool CXXExceptionsEnabled = ES.ExceptionsEnabled;
 
     if (Arg *A = Args.getLastArg(options::OPT_fcxx_exceptions,
                                  options::OPT_fno_cxx_exceptions,
@@ -1650,11 +1690,11 @@
     if (CXXExceptionsEnabled) {
       CmdArgs.push_back("-fcxx-exceptions");
 
-      ShouldUseExceptionTables = true;
+      ES.ShouldUseExceptionTables = true;
     }
   }
 
-  if (ShouldUseExceptionTables)
+  if (ES.ShouldUseExceptionTables)
     CmdArgs.push_back("-fexceptions");
 }
 
@@ -1742,6 +1782,7 @@
 
     // When using an integrated assembler, translate -Wa, and -Xassembler
     // options.
+    bool CompressDebugSections = false;
     for (arg_iterator it = Args.filtered_begin(options::OPT_Wa_COMMA,
                                                options::OPT_Xassembler),
            ie = Args.filtered_end(); it != ie; ++it) {
@@ -1765,6 +1806,12 @@
           CmdArgs.push_back("-fatal-assembler-warnings");
         } else if (Value == "--noexecstack") {
           CmdArgs.push_back("-mnoexecstack");
+        } else if (Value == "-compress-debug-sections" ||
+                   Value == "--compress-debug-sections") {
+          CompressDebugSections = true;
+        } else if (Value == "-nocompress-debug-sections" ||
+                   Value == "--nocompress-debug-sections") {
+          CompressDebugSections = false;
         } else if (Value.startswith("-I")) {
           CmdArgs.push_back(Value.data());
           // We need to consume the next argument if the current arg is a plain
@@ -1777,36 +1824,88 @@
         }
       }
     }
+    if (CompressDebugSections) {
+      if (llvm::zlib::isAvailable())
+        CmdArgs.push_back("-compress-debug-sections");
+      else
+        D.Diag(diag::warn_debug_compression_unavailable);
+    }
 }
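
For illustration, a standalone sketch of the -Wa,/-Xassembler handling added
above (plain C++; the flag list and ZlibAvailable boolean are made up, the
latter standing in for llvm::zlib::isAvailable()): values are scanned in order,
so the last of --compress-debug-sections/--nocompress-debug-sections wins, and
the option is only forwarded when zlib support is available.

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> WaValues = {"--compress-debug-sections",
                                       "--nocompress-debug-sections",
                                       "--compress-debug-sections"};
  bool CompressDebugSections = false;
  for (const std::string &V : WaValues) {
    if (V == "-compress-debug-sections" || V == "--compress-debug-sections")
      CompressDebugSections = true;
    else if (V == "-nocompress-debug-sections" ||
             V == "--nocompress-debug-sections")
      CompressDebugSections = false;
  }
  bool ZlibAvailable = true;            // stand-in for llvm::zlib::isAvailable()
  if (CompressDebugSections && ZlibAvailable)
    std::cout << "-compress-debug-sections\n";
  return 0;
}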
 
-static void addProfileRTLinux(
+// Until ARM libraries are built separately, we have them all in one library.
+static StringRef getArchNameForCompilerRTLib(const ToolChain &TC) {
+  if (TC.getArch() == llvm::Triple::arm ||
+      TC.getArch() == llvm::Triple::armeb)
+    return "arm";
+  else
+    return TC.getArchName();
+}
+
+static SmallString<128> getCompilerRTLibDir(const ToolChain &TC) {
+  // The runtimes are located in the OS-specific resource directory.
+  SmallString<128> Res(TC.getDriver().ResourceDir);
+  const llvm::Triple &Triple = TC.getTriple();
+  // TC.getOS() yields "freebsd10.0" whereas "freebsd" is expected.
+  StringRef OSLibName = (Triple.getOS() == llvm::Triple::FreeBSD) ?
+    "freebsd" : TC.getOS();
+  llvm::sys::path::append(Res, "lib", OSLibName);
+  return Res;
+}
+
+// This adds the static libclang_rt.<arch>.a directly to the command line.
+// FIXME: Make sure we can also emit shared objects if they're requested
+// and available, check for possible errors, etc.
+static void addClangRTLinux(
+    const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs) {
+  SmallString<128> LibClangRT = getCompilerRTLibDir(TC);
+  llvm::sys::path::append(LibClangRT,
+      Twine("libclang_rt.") + getArchNameForCompilerRTLib(TC) + ".a");
+
+  CmdArgs.push_back(Args.MakeArgString(LibClangRT));
+  CmdArgs.push_back("-lgcc_s");
+  if (TC.getDriver().CCCIsCXX())
+    CmdArgs.push_back("-lgcc_eh");
+}
+
+static void addProfileRT(
     const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs) {
   if (!(Args.hasArg(options::OPT_fprofile_arcs) ||
         Args.hasArg(options::OPT_fprofile_generate) ||
+        Args.hasArg(options::OPT_fprofile_instr_generate) ||
         Args.hasArg(options::OPT_fcreate_profile) ||
         Args.hasArg(options::OPT_coverage)))
     return;
 
-  // The profile runtime is located in the Linux library directory and has name
-  // "libclang_rt.profile-<ArchName>.a".
-  SmallString<128> LibProfile(TC.getDriver().ResourceDir);
-  llvm::sys::path::append(
-      LibProfile, "lib", "linux",
-      Twine("libclang_rt.profile-") + TC.getArchName() + ".a");
+  SmallString<128> LibProfile = getCompilerRTLibDir(TC);
+  llvm::sys::path::append(LibProfile,
+      Twine("libclang_rt.profile-") + getArchNameForCompilerRTLib(TC) + ".a");
 
   CmdArgs.push_back(Args.MakeArgString(LibProfile));
 }
 
-static void addSanitizerRTLinkFlagsLinux(
-    const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs,
-    const StringRef Sanitizer, bool BeforeLibStdCXX,
-    bool ExportSymbols = true) {
-  // Sanitizer runtime is located in the Linux library directory and
-  // has name "libclang_rt.<Sanitizer>-<ArchName>.a".
-  SmallString<128> LibSanitizer(TC.getDriver().ResourceDir);
-  llvm::sys::path::append(
-      LibSanitizer, "lib", "linux",
-      (Twine("libclang_rt.") + Sanitizer + "-" + TC.getArchName() + ".a"));
+static SmallString<128> getSanitizerRTLibName(const ToolChain &TC,
+                                              const StringRef Sanitizer,
+                                              bool Shared) {
+  // Sanitizer runtime has name "libclang_rt.<Sanitizer>-<ArchName>.{a,so}"
+  // (or "libclang_rt.<Sanitizer>-<ArchName>-android.so for Android)
+  const char *EnvSuffix =
+    TC.getTriple().getEnvironment() == llvm::Triple::Android ?  "-android" : "";
+  SmallString<128> LibSanitizer = getCompilerRTLibDir(TC);
+  llvm::sys::path::append(LibSanitizer,
+                          Twine("libclang_rt.") + Sanitizer + "-" +
+                              getArchNameForCompilerRTLib(TC) + EnvSuffix +
+                              (Shared ? ".so" : ".a"));
+  return LibSanitizer;
+}
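
For illustration, a standalone sketch of the file-name scheme followed by
getCompilerRTLibDir() and getSanitizerRTLibName() above (plain C++; the
resource directory, OS name, and sanitizerRTLibName helper are illustrative):
libclang_rt.<Sanitizer>-<Arch>[-android].{a,so} under <resource-dir>/lib/<os>.

#include <iostream>
#include <string>

static std::string sanitizerRTLibName(const std::string &ResourceDir,
                                      const std::string &OS,
                                      const std::string &Sanitizer,
                                      const std::string &Arch,
                                      bool Android, bool Shared) {
  return ResourceDir + "/lib/" + OS + "/libclang_rt." + Sanitizer + "-" + Arch +
         (Android ? "-android" : "") + (Shared ? ".so" : ".a");
}

int main() {
  // Prints /usr/lib/clang/3.5/lib/linux/libclang_rt.asan-x86_64.a
  std::cout << sanitizerRTLibName("/usr/lib/clang/3.5", "linux", "asan",
                                  "x86_64", /*Android=*/false, /*Shared=*/false)
            << '\n';
  return 0;
}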
+
+static void addSanitizerRTLinkFlags(const ToolChain &TC, const ArgList &Args,
+                                    ArgStringList &CmdArgs,
+                                    const StringRef Sanitizer,
+                                    bool BeforeLibStdCXX,
+                                    bool ExportSymbols = true,
+                                    bool LinkDeps = true) {
+  SmallString<128> LibSanitizer =
+      getSanitizerRTLibName(TC, Sanitizer, /*Shared*/ false);
 
   // Sanitizer runtime may need to come before -lstdc++ (or -lc++, libstdc++.a,
   // etc.) so that the linker picks custom versions of the global 'operator
@@ -1822,10 +1921,15 @@
   CmdArgs.insert(BeforeLibStdCXX ? CmdArgs.begin() : CmdArgs.end(),
                  LibSanitizerArgs.begin(), LibSanitizerArgs.end());
 
-  CmdArgs.push_back("-lpthread");
-  CmdArgs.push_back("-lrt");
-  CmdArgs.push_back("-ldl");
-  CmdArgs.push_back("-lm");
+  if (LinkDeps) {
+    // Link sanitizer dependencies explicitly
+    CmdArgs.push_back("-lpthread");
+    CmdArgs.push_back("-lrt");
+    CmdArgs.push_back("-lm");
+    // There's no libdl on FreeBSD.
+    if (TC.getTriple().getOS() != llvm::Triple::FreeBSD)
+      CmdArgs.push_back("-ldl");
+  }
 
   // If possible, use a dynamic symbols file to export the symbols from the
   // runtime library. If we can't do so, use -export-dynamic instead to export
@@ -1841,66 +1945,93 @@
 
 /// If AddressSanitizer is enabled, add appropriate linker flags (Linux).
 /// This needs to be called before we add the C run-time (malloc, etc).
-static void addAsanRTLinux(const ToolChain &TC, const ArgList &Args,
-                           ArgStringList &CmdArgs) {
-  if (TC.getTriple().getEnvironment() == llvm::Triple::Android) {
-    SmallString<128> LibAsan(TC.getDriver().ResourceDir);
-    llvm::sys::path::append(LibAsan, "lib", "linux",
-        (Twine("libclang_rt.asan-") +
-            TC.getArchName() + "-android.so"));
-    CmdArgs.insert(CmdArgs.begin(), Args.MakeArgString(LibAsan));
-  } else {
-    if (!Args.hasArg(options::OPT_shared))
-      addSanitizerRTLinkFlagsLinux(TC, Args, CmdArgs, "asan", true);
+static void addAsanRT(const ToolChain &TC, const ArgList &Args,
+                      ArgStringList &CmdArgs, bool Shared) {
+  if (Shared) {
+    // Link dynamic runtime if necessary.
+    SmallString<128> LibSanitizer =
+        getSanitizerRTLibName(TC, "asan", Shared);
+    CmdArgs.insert(CmdArgs.begin(), Args.MakeArgString(LibSanitizer));
   }
+
+  // Do not link static runtime to DSOs or if compiling for Android.
+  if (Args.hasArg(options::OPT_shared) ||
+      (TC.getTriple().getEnvironment() == llvm::Triple::Android))
+    return;
+
+  const char *LibAsanStaticPart = Shared ? "asan-preinit" : "asan";
+  addSanitizerRTLinkFlags(TC, Args, CmdArgs, LibAsanStaticPart,
+                          /*BeforeLibStdCXX*/ true, /*ExportSymbols*/ !Shared,
+                          /*LinkDeps*/ !Shared);
 }
 
 /// If ThreadSanitizer is enabled, add appropriate linker flags (Linux).
 /// This needs to be called before we add the C run-time (malloc, etc).
-static void addTsanRTLinux(const ToolChain &TC, const ArgList &Args,
-                           ArgStringList &CmdArgs) {
+static void addTsanRT(const ToolChain &TC, const ArgList &Args,
+                      ArgStringList &CmdArgs) {
   if (!Args.hasArg(options::OPT_shared))
-    addSanitizerRTLinkFlagsLinux(TC, Args, CmdArgs, "tsan", true);
+    addSanitizerRTLinkFlags(TC, Args, CmdArgs, "tsan", true);
 }
 
 /// If MemorySanitizer is enabled, add appropriate linker flags (Linux).
 /// This needs to be called before we add the C run-time (malloc, etc).
-static void addMsanRTLinux(const ToolChain &TC, const ArgList &Args,
-                           ArgStringList &CmdArgs) {
+static void addMsanRT(const ToolChain &TC, const ArgList &Args,
+                      ArgStringList &CmdArgs) {
   if (!Args.hasArg(options::OPT_shared))
-    addSanitizerRTLinkFlagsLinux(TC, Args, CmdArgs, "msan", true);
+    addSanitizerRTLinkFlags(TC, Args, CmdArgs, "msan", true);
 }
 
 /// If LeakSanitizer is enabled, add appropriate linker flags (Linux).
 /// This needs to be called before we add the C run-time (malloc, etc).
-static void addLsanRTLinux(const ToolChain &TC, const ArgList &Args,
-                           ArgStringList &CmdArgs) {
+static void addLsanRT(const ToolChain &TC, const ArgList &Args,
+                      ArgStringList &CmdArgs) {
   if (!Args.hasArg(options::OPT_shared))
-    addSanitizerRTLinkFlagsLinux(TC, Args, CmdArgs, "lsan", true);
+    addSanitizerRTLinkFlags(TC, Args, CmdArgs, "lsan", true);
 }
 
 /// If UndefinedBehaviorSanitizer is enabled, add appropriate linker flags
 /// (Linux).
-static void addUbsanRTLinux(const ToolChain &TC, const ArgList &Args,
-                            ArgStringList &CmdArgs, bool IsCXX,
-                            bool HasOtherSanitizerRt) {
+static void addUbsanRT(const ToolChain &TC, const ArgList &Args,
+                       ArgStringList &CmdArgs, bool IsCXX,
+                       bool HasOtherSanitizerRt) {
   // Need a copy of sanitizer_common. This could come from another sanitizer
   // runtime; if we're not including one, include our own copy.
   if (!HasOtherSanitizerRt)
-    addSanitizerRTLinkFlagsLinux(TC, Args, CmdArgs, "san", true, false);
+    addSanitizerRTLinkFlags(TC, Args, CmdArgs, "san", true, false);
 
-  addSanitizerRTLinkFlagsLinux(TC, Args, CmdArgs, "ubsan", false);
+  addSanitizerRTLinkFlags(TC, Args, CmdArgs, "ubsan", false);
 
   // Only include the bits of the runtime which need a C++ ABI library if
   // we're linking in C++ mode.
   if (IsCXX)
-    addSanitizerRTLinkFlagsLinux(TC, Args, CmdArgs, "ubsan_cxx", false);
+    addSanitizerRTLinkFlags(TC, Args, CmdArgs, "ubsan_cxx", false);
 }
 
-static void addDfsanRTLinux(const ToolChain &TC, const ArgList &Args,
-                            ArgStringList &CmdArgs) {
+static void addDfsanRT(const ToolChain &TC, const ArgList &Args,
+                       ArgStringList &CmdArgs) {
   if (!Args.hasArg(options::OPT_shared))
-    addSanitizerRTLinkFlagsLinux(TC, Args, CmdArgs, "dfsan", true);
+    addSanitizerRTLinkFlags(TC, Args, CmdArgs, "dfsan", true);
+}
+
+// Should be called before we add C++ ABI library.
+static void addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
+                                 ArgStringList &CmdArgs) {
+  const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
+  const Driver &D = TC.getDriver();
+  if (Sanitize.needsUbsanRt())
+    addUbsanRT(TC, Args, CmdArgs, D.CCCIsCXX(),
+                    Sanitize.needsAsanRt() || Sanitize.needsTsanRt() ||
+                    Sanitize.needsMsanRt() || Sanitize.needsLsanRt());
+  if (Sanitize.needsAsanRt())
+    addAsanRT(TC, Args, CmdArgs, Sanitize.needsSharedAsanRt());
+  if (Sanitize.needsTsanRt())
+    addTsanRT(TC, Args, CmdArgs);
+  if (Sanitize.needsMsanRt())
+    addMsanRT(TC, Args, CmdArgs);
+  if (Sanitize.needsLsanRt())
+    addLsanRT(TC, Args, CmdArgs);
+  if (Sanitize.needsDfsanRt())
+    addDfsanRT(TC, Args, CmdArgs);
 }
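
For illustration, a standalone sketch of the dispatch order
addSanitizerRuntimes() above encodes (plain C++; the booleans stand in for
SanitizerArgs::needs*Rt() and the library names are illustrative): UBSan only
pulls in its own copy of sanitizer_common when no other sanitizer runtime is
being linked.

#include <iostream>
#include <string>
#include <vector>

int main() {
  bool NeedsUbsan = true, NeedsAsan = true, NeedsTsan = false,
       NeedsMsan = false, NeedsLsan = false;
  std::vector<std::string> Libs;
  if (NeedsUbsan) {
    bool HasOtherSanitizerRt = NeedsAsan || NeedsTsan || NeedsMsan || NeedsLsan;
    if (!HasOtherSanitizerRt)
      Libs.push_back("libclang_rt.san-x86_64.a");   // shared sanitizer_common
    Libs.push_back("libclang_rt.ubsan-x86_64.a");
  }
  if (NeedsAsan)
    Libs.push_back("libclang_rt.asan-x86_64.a");
  for (const std::string &L : Libs)
    std::cout << L << '\n';   // no duplicate sanitizer_common copy
  return 0;
}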
 
 static bool shouldUseFramePointerForTarget(const ArgList &Args,
@@ -1995,13 +2126,6 @@
   C.addCommand(new Command(JA, T, Exec, StripArgs));
 }
 
-static bool isOptimizationLevelFast(const ArgList &Args) {
-  if (Arg *A = Args.getLastArg(options::OPT_O_Group))
-    if (A->getOption().matches(options::OPT_Ofast))
-      return true;
-  return false;
-}
-
 /// \brief Vectorize at all optimization levels greater than 1 except for -Oz.
 static bool shouldEnableVectorizerAtOLevel(const ArgList &Args) {
   if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
@@ -2033,6 +2157,21 @@
   return false;
 }
 
+/// Add -x lang to \p CmdArgs for \p Input.
+static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
+                             ArgStringList &CmdArgs) {
+  // When using -verify-pch, we don't want to provide the type
+  // 'precompiled-header' if it was inferred from the file extension
+  if (Args.hasArg(options::OPT_verify_pch) && Input.getType() == types::TY_PCH)
+    return;
+
+  CmdArgs.push_back("-x");
+  if (Args.hasArg(options::OPT_rewrite_objc))
+    CmdArgs.push_back(types::getTypeName(types::TY_PP_ObjCXX));
+  else
+    CmdArgs.push_back(types::getTypeName(Input.getType()));
+}
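
For illustration, a standalone sketch of addDashXForInput() above (plain C++;
the InputTypeSketch enum and typeNameSketch helper are stand-ins for the
driver's types:: machinery): -x is omitted when verifying a PCH whose type was
inferred from the extension, forced to ObjC++ preprocessed output under
-rewrite-objc, and otherwise mirrors the input type.

#include <string>
#include <vector>

enum InputTypeSketch { TY_C, TY_PCH, TY_PP_ObjCXX };

static const char *typeNameSketch(InputTypeSketch T) {
  switch (T) {
  case TY_C:         return "c";
  case TY_PCH:       return "precompiled-header";
  case TY_PP_ObjCXX: return "objective-c++-cpp-output";
  }
  return "c";
}

static void addDashX(bool VerifyPCH, bool RewriteObjC, InputTypeSketch T,
                     std::vector<std::string> &CmdArgs) {
  if (VerifyPCH && T == TY_PCH)
    return;                          // -cc1 already knows it is verifying a PCH
  CmdArgs.push_back("-x");
  CmdArgs.push_back(RewriteObjC ? typeNameSketch(TY_PP_ObjCXX)
                                : typeNameSketch(T));
}

int main() {
  std::vector<std::string> CmdArgs;
  addDashX(/*VerifyPCH=*/true, /*RewriteObjC=*/false, TY_PCH, CmdArgs);
  return CmdArgs.empty() ? 0 : 1;    // nothing added for -verify-pch
}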
+
 void Clang::ConstructJob(Compilation &C, const JobAction &JA,
                          const InputInfo &Output,
                          const InputInfoList &Inputs,
@@ -2043,6 +2182,10 @@
   const Driver &D = getToolChain().getDriver();
   ArgStringList CmdArgs;
 
+  bool IsWindowsGNU = getToolChain().getTriple().isWindowsGNUEnvironment();
+  bool IsWindowsCygnus = getToolChain().getTriple().isWindowsCygwinEnvironment();
+  bool IsWindowsMSVC = getToolChain().getTriple().isWindowsMSVCEnvironment();
+
   assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
 
   // Invoke ourselves in -cc1 mode.
@@ -2055,6 +2198,11 @@
   std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
   CmdArgs.push_back(Args.MakeArgString(TripleStr));
 
+  // Push all default warning arguments that are specific to
+  // the given target. These come before any user-provided
+  // warning options.
+  getToolChain().addClangWarningOptions(CmdArgs);
+
   // Select the appropriate action.
   RewriteKind rewriteKind = RK_None;
   
@@ -2089,6 +2237,8 @@
       CmdArgs.push_back("-emit-pch");
     else
       CmdArgs.push_back("-emit-pth");
+  } else if (isa<VerifyPCHJobAction>(JA)) {
+    CmdArgs.push_back("-verify-pch");
   } else {
     assert(isa<CompileJobAction>(JA) && "Invalid action for clang tool.");
 
@@ -2149,7 +2299,7 @@
     if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
       CmdArgs.push_back("-analyzer-checker=core");
 
-      if (getToolChain().getTriple().getOS() != llvm::Triple::Win32)
+      if (!IsWindowsMSVC)
         CmdArgs.push_back("-analyzer-checker=unix");
 
       if (getToolChain().getTriple().getVendor() == llvm::Triple::Apple)
@@ -2192,6 +2342,31 @@
   bool PIC = PIE || getToolChain().isPICDefault();
   bool IsPICLevelTwo = PIC;
 
+  // Android-specific defaults for PIC/PIE
+  if (getToolChain().getTriple().getEnvironment() == llvm::Triple::Android) {
+    switch (getToolChain().getTriple().getArch()) {
+    case llvm::Triple::arm:
+    case llvm::Triple::armeb:
+    case llvm::Triple::thumb:
+    case llvm::Triple::thumbeb:
+    case llvm::Triple::mips:
+    case llvm::Triple::mipsel:
+    case llvm::Triple::mips64:
+    case llvm::Triple::mips64el:
+      PIC = true; // "-fpic"
+      break;
+
+    case llvm::Triple::x86:
+    case llvm::Triple::x86_64:
+      PIC = true; // "-fPIC"
+      IsPICLevelTwo = true;
+      break;
+
+    default:
+      break;
+    }
+  }
+
   // For the PIC and PIE flag options, this logic is different from the
   // legacy logic in very old versions of GCC, as that logic was just
   // a bug no one had ever fixed. This logic is both more rational and
@@ -2232,8 +2407,8 @@
   // Note that these flags are trump-cards. Regardless of the order w.r.t. the
   // PIC or PIE options above, if these show up, PIC is disabled.
   llvm::Triple Triple(TripleStr);
-  if (KernelOrKext &&
-      (!Triple.isiOS() || Triple.isOSVersionLT(6)))
+  if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6) ||
+                       Triple.getArch() == llvm::Triple::arm64))
     PIC = PIE = false;
   if (Args.hasArg(options::OPT_static))
     PIC = PIE = false;
@@ -2465,12 +2640,17 @@
 
   // Decide whether to use verbose asm. Verbose assembly is the default on
   // toolchains which have the integrated assembler on by default.
-  bool IsVerboseAsmDefault = getToolChain().IsIntegratedAssemblerDefault();
+  bool IsIntegratedAssemblerDefault =
+      getToolChain().IsIntegratedAssemblerDefault();
   if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
-                   IsVerboseAsmDefault) ||
+                   IsIntegratedAssemblerDefault) ||
       Args.hasArg(options::OPT_dA))
     CmdArgs.push_back("-masm-verbose");
 
+  if (!Args.hasFlag(options::OPT_fintegrated_as, options::OPT_fno_integrated_as,
+                    IsIntegratedAssemblerDefault))
+    CmdArgs.push_back("-no-integrated-as");
+
   if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
     CmdArgs.push_back("-mdebug-pass");
     CmdArgs.push_back("Structure");
@@ -2498,10 +2678,11 @@
   // -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
   // complicated ways.
   bool AsynchronousUnwindTables =
-    Args.hasFlag(options::OPT_fasynchronous_unwind_tables,
-                 options::OPT_fno_asynchronous_unwind_tables,
-                 getToolChain().IsUnwindTablesDefault() &&
-                 !KernelOrKext);
+      Args.hasFlag(options::OPT_fasynchronous_unwind_tables,
+                   options::OPT_fno_asynchronous_unwind_tables,
+                   (getToolChain().IsUnwindTablesDefault() ||
+                    getToolChain().getSanitizerArgs().needsUnwindTables()) &&
+                       !KernelOrKext);
   if (Args.hasFlag(options::OPT_funwind_tables, options::OPT_fno_unwind_tables,
                    AsynchronousUnwindTables))
     CmdArgs.push_back("-munwind-tables");
@@ -2536,7 +2717,7 @@
   }
 
   // Add the target features
-  getTargetFeatures(D, ETriple, Args, CmdArgs);
+  getTargetFeatures(D, ETriple, Args, CmdArgs, false);
 
   // Add target specific flags.
   switch(getToolChain().getArch()) {
@@ -2544,10 +2725,16 @@
     break;
 
   case llvm::Triple::arm:
+  case llvm::Triple::armeb:
   case llvm::Triple::thumb:
+  case llvm::Triple::thumbeb:
     AddARMTargetArgs(Args, CmdArgs, KernelOrKext);
     break;
 
+  case llvm::Triple::arm64:
+    AddARM64TargetArgs(Args, CmdArgs);
+    break;
+
   case llvm::Triple::mips:
   case llvm::Triple::mipsel:
   case llvm::Triple::mips64:
@@ -2613,13 +2800,18 @@
                       D.CCLogDiagnosticsFilename : "-");
   }
 
-  // Use the last option from "-g" group. "-gline-tables-only"
-  // is preserved, all other debug options are substituted with "-g".
+  // Use the last option from "-g" group. "-gline-tables-only" and "-gdwarf-x"
+  // are preserved; all other debug options are substituted with "-g".
   Args.ClaimAllArgs(options::OPT_g_Group);
   if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
-    if (A->getOption().matches(options::OPT_gline_tables_only))
+    if (A->getOption().matches(options::OPT_gline_tables_only)) {
+      // FIXME: we should support specifying dwarf version with
+      // -gline-tables-only.
       CmdArgs.push_back("-gline-tables-only");
-    else if (A->getOption().matches(options::OPT_gdwarf_2))
+      // Default is dwarf-2 for darwin.
+      if (getToolChain().getTriple().isOSDarwin())
+        CmdArgs.push_back("-gdwarf-2");
+    } else if (A->getOption().matches(options::OPT_gdwarf_2))
       CmdArgs.push_back("-gdwarf-2");
     else if (A->getOption().matches(options::OPT_gdwarf_3))
       CmdArgs.push_back("-gdwarf-3");
@@ -2657,13 +2849,44 @@
     CmdArgs.push_back("-generate-gnu-dwarf-pub-sections");
   }
 
-  Args.AddAllArgs(CmdArgs, options::OPT_fdebug_types_section);
+  // -gdwarf-aranges turns on the emission of the aranges section in the
+  // backend.
+  if (Args.hasArg(options::OPT_gdwarf_aranges)) {
+    CmdArgs.push_back("-backend-option");
+    CmdArgs.push_back("-generate-arange-section");
+  }
 
-  Args.AddAllArgs(CmdArgs, options::OPT_ffunction_sections);
-  Args.AddAllArgs(CmdArgs, options::OPT_fdata_sections);
+  if (Args.hasFlag(options::OPT_fdebug_types_section,
+                   options::OPT_fno_debug_types_section, false)) {
+    CmdArgs.push_back("-backend-option");
+    CmdArgs.push_back("-generate-type-units");
+  }
+
+  if (Args.hasFlag(options::OPT_ffunction_sections,
+                   options::OPT_fno_function_sections, false)) {
+    CmdArgs.push_back("-ffunction-sections");
+  }
+
+  if (Args.hasFlag(options::OPT_fdata_sections,
+                   options::OPT_fno_data_sections, false)) {
+    CmdArgs.push_back("-fdata-sections");
+  }
 
   Args.AddAllArgs(CmdArgs, options::OPT_finstrument_functions);
 
+  if (Args.hasArg(options::OPT_fprofile_instr_generate) &&
+      (Args.hasArg(options::OPT_fprofile_instr_use) ||
+       Args.hasArg(options::OPT_fprofile_instr_use_EQ)))
+    D.Diag(diag::err_drv_argument_not_allowed_with)
+      << "-fprofile-instr-generate" << "-fprofile-instr-use";
+
+  Args.AddAllArgs(CmdArgs, options::OPT_fprofile_instr_generate);
+
+  if (Arg *A = Args.getLastArg(options::OPT_fprofile_instr_use_EQ))
+    A->render(Args, CmdArgs);
+  else if (Args.hasArg(options::OPT_fprofile_instr_use))
+    CmdArgs.push_back("-fprofile-instr-use=pgo-data");
+
   if (Args.hasArg(options::OPT_ftest_coverage) ||
       Args.hasArg(options::OPT_coverage))
     CmdArgs.push_back("-femit-coverage-notes");
@@ -2769,7 +2992,8 @@
     Args.AddLastArg(CmdArgs, options::OPT_objcmt_atomic_property);
     Args.AddLastArg(CmdArgs, options::OPT_objcmt_returns_innerpointer_property);
     Args.AddLastArg(CmdArgs, options::OPT_objcmt_ns_nonatomic_iosonly);
-    Args.AddLastArg(CmdArgs, options::OPT_objcmt_white_list_dir_path);
+    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_designated_init);
+    Args.AddLastArg(CmdArgs, options::OPT_objcmt_whitelist_dir_path);
   }
 
   // Add preprocessing options like -I, -D, etc. if we are using the
@@ -2833,7 +3057,7 @@
     if (!types::isCXX(InputType))
       Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
                                 "-std=", /*Joined=*/true);
-    else if (getToolChain().getTriple().getOS() == llvm::Triple::Win32)
+    else if (IsWindowsMSVC)
       CmdArgs.push_back("-std=c++11");
 
     Args.AddLastArg(CmdArgs, options::OPT_trigraphs);
@@ -2849,9 +3073,13 @@
   // behavior for now. FIXME: Directly diagnose uses of a string literal as
   // a non-const char* in C, rather than using this crude hack.
   if (!types::isCXX(InputType)) {
-    DiagnosticsEngine::Level DiagLevel = D.getDiags().getDiagnosticLevel(
-        diag::warn_deprecated_string_literal_conversion_c, SourceLocation());
-    if (DiagLevel > DiagnosticsEngine::Ignored)
+    // FIXME: This should behave just like a warning flag, and thus should also
+    // respect -Weverything, -Wno-everything, -Werror=write-strings, and so on.
+    Arg *WriteStrings =
+        Args.getLastArg(options::OPT_Wwrite_strings,
+                        options::OPT_Wno_write_strings, options::OPT_w);
+    if (WriteStrings &&
+        WriteStrings->getOption().matches(options::OPT_Wwrite_strings))
       CmdArgs.push_back("-fconst-strings");
   }
 
@@ -2992,8 +3220,8 @@
   // Forward -f (flag) options which we can pass directly.
   Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
   Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
-  Args.AddLastArg(CmdArgs, options::OPT_flimit_debug_info);
-  Args.AddLastArg(CmdArgs, options::OPT_fno_limit_debug_info);
+  Args.AddLastArg(CmdArgs, options::OPT_fstandalone_debug);
+  Args.AddLastArg(CmdArgs, options::OPT_fno_standalone_debug);
   Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
   // AltiVec language extensions aren't relevant for assembling.
   if (!isa<PreprocessJobAction>(JA) || 
@@ -3010,8 +3238,7 @@
                     true))
     CmdArgs.push_back("-fno-sanitize-recover");
 
-  if (Args.hasArg(options::OPT_fcatch_undefined_behavior) ||
-      Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error,
+  if (Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error,
                    options::OPT_fno_sanitize_undefined_trap_on_error, false))
     CmdArgs.push_back("-fsanitize-undefined-trap-on-error");
 
@@ -3079,11 +3306,14 @@
   unsigned StackProtectorLevel = 0;
   if (Arg *A = Args.getLastArg(options::OPT_fno_stack_protector,
                                options::OPT_fstack_protector_all,
+                               options::OPT_fstack_protector_strong,
                                options::OPT_fstack_protector)) {
     if (A->getOption().matches(options::OPT_fstack_protector))
-      StackProtectorLevel = 1;
+      StackProtectorLevel = LangOptions::SSPOn;
+    else if (A->getOption().matches(options::OPT_fstack_protector_strong))
+      StackProtectorLevel = LangOptions::SSPStrong;
     else if (A->getOption().matches(options::OPT_fstack_protector_all))
-      StackProtectorLevel = 2;
+      StackProtectorLevel = LangOptions::SSPReq;
   } else {
     StackProtectorLevel =
       getToolChain().GetDefaultStackProtectorLevel(KernelOrKext);
@@ -3247,11 +3477,30 @@
     CmdArgs.push_back(Args.MakeArgString(DefaultModuleCache));
   }
 
+  if (Arg *A = Args.getLastArg(options::OPT_fmodules_user_build_path)) {
+    A->claim();
+    if (HaveModules) {
+      A->render(Args, CmdArgs);
+    }
+  }
+
   // Pass through all -fmodules-ignore-macro arguments.
   Args.AddAllArgs(CmdArgs, options::OPT_fmodules_ignore_macro);
   Args.AddLastArg(CmdArgs, options::OPT_fmodules_prune_interval);
   Args.AddLastArg(CmdArgs, options::OPT_fmodules_prune_after);
 
+  Args.AddLastArg(CmdArgs, options::OPT_fbuild_session_timestamp);
+
+  if (Args.getLastArg(options::OPT_fmodules_validate_once_per_build_session)) {
+    if (!Args.getLastArg(options::OPT_fbuild_session_timestamp))
+      D.Diag(diag::err_drv_modules_validate_once_requires_timestamp);
+
+    Args.AddLastArg(CmdArgs,
+                    options::OPT_fmodules_validate_once_per_build_session);
+  }
+
+  Args.AddLastArg(CmdArgs, options::OPT_fmodules_validate_system_headers);
+
   // -faccess-control is default.
   if (Args.hasFlag(options::OPT_fno_access_control,
                    options::OPT_faccess_control,
@@ -3298,33 +3547,30 @@
     CmdArgs.push_back("-fno-threadsafe-statics");
 
   // -fuse-cxa-atexit is default.
-  if (!Args.hasFlag(
-           options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
-           getToolChain().getTriple().getOS() != llvm::Triple::Cygwin &&
-               getToolChain().getTriple().getOS() != llvm::Triple::MinGW32 &&
-               getToolChain().getArch() != llvm::Triple::hexagon &&
-               getToolChain().getArch() != llvm::Triple::xcore) ||
+  if (!Args.hasFlag(options::OPT_fuse_cxa_atexit,
+                    options::OPT_fno_use_cxa_atexit,
+                    !IsWindowsCygnus && !IsWindowsGNU &&
+                    getToolChain().getArch() != llvm::Triple::hexagon &&
+                    getToolChain().getArch() != llvm::Triple::xcore) ||
       KernelOrKext)
     CmdArgs.push_back("-fno-use-cxa-atexit");
 
   // -fms-extensions=0 is default.
   if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
-                   getToolChain().getTriple().getOS() == llvm::Triple::Win32))
+                   IsWindowsMSVC))
     CmdArgs.push_back("-fms-extensions");
 
   // -fms-compatibility=0 is default.
   if (Args.hasFlag(options::OPT_fms_compatibility, 
                    options::OPT_fno_ms_compatibility,
-                   (getToolChain().getTriple().getOS() == llvm::Triple::Win32 &&
-                    Args.hasFlag(options::OPT_fms_extensions, 
-                                 options::OPT_fno_ms_extensions,
-                                 true))))
+                   (IsWindowsMSVC && Args.hasFlag(options::OPT_fms_extensions,
+                                                  options::OPT_fno_ms_extensions,
+                                                  true))))
     CmdArgs.push_back("-fms-compatibility");
 
   // -fmsc-version=1700 is default.
   if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
-                   getToolChain().getTriple().getOS() == llvm::Triple::Win32) ||
-      Args.hasArg(options::OPT_fmsc_version)) {
+                   IsWindowsMSVC) || Args.hasArg(options::OPT_fmsc_version)) {
     StringRef msc_ver = Args.getLastArgValue(options::OPT_fmsc_version);
     if (msc_ver.empty())
       CmdArgs.push_back("-fmsc-version=1700");
@@ -3341,8 +3587,7 @@
   // -fno-delayed-template-parsing is default, except for Windows where MSVC STL
   // needs it.
   if (Args.hasFlag(options::OPT_fdelayed_template_parsing,
-                   options::OPT_fno_delayed_template_parsing,
-                   getToolChain().getTriple().getOS() == llvm::Triple::Win32))
+                   options::OPT_fno_delayed_template_parsing, IsWindowsMSVC))
     CmdArgs.push_back("-fdelayed-template-parsing");
 
   // -fgnu-keywords default varies depending on language; only pass if
@@ -3365,9 +3610,10 @@
   ObjCRuntime objcRuntime = AddObjCRuntimeArgs(Args, CmdArgs, rewriteKind);
 
   // -fobjc-dispatch-method is only relevant with the nonfragile-abi, and
-  // legacy is the default. Next runtime is always legacy dispatch and
-  // -fno-objc-legacy-dispatch gets ignored silently.
-  if (objcRuntime.isNonFragile() && !objcRuntime.isNeXTFamily()) {
+  // legacy is the default. Except for a deployment target of 10.5, the
+  // NeXT runtime is always legacy dispatch and -fno-objc-legacy-dispatch
+  // gets ignored silently.
+  if (objcRuntime.isNonFragile()) {
     if (!Args.hasFlag(options::OPT_fobjc_legacy_dispatch,
                       options::OPT_fno_objc_legacy_dispatch,
                       objcRuntime.isLegacyDispatchDefaultForArch(
@@ -3465,7 +3711,8 @@
 
   // -fshort-wchar default varies depending on platform; only
   // pass if specified.
-  if (Arg *A = Args.getLastArg(options::OPT_fshort_wchar))
+  if (Arg *A = Args.getLastArg(options::OPT_fshort_wchar,
+                               options::OPT_fno_short_wchar))
     A->render(Args, CmdArgs);
 
   // -fno-pascal-strings is default, only pass non-default.
@@ -3726,11 +3973,9 @@
   for (InputInfoList::const_iterator
          it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
     const InputInfo &II = *it;
-    CmdArgs.push_back("-x");
-    if (Args.hasArg(options::OPT_rewrite_objc))
-      CmdArgs.push_back(types::getTypeName(types::TY_PP_ObjCXX));
-    else
-      CmdArgs.push_back(types::getTypeName(II.getType()));
+
+    addDashXForInput(Args, II, CmdArgs);
+
     if (II.isFilename())
       CmdArgs.push_back(II.getFilename());
     else
@@ -3772,7 +4017,8 @@
   }
 
   // Finally add the compile command to the compilation.
-  if (Args.hasArg(options::OPT__SLASH_fallback)) {
+  if (Args.hasArg(options::OPT__SLASH_fallback) &&
+      Output.getType() == types::TY_Object) {
     tools::visualstudio::Compile CL(getToolChain());
     Command *CLCommand = CL.GetCommand(C, JA, Output, Inputs, Args,
                                        LinkingOutput);
@@ -3979,13 +4225,49 @@
   // implemented in clang.
   CmdArgs.push_back("--dependent-lib=oldnames");
 
-  // FIXME: Make this default for the win32 triple.
-  CmdArgs.push_back("-cxx-abi");
-  CmdArgs.push_back("microsoft");
-
   if (Arg *A = Args.getLastArg(options::OPT_show_includes))
     A->render(Args, CmdArgs);
 
+  // RTTI is currently not supported, so disable it by default.
+  if (!Args.hasArg(options::OPT_frtti, options::OPT_fno_rtti))
+    CmdArgs.push_back("-fno-rtti");
+
+  // Let -ffunction-sections imply -fdata-sections.
+  if (Arg * A = Args.getLastArg(options::OPT_ffunction_sections,
+                                options::OPT_fno_function_sections))
+    if (A->getOption().matches(options::OPT_ffunction_sections))
+      CmdArgs.push_back("-fdata-sections");
+
+  const Driver &D = getToolChain().getDriver();
+  Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg);
+  Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb);
+  if (MostGeneralArg && BestCaseArg)
+    D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+        << MostGeneralArg->getAsString(Args) << BestCaseArg->getAsString(Args);
+
+  if (MostGeneralArg) {
+    Arg *SingleArg = Args.getLastArg(options::OPT__SLASH_vms);
+    Arg *MultipleArg = Args.getLastArg(options::OPT__SLASH_vmm);
+    Arg *VirtualArg = Args.getLastArg(options::OPT__SLASH_vmv);
+
+    Arg *FirstConflict = SingleArg ? SingleArg : MultipleArg;
+    Arg *SecondConflict = VirtualArg ? VirtualArg : MultipleArg;
+    if (FirstConflict && SecondConflict && FirstConflict != SecondConflict)
+      D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+          << FirstConflict->getAsString(Args)
+          << SecondConflict->getAsString(Args);
+
+    if (SingleArg)
+      CmdArgs.push_back("-fms-memptr-rep=single");
+    else if (MultipleArg)
+      CmdArgs.push_back("-fms-memptr-rep=multiple");
+    else
+      CmdArgs.push_back("-fms-memptr-rep=virtual");
+  }
+
+  if (Arg *A = Args.getLastArg(options::OPT_vtordisp_mode_EQ))
+    A->render(Args, CmdArgs);
+
   if (!Args.hasArg(options::OPT_fdiagnostics_format_EQ)) {
     CmdArgs.push_back("-fdiagnostics-format");
     if (Args.hasArg(options::OPT__SLASH_fallback))
@@ -4041,7 +4323,7 @@
 
   // Add the target features
   const Driver &D = getToolChain().getDriver();
-  getTargetFeatures(D, Triple, Args, CmdArgs);
+  getTargetFeatures(D, Triple, Args, CmdArgs, true);
 
   // Ignore explicit -force_cpusubtype_ALL option.
   (void) Args.hasArg(options::OPT_force__cpusubtype__ALL);
@@ -4092,6 +4374,16 @@
 
   // FIXME: Add -static support, once we have it.
 
+  // Consume all the warning flags. Usually this would be handled more
+  // gracefully by -cc1 (warning about unknown warning flags, etc.), but -cc1as
+  // doesn't handle that, so rather than warning about unused flags that are
+  // actually used, we'll lie by omission instead.
+  // FIXME: Stop lying and consume only the appropriate driver flags.
+  for (arg_iterator it = Args.filtered_begin(options::OPT_W_Group),
+                    ie = Args.filtered_end();
+       it != ie; ++it)
+    (*it)->claim();
+
   CollectArgsForIntegratedAssembler(C, Args, CmdArgs,
                                     getToolChain().getDriver());
 
@@ -4251,11 +4543,6 @@
   CmdArgs.push_back("-E");
 }
 
-void gcc::Precompile::RenderExtraToolArgs(const JobAction &JA,
-                                          ArgStringList &CmdArgs) const {
-  // The type is good enough.
-}
-
 void gcc::Compile::RenderExtraToolArgs(const JobAction &JA,
                                        ArgStringList &CmdArgs) const {
   const Driver &D = getToolChain().getDriver();
@@ -4273,11 +4560,6 @@
   }
 }
 
-void gcc::Assemble::RenderExtraToolArgs(const JobAction &JA,
-                                        ArgStringList &CmdArgs) const {
-  CmdArgs.push_back("-c");
-}
-
 void gcc::Link::RenderExtraToolArgs(const JobAction &JA,
                                     ArgStringList &CmdArgs) const {
   // The types are (hopefully) good enough.
@@ -4529,7 +4811,168 @@
 }
 // Hexagon tools end.
 
-llvm::Triple::ArchType darwin::getArchTypeForDarwinArchName(StringRef Str) {
+/// getARMCPUForMArch - Get the (LLVM) name of the minimum ARM CPU for the
+/// arch we are targeting.
+//
+// FIXME: tblgen this.
+const char *arm::getARMCPUForMArch(const ArgList &Args,
+                                   const llvm::Triple &Triple) {
+  StringRef MArch;
+  if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+    // If we have -march=, choose the base CPU for that arch.
+    MArch = A->getValue();
+  } else {
+    // Otherwise, use the Arch from the triple.
+    MArch = Triple.getArchName();
+  }
+
+  // Handle -march=native.
+  if (MArch == "native") {
+    std::string CPU = llvm::sys::getHostCPUName();
+    if (CPU != "generic") {
+      // Translate the native cpu into the architecture. The switch below will
+      // then choose the minimum cpu for that arch.
+      MArch = std::string("arm") + arm::getLLVMArchSuffixForARM(CPU);
+    }
+  }
+
+  if (Triple.getOS() == llvm::Triple::NetBSD) {
+    if (MArch == "armv6")
+      return "arm1176jzf-s";
+  }
+
+  const char *result = llvm::StringSwitch<const char *>(MArch)
+    .Cases("armv2", "armv2a","arm2")
+    .Case("armv3", "arm6")
+    .Case("armv3m", "arm7m")
+    .Case("armv4", "strongarm")
+    .Case("armv4t", "arm7tdmi")
+    .Case("thumbv4t", "arm7tdmi")
+    .Cases("armv5", "armv5t", "arm10tdmi")
+    .Cases("thumbv5", "thumbv5t", "arm10tdmi")
+    .Cases("armv5e", "armv5te", "arm1022e")
+    .Cases("thumbv5e", "thumbv5te", "arm1022e")
+    .Case("armv5tej", "arm926ej-s")
+    .Case("thumbv5tej", "arm926ej-s")
+    .Cases("armv6", "armv6k", "arm1136jf-s")
+    .Cases("thumbv6", "thumbv6k", "arm1136jf-s")
+    .Case("armv6j", "arm1136j-s")
+    .Case("thumbv6j", "arm1136j-s")
+    .Cases("armv6z", "armv6zk", "arm1176jzf-s")
+    .Cases("thumbv6z", "thumbv6zk", "arm1176jzf-s")
+    .Case("armv6t2", "arm1156t2-s")
+    .Case("thumbv6t2", "arm1156t2-s")
+    .Cases("armv6m", "armv6-m", "cortex-m0")
+    .Case("thumbv6m", "cortex-m0")
+    .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
+    .Cases("armebv7", "armebv7a", "armebv7-a", "cortex-a8")
+    .Cases("thumbv7", "thumbv7a", "cortex-a8")
+    .Cases("thumbebv7", "thumbebv7a", "cortex-a8")
+    .Cases("armv7l", "armv7-l", "cortex-a8")
+    .Cases("armebv7l", "armebv7-l", "cortex-a8")
+    .Cases("armv7s", "armv7-s", "swift")
+    .Cases("armebv7s", "armebv7-s", "swift")
+    .Cases("armv7r", "armv7-r", "cortex-r4")
+    .Cases("armebv7r", "armebv7-r", "cortex-r4")
+    .Case("thumbv7r", "cortex-r4")
+    .Case("thumbebv7r", "cortex-r4")
+    .Cases("armv7m", "armv7-m", "cortex-m3")
+    .Cases("armebv7m", "armebv7-m", "cortex-m3")
+    .Case("thumbv7m", "cortex-m3")
+    .Case("thumbebv7m", "cortex-m3")
+    .Cases("armv7em", "armv7e-m", "cortex-m4")
+    .Cases("armebv7em", "armebv7e-m", "cortex-m4")
+    .Cases("thumbv7em", "thumbv7e-m", "cortex-m4")
+    .Cases("thumbebv7em", "thumbebv7e-m", "cortex-m4")
+    .Cases("armv8", "armv8a", "armv8-a", "cortex-a53")
+    .Cases("armebv8", "armebv8a", "armebv8-a", "cortex-a53")
+    .Cases("thumbv8", "thumbv8a", "cortex-a53")
+    .Cases("thumbebv8", "thumbebv8a", "cortex-a53")
+    .Case("ep9312", "ep9312")
+    .Case("iwmmxt", "iwmmxt")
+    .Case("xscale", "xscale")
+    // If all else fails, return the most basic CPU with Thumb interworking
+    // supported by LLVM.
+    .Default(0);
+
+  if (result)
+    return result;
+
+  switch (Triple.getOS()) {
+  case llvm::Triple::NetBSD:
+    switch (Triple.getEnvironment()) {
+    case llvm::Triple::GNUEABIHF:
+    case llvm::Triple::GNUEABI:
+    case llvm::Triple::EABIHF:
+    case llvm::Triple::EABI:
+      return "arm926ej-s";
+    default:
+      return "strongarm";
+    }
+  default:
+    switch (Triple.getEnvironment()) {
+    case llvm::Triple::EABIHF:
+    case llvm::Triple::GNUEABIHF:
+      return "arm1176jzf-s";
+    default:
+      return "arm7tdmi";
+    }
+  }
+}
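
For illustration, a standalone sketch of the lookup getARMCPUForMArch() above
performs (plain C++; only a handful of table entries are reproduced, and the
hard-float fallback is simplified): a -march value selects a default CPU, and
unknown values fall back to an EABIHF/GNUEABIHF-dependent default.

#include <iostream>
#include <map>
#include <string>

static std::string armCPUForMArchSketch(const std::string &MArch,
                                        bool HardFloatEABI) {
  static const std::map<std::string, std::string> Table = {
      {"armv4t", "arm7tdmi"},   {"armv6", "arm1136jf-s"},
      {"armv7-a", "cortex-a8"}, {"armv8-a", "cortex-a53"}};
  std::map<std::string, std::string>::const_iterator It = Table.find(MArch);
  if (It != Table.end())
    return It->second;
  return HardFloatEABI ? "arm1176jzf-s" : "arm7tdmi";   // default fallback
}

int main() {
  std::cout << armCPUForMArchSketch("armv7-a", false) << '\n';  // cortex-a8
  std::cout << armCPUForMArchSketch("armv9", true) << '\n';     // arm1176jzf-s
  return 0;
}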
+
+/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
+StringRef arm::getARMTargetCPU(const ArgList &Args,
+                               const llvm::Triple &Triple) {
+  // FIXME: Warn on inconsistent use of -mcpu and -march.
+  // If we have -mcpu=, use that.
+  if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+    StringRef MCPU = A->getValue();
+    // Handle -mcpu=native.
+    if (MCPU == "native")
+      return llvm::sys::getHostCPUName();
+    else
+      return MCPU;
+  }
+
+  return getARMCPUForMArch(Args, Triple);
+}
+
+/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
+/// CPU.
+//
+// FIXME: This is redundant with -mcpu; why does LLVM use this?
+// FIXME: tblgen this, or kill it!
+const char *arm::getLLVMArchSuffixForARM(StringRef CPU) {
+  return llvm::StringSwitch<const char *>(CPU)
+    .Case("strongarm", "v4")
+    .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
+    .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
+    .Cases("arm920", "arm920t", "arm922t", "v4t")
+    .Cases("arm940t", "ep9312","v4t")
+    .Cases("arm10tdmi",  "arm1020t", "v5")
+    .Cases("arm9e",  "arm926ej-s",  "arm946e-s", "v5e")
+    .Cases("arm966e-s",  "arm968e-s",  "arm10e", "v5e")
+    .Cases("arm1020e",  "arm1022e",  "xscale", "iwmmxt", "v5e")
+    .Cases("arm1136j-s",  "arm1136jf-s",  "arm1176jz-s", "v6")
+    .Cases("arm1176jzf-s",  "mpcorenovfp",  "mpcore", "v6")
+    .Cases("arm1156t2-s",  "arm1156t2f-s", "v6t2")
+    .Cases("cortex-a5", "cortex-a7", "cortex-a8", "cortex-a9-mp", "v7")
+    .Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "v7")
+    .Cases("cortex-r4", "cortex-r5", "v7r")
+    .Case("cortex-m0", "v6m")
+    .Case("cortex-m3", "v7m")
+    .Case("cortex-m4", "v7em")
+    .Case("swift", "v7s")
+    .Case("cyclone", "v8")
+    .Cases("cortex-a53", "cortex-a57", "v8")
+    .Default("");
+}
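
For illustration, a standalone sketch of how the two helpers above compose for
-march=native (plain C++; the tiny suffix table is a stand-in for
getLLVMArchSuffixForARM()): the host CPU name is mapped back to an architecture
suffix and prefixed with "arm" before the per-arch CPU table is consulted.

#include <iostream>
#include <map>
#include <string>

static std::string archSuffixForCPUSketch(const std::string &CPU) {
  static const std::map<std::string, std::string> Suffix = {
      {"cortex-a8", "v7"}, {"cortex-a53", "v8"}, {"arm1176jzf-s", "v6"}};
  std::map<std::string, std::string>::const_iterator It = Suffix.find(CPU);
  return It == Suffix.end() ? std::string() : It->second;
}

int main() {
  std::string HostCPU = "cortex-a8";               // pretend host CPU name
  std::string MArch = "arm" + archSuffixForCPUSketch(HostCPU);
  std::cout << MArch << '\n';                      // armv7
  return 0;
}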
+
+bool mips::hasMipsAbiArg(const ArgList &Args, const char *Value) {
+  Arg *A = Args.getLastArg(options::OPT_mabi_EQ);
+  return A && (A->getValue() == StringRef(Value));
+}
+
+llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
   // See arch(3) and llvm-gcc's driver-driver.c. We don't implement support for
   // archs which Darwin doesn't use.
 
@@ -4552,8 +4995,9 @@
     .Cases("x86_64", "x86_64h", llvm::Triple::x86_64)
     // This is derived from the driver driver.
     .Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
-    .Cases("armv7", "armv7em", "armv7f", "armv7k", "armv7m", llvm::Triple::arm)
+    .Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
     .Cases("armv7s", "xscale", llvm::Triple::arm)
+    .Case("arm64", llvm::Triple::arm64)
     .Case("r600", llvm::Triple::r600)
     .Case("nvptx", llvm::Triple::nvptx)
     .Case("nvptx64", llvm::Triple::nvptx64)
@@ -4562,6 +5006,18 @@
     .Default(llvm::Triple::UnknownArch);
 }
 
+void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
+  llvm::Triple::ArchType Arch = getArchTypeForMachOArchName(Str);
+  T.setArch(Arch);
+
+  if (Str == "x86_64h")
+    T.setArchName(Str);
+  else if (Str == "armv6m" || Str == "armv7m" || Str == "armv7em") {
+    T.setOS(llvm::Triple::UnknownOS);
+    T.setObjectFormat(llvm::Triple::MachO);
+  }
+}
+
 const char *Clang::getBaseInputName(const ArgList &Args,
                                     const InputInfoList &Inputs) {
   return Args.MakeArgString(
@@ -4609,10 +5065,16 @@
     SourceAction = SourceAction->getInputs()[0];
   }
 
-  // If -no_integrated_as is used add -Q to the darwin assember driver to make
+  // If -fno-integrated-as is used, add -Q to the darwin assembler driver to make
   // sure it runs its system assembler not clang's integrated assembler.
-  if (Args.hasArg(options::OPT_no_integrated_as))
-    CmdArgs.push_back("-Q");
+  // Applicable to darwin11+ and Xcode 4+.  darwin<10 lacked integrated-as.
+  // FIXME: at run-time detect assembler capabilities or rely on version
+  // information forwarded by -target-assembler-version (future)
+  if (Args.hasArg(options::OPT_fno_integrated_as)) {
+    const llvm::Triple &T(getToolChain().getTriple());
+    if (!(T.isMacOSX() && T.isMacOSXVersionLT(10, 7)))
+      CmdArgs.push_back("-Q");
+  }
 
   // Forward -g, assuming we are dealing with an actual assembly file.
   if (SourceAction->getType() == types::TY_Asm ||
@@ -4624,7 +5086,7 @@
   }
 
   // Derived from asm spec.
-  AddDarwinArch(Args, CmdArgs);
+  AddMachOArch(Args, CmdArgs);
 
   // Use -force_cpusubtype_ALL on x86 by default.
   if (getToolChain().getArch() == llvm::Triple::x86 ||
@@ -4635,8 +5097,7 @@
   if (getToolChain().getArch() != llvm::Triple::x86_64 &&
       (((Args.hasArg(options::OPT_mkernel) ||
          Args.hasArg(options::OPT_fapple_kext)) &&
-        (!getDarwinToolChain().isTargetIPhoneOS() ||
-         getDarwinToolChain().isIPhoneOSVersionLT(6, 0))) ||
+        getMachOToolChain().isKernelStatic()) ||
        Args.hasArg(options::OPT_static)))
     CmdArgs.push_back("-static");
 
@@ -4657,11 +5118,11 @@
   C.addCommand(new Command(JA, *this, Exec, CmdArgs));
 }
 
-void darwin::DarwinTool::anchor() {}
+void darwin::MachOTool::anchor() {}
 
-void darwin::DarwinTool::AddDarwinArch(const ArgList &Args,
-                                       ArgStringList &CmdArgs) const {
-  StringRef ArchName = getDarwinToolChain().getDarwinArchName(Args);
+void darwin::MachOTool::AddMachOArch(const ArgList &Args,
+                                     ArgStringList &CmdArgs) const {
+  StringRef ArchName = getMachOToolChain().getMachOArchName(Args);
 
   // Derived from darwin_arch spec.
   CmdArgs.push_back("-arch");
@@ -4689,7 +5150,7 @@
                                ArgStringList &CmdArgs,
                                const InputInfoList &Inputs) const {
   const Driver &D = getToolChain().getDriver();
-  const toolchains::Darwin &DarwinTC = getDarwinToolChain();
+  const toolchains::MachO &MachOTC = getMachOToolChain();
 
   unsigned Version[3] = { 0, 0, 0 };
   if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
@@ -4701,27 +5162,10 @@
         << A->getAsString(Args);
   }
 
-  // Newer linkers support -demangle, pass it if supported and not disabled by
+  // Newer linkers support -demangle. Pass it if supported and not disabled by
   // the user.
-  if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) {
-    // Don't pass -demangle to ld_classic.
-    //
-    // FIXME: This is a temporary workaround, ld should be handling this.
-    bool UsesLdClassic = (getToolChain().getArch() == llvm::Triple::x86 &&
-                          Args.hasArg(options::OPT_static));
-    if (getToolChain().getArch() == llvm::Triple::x86) {
-      for (arg_iterator it = Args.filtered_begin(options::OPT_Xlinker,
-                                                 options::OPT_Wl_COMMA),
-             ie = Args.filtered_end(); it != ie; ++it) {
-        const Arg *A = *it;
-        for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
-          if (StringRef(A->getValue(i)) == "-kext")
-            UsesLdClassic = true;
-      }
-    }
-    if (!UsesLdClassic)
-      CmdArgs.push_back("-demangle");
-  }
+  if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+    CmdArgs.push_back("-demangle");
 
   if (Args.hasArg(options::OPT_rdynamic) && Version[0] >= 137)
     CmdArgs.push_back("-export_dynamic");
@@ -4747,7 +5191,7 @@
   }
 
   if (!Args.hasArg(options::OPT_dynamiclib)) {
-    AddDarwinArch(Args, CmdArgs);
+    AddMachOArch(Args, CmdArgs);
     // FIXME: Why do this only on this path?
     Args.AddLastArg(CmdArgs, options::OPT_force__cpusubtype__ALL);
 
@@ -4783,7 +5227,7 @@
     Args.AddAllArgsTranslated(CmdArgs, options::OPT_current__version,
                               "-dylib_current_version");
 
-    AddDarwinArch(Args, CmdArgs);
+    AddMachOArch(Args, CmdArgs);
 
     Args.AddAllArgsTranslated(CmdArgs, options::OPT_install__name,
                               "-dylib_install_name");
@@ -4792,7 +5236,7 @@
   Args.AddLastArg(CmdArgs, options::OPT_all__load);
   Args.AddAllArgs(CmdArgs, options::OPT_allowable__client);
   Args.AddLastArg(CmdArgs, options::OPT_bind__at__load);
-  if (DarwinTC.isTargetIPhoneOS())
+  if (MachOTC.isTargetIOSBased())
     Args.AddLastArg(CmdArgs, options::OPT_arch__errors__fatal);
   Args.AddLastArg(CmdArgs, options::OPT_dead__strip);
   Args.AddLastArg(CmdArgs, options::OPT_no__dead__strip__inits__and__terms);
@@ -4806,22 +5250,7 @@
   Args.AddAllArgs(CmdArgs, options::OPT_init);
 
   // Add the deployment target.
-  VersionTuple TargetVersion = DarwinTC.getTargetVersion();
-
-  // If we had an explicit -mios-simulator-version-min argument, honor that,
-  // otherwise use the traditional deployment targets. We can't just check the
-  // is-sim attribute because existing code follows this path, and the linker
-  // may not handle the argument.
-  //
-  // FIXME: We may be able to remove this, once we can verify no one depends on
-  // it.
-  if (Args.hasArg(options::OPT_mios_simulator_version_min_EQ))
-    CmdArgs.push_back("-ios_simulator_version_min");
-  else if (DarwinTC.isTargetIPhoneOS())
-    CmdArgs.push_back("-iphoneos_version_min");
-  else
-    CmdArgs.push_back("-macosx_version_min");
-  CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+  MachOTC.addMinVersionArgs(Args, CmdArgs);
 
   Args.AddLastArg(CmdArgs, options::OPT_nomultidefs);
   Args.AddLastArg(CmdArgs, options::OPT_multi__module);
@@ -4890,6 +5319,12 @@
   Args.AddLastArg(CmdArgs, options::OPT_Mach);
 }
 
+enum LibOpenMP {
+  LibUnknown,
+  LibGOMP,
+  LibIOMP5
+};
+
 void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
                                 const InputInfo &Output,
                                 const InputInfoList &Inputs,
@@ -4936,120 +5371,42 @@
   CmdArgs.push_back(Output.getFilename());
 
   if (!Args.hasArg(options::OPT_nostdlib) &&
-      !Args.hasArg(options::OPT_nostartfiles)) {
-    // Derived from startfile spec.
-    if (Args.hasArg(options::OPT_dynamiclib)) {
-      // Derived from darwin_dylib1 spec.
-      if (getDarwinToolChain().isTargetIOSSimulator()) {
-        // The simulator doesn't have a versioned crt1 file.
-        CmdArgs.push_back("-ldylib1.o");
-      } else if (getDarwinToolChain().isTargetIPhoneOS()) {
-        if (getDarwinToolChain().isIPhoneOSVersionLT(3, 1))
-          CmdArgs.push_back("-ldylib1.o");
-      } else {
-        if (getDarwinToolChain().isMacosxVersionLT(10, 5))
-          CmdArgs.push_back("-ldylib1.o");
-        else if (getDarwinToolChain().isMacosxVersionLT(10, 6))
-          CmdArgs.push_back("-ldylib1.10.5.o");
-      }
-    } else {
-      if (Args.hasArg(options::OPT_bundle)) {
-        if (!Args.hasArg(options::OPT_static)) {
-          // Derived from darwin_bundle1 spec.
-          if (getDarwinToolChain().isTargetIOSSimulator()) {
-            // The simulator doesn't have a versioned crt1 file.
-            CmdArgs.push_back("-lbundle1.o");
-          } else if (getDarwinToolChain().isTargetIPhoneOS()) {
-            if (getDarwinToolChain().isIPhoneOSVersionLT(3, 1))
-              CmdArgs.push_back("-lbundle1.o");
-          } else {
-            if (getDarwinToolChain().isMacosxVersionLT(10, 6))
-              CmdArgs.push_back("-lbundle1.o");
-          }
-        }
-      } else {
-        if (Args.hasArg(options::OPT_pg) &&
-            getToolChain().SupportsProfiling()) {
-          if (Args.hasArg(options::OPT_static) ||
-              Args.hasArg(options::OPT_object) ||
-              Args.hasArg(options::OPT_preload)) {
-            CmdArgs.push_back("-lgcrt0.o");
-          } else {
-            CmdArgs.push_back("-lgcrt1.o");
-
-            // darwin_crt2 spec is empty.
-          }
-          // By default on OS X 10.8 and later, we don't link with a crt1.o
-          // file and the linker knows to use _main as the entry point.  But,
-          // when compiling with -pg, we need to link with the gcrt1.o file,
-          // so pass the -no_new_main option to tell the linker to use the
-          // "start" symbol as the entry point.
-          if (getDarwinToolChain().isTargetMacOS() &&
-              !getDarwinToolChain().isMacosxVersionLT(10, 8))
-            CmdArgs.push_back("-no_new_main");
-        } else {
-          if (Args.hasArg(options::OPT_static) ||
-              Args.hasArg(options::OPT_object) ||
-              Args.hasArg(options::OPT_preload)) {
-            CmdArgs.push_back("-lcrt0.o");
-          } else {
-            // Derived from darwin_crt1 spec.
-            if (getDarwinToolChain().isTargetIOSSimulator()) {
-              // The simulator doesn't have a versioned crt1 file.
-              CmdArgs.push_back("-lcrt1.o");
-            } else if (getDarwinToolChain().isTargetIPhoneOS()) {
-              if (getDarwinToolChain().isIPhoneOSVersionLT(3, 1))
-                CmdArgs.push_back("-lcrt1.o");
-              else if (getDarwinToolChain().isIPhoneOSVersionLT(6, 0))
-                CmdArgs.push_back("-lcrt1.3.1.o");
-            } else {
-              if (getDarwinToolChain().isMacosxVersionLT(10, 5))
-                CmdArgs.push_back("-lcrt1.o");
-              else if (getDarwinToolChain().isMacosxVersionLT(10, 6))
-                CmdArgs.push_back("-lcrt1.10.5.o");
-              else if (getDarwinToolChain().isMacosxVersionLT(10, 8))
-                CmdArgs.push_back("-lcrt1.10.6.o");
-
-              // darwin_crt2 spec is empty.
-            }
-          }
-        }
-      }
-    }
-
-    if (!getDarwinToolChain().isTargetIPhoneOS() &&
-        Args.hasArg(options::OPT_shared_libgcc) &&
-        getDarwinToolChain().isMacosxVersionLT(10, 5)) {
-      const char *Str =
-        Args.MakeArgString(getToolChain().GetFilePath("crt3.o"));
-      CmdArgs.push_back(Str);
-    }
-  }
+      !Args.hasArg(options::OPT_nostartfiles))
+    getMachOToolChain().addStartObjectFileArgs(Args, CmdArgs);
 
   Args.AddAllArgs(CmdArgs, options::OPT_L);
 
-  if (Args.hasArg(options::OPT_fopenmp))
-    // This is more complicated in gcc...
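+  // -fopenmp selects libgomp; -fopenmp=<lib> lets the user choose between
+  // libgomp and libiomp5, and any other value is diagnosed as unsupported.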
+  LibOpenMP UsedOpenMPLib = LibUnknown;
+  if (Args.hasArg(options::OPT_fopenmp)) {
+    UsedOpenMPLib = LibGOMP;
+  } else if (const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ)) {
+    UsedOpenMPLib = llvm::StringSwitch<LibOpenMP>(A->getValue())
+        .Case("libgomp",  LibGOMP)
+        .Case("libiomp5", LibIOMP5)
+        .Default(LibUnknown);
+    if (UsedOpenMPLib == LibUnknown)
+      getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument)
+        << A->getOption().getName() << A->getValue();
+  }
+  switch (UsedOpenMPLib) {
+  case LibGOMP:
     CmdArgs.push_back("-lgomp");
+    break;
+  case LibIOMP5:
+    CmdArgs.push_back("-liomp5");
+    break;
+  case LibUnknown:
+    break;
+  }
 
   AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
   
   if (isObjCRuntimeLinked(Args) &&
       !Args.hasArg(options::OPT_nostdlib) &&
       !Args.hasArg(options::OPT_nodefaultlibs)) {
-    // Avoid linking compatibility stubs on i386 mac.
-    if (!getDarwinToolChain().isTargetMacOS() ||
-        getDarwinToolChain().getArch() != llvm::Triple::x86) {
-      // If we don't have ARC or subscripting runtime support, link in the
-      // runtime stubs.  We have to do this *before* adding any of the normal
-      // linker inputs so that its initializer gets run first.
-      ObjCRuntime runtime =
-        getDarwinToolChain().getDefaultObjCRuntime(/*nonfragile*/ true);
-      // We use arclite library for both ARC and subscripting support.
-      if ((!runtime.hasNativeARC() && isObjCAutoRefCount(Args)) ||
-          !runtime.hasSubscripting())
-        getDarwinToolChain().AddLinkARCArgs(Args, CmdArgs);
-    }
+    // We use arclite library for both ARC and subscripting support.
+    getMachOToolChain().AddLinkARCArgs(Args, CmdArgs);
+
     CmdArgs.push_back("-framework");
     CmdArgs.push_back("Foundation");
     // Link libobj.
@@ -5073,7 +5430,7 @@
     // link_ssp spec is empty.
 
     // Let the tool chain choose which runtime library to link.
-    getDarwinToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs);
+    getMachOToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs);
   }
 
   if (!Args.hasArg(options::OPT_nostdlib) &&
@@ -5279,7 +5636,7 @@
   }
   CmdArgs.push_back(Args.MakeArgString(LibPath + "crtn.o"));
 
-  addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+  addProfileRT(getToolChain(), Args, CmdArgs);
 
   const char *Exec =
     Args.MakeArgString(getToolChain().GetProgramPath("ld"));
@@ -5391,7 +5748,7 @@
                                 getToolChain().GetFilePath("crtend.o")));
   }
 
-  addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+  addProfileRT(getToolChain(), Args, CmdArgs);
 
   const char *Exec =
     Args.MakeArgString(getToolChain().GetProgramPath("ld"));
@@ -5404,16 +5761,33 @@
                                      const ArgList &Args,
                                      const char *LinkingOutput) const {
   ArgStringList CmdArgs;
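+  // Set for targets whose assembler needs an explicit -KPIC flag when
+  // position-independent code is requested; handled by addAssemblerKPIC below.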
+  bool NeedsKPIC = false;
 
-  // When building 32-bit code on OpenBSD/amd64, we have to explicitly
-  // instruct as in the base system to assemble 32-bit code.
-  if (getToolChain().getArch() == llvm::Triple::x86)
+  switch (getToolChain().getArch()) {
+  case llvm::Triple::x86:
+    // When building 32-bit code on OpenBSD/amd64, we have to explicitly
+    // instruct as in the base system to assemble 32-bit code.
     CmdArgs.push_back("--32");
-  else if (getToolChain().getArch() == llvm::Triple::ppc) {
+    break;
+
+  case llvm::Triple::ppc:
     CmdArgs.push_back("-mppc");
     CmdArgs.push_back("-many");
-  } else if (getToolChain().getArch() == llvm::Triple::mips64 ||
-             getToolChain().getArch() == llvm::Triple::mips64el) {
+    break;
+
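+  // SPARC needs an explicit word-size flag; 64-bit SPARC also selects the
+  // V9a architecture. Both variants may need -KPIC for PIC code.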
+  case llvm::Triple::sparc:
+    CmdArgs.push_back("-32");
+    NeedsKPIC = true;
+    break;
+
+  case llvm::Triple::sparcv9:
+    CmdArgs.push_back("-64");
+    CmdArgs.push_back("-Av9a");
+    NeedsKPIC = true;
+    break;
+
+  case llvm::Triple::mips64:
+  case llvm::Triple::mips64el: {
     StringRef CPUName;
     StringRef ABIName;
     getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
@@ -5426,19 +5800,17 @@
     else
       CmdArgs.push_back("-EL");
 
-    Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
-                                      options::OPT_fpic, options::OPT_fno_pic,
-                                      options::OPT_fPIE, options::OPT_fno_PIE,
-                                      options::OPT_fpie, options::OPT_fno_pie);
-    if (LastPICArg &&
-        (LastPICArg->getOption().matches(options::OPT_fPIC) ||
-         LastPICArg->getOption().matches(options::OPT_fpic) ||
-         LastPICArg->getOption().matches(options::OPT_fPIE) ||
-         LastPICArg->getOption().matches(options::OPT_fpie))) {
-      CmdArgs.push_back("-KPIC");
-    }
+    NeedsKPIC = true;
+    break;
   }
 
+  default:
+    break;
+  }
+
+  if (NeedsKPIC)
+    addAssemblerKPIC(Args, CmdArgs);
+
   Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
                        options::OPT_Xassembler);
 
@@ -5761,21 +6133,23 @@
     else
       CmdArgs.push_back("-EL");
 
-    Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
-                                      options::OPT_fpic, options::OPT_fno_pic,
-                                      options::OPT_fPIE, options::OPT_fno_PIE,
-                                      options::OPT_fpie, options::OPT_fno_pie);
-    if (LastPICArg &&
-        (LastPICArg->getOption().matches(options::OPT_fPIC) ||
-         LastPICArg->getOption().matches(options::OPT_fpic) ||
-         LastPICArg->getOption().matches(options::OPT_fPIE) ||
-         LastPICArg->getOption().matches(options::OPT_fpie))) {
-      CmdArgs.push_back("-KPIC");
-    }
+    addAssemblerKPIC(Args, CmdArgs);
   } else if (getToolChain().getArch() == llvm::Triple::arm ||
-             getToolChain().getArch() == llvm::Triple::thumb) {
-    CmdArgs.push_back("-mfpu=softvfp");
+             getToolChain().getArch() == llvm::Triple::armeb ||
+             getToolChain().getArch() == llvm::Triple::thumb ||
+             getToolChain().getArch() == llvm::Triple::thumbeb) {
+    const Driver &D = getToolChain().getDriver();
+    const llvm::Triple &Triple = getToolChain().getTriple();
+    StringRef FloatABI = arm::getARMFloatABI(D, Args, Triple);
+
+    if (FloatABI == "hard") {
+      CmdArgs.push_back("-mfpu=vfp");
+    } else {
+      CmdArgs.push_back("-mfpu=softvfp");
+    }
+
     switch(getToolChain().getTriple().getEnvironment()) {
+    case llvm::Triple::GNUEABIHF:
     case llvm::Triple::GNUEABI:
     case llvm::Triple::EABI:
       CmdArgs.push_back("-meabi=5");
@@ -5784,6 +6158,14 @@
     default:
       CmdArgs.push_back("-matpcs");
     }
+  } else if (getToolChain().getArch() == llvm::Triple::sparc ||
+             getToolChain().getArch() == llvm::Triple::sparcv9) {
+    if (getToolChain().getArch() == llvm::Triple::sparc)
+      CmdArgs.push_back("-Av8plusa");
+    else
+      CmdArgs.push_back("-Av9a");
+
+    addAssemblerKPIC(Args, CmdArgs);
   }
 
   Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
@@ -5811,6 +6193,9 @@
   const toolchains::FreeBSD& ToolChain = 
     static_cast<const toolchains::FreeBSD&>(getToolChain());
   const Driver &D = ToolChain.getDriver();
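+  // Link as PIE when -pie is given or the toolchain defaults to PIE, unless
+  // we are building a shared library.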
+  const bool IsPIE =
+    !Args.hasArg(options::OPT_shared) &&
+    (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault());
   ArgStringList CmdArgs;
 
   // Silence warning for "clang -g foo.o -o foo"
@@ -5824,7 +6209,7 @@
   if (!D.SysRoot.empty())
     CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
 
-  if (Args.hasArg(options::OPT_pie))
+  if (IsPIE)
     CmdArgs.push_back("-pie");
 
   if (Args.hasArg(options::OPT_static)) {
@@ -5874,7 +6259,7 @@
     if (!Args.hasArg(options::OPT_shared)) {
       if (Args.hasArg(options::OPT_pg))
         crt1 = "gcrt1.o";
-      else if (Args.hasArg(options::OPT_pie))
+      else if (IsPIE)
         crt1 = "Scrt1.o";
       else
         crt1 = "crt1.o";
@@ -5887,7 +6272,7 @@
     const char *crtbegin = NULL;
     if (Args.hasArg(options::OPT_static))
       crtbegin = "crtbeginT.o";
-    else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+    else if (Args.hasArg(options::OPT_shared) || IsPIE)
       crtbegin = "crtbeginS.o";
     else
       crtbegin = "crtbegin.o";
@@ -5907,25 +6292,8 @@
   Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
   Args.AddAllArgs(CmdArgs, options::OPT_r);
 
-  // Tell the linker to load the plugin. This has to come before AddLinkerInputs
-  // as gold requires -plugin to come before any -plugin-opt that -Wl might
-  // forward.
-  if (D.IsUsingLTO(Args)) {
-    CmdArgs.push_back("-plugin");
-    std::string Plugin = ToolChain.getDriver().Dir + "/../lib/LLVMgold.so";
-    CmdArgs.push_back(Args.MakeArgString(Plugin));
-
-    // Try to pass driver level flags relevant to LTO code generation down to
-    // the plugin.
-
-    // Handle flags for selecting CPU variants.
-    std::string CPU = getCPUName(Args, ToolChain.getTriple());
-    if (!CPU.empty()) {
-      CmdArgs.push_back(
-                        Args.MakeArgString(Twine("-plugin-opt=mcpu=") +
-                                           CPU));
-    }
-  }
+  if (D.IsUsingLTO(Args))
+    AddGoldPlugin(ToolChain, Args, CmdArgs);
 
   AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
 
@@ -5985,14 +6353,16 @@
 
   if (!Args.hasArg(options::OPT_nostdlib) &&
       !Args.hasArg(options::OPT_nostartfiles)) {
-    if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+    if (Args.hasArg(options::OPT_shared) || IsPIE)
       CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
     else
       CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
     CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
   }
 
-  addProfileRT(ToolChain, Args, CmdArgs, ToolChain.getTriple());
+  addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
+
+  addProfileRT(ToolChain, Args, CmdArgs);
 
   const char *Exec =
     Args.MakeArgString(ToolChain.GetProgramPath("ld"));
@@ -6006,22 +6376,25 @@
                                      const char *LinkingOutput) const {
   ArgStringList CmdArgs;
 
-  // When building 32-bit code on NetBSD/amd64, we have to explicitly
-  // instruct as in the base system to assemble 32-bit code.
-  if (getToolChain().getArch() == llvm::Triple::x86)
+  // GNU as needs different flags to create the correct output format on
+  // architectures with different ABIs or optional feature sets.
+  switch (getToolChain().getArch()) {
+  case llvm::Triple::x86:
     CmdArgs.push_back("--32");
-
-  // Pass the target CPU to GNU as for ARM, since the source code might
-  // not have the correct .cpu annotation.
-  if (getToolChain().getArch() == llvm::Triple::arm) {
-    std::string MArch(getARMTargetCPU(Args, getToolChain().getTriple()));
+    break;
+  case llvm::Triple::arm:
+  case llvm::Triple::armeb:
+  case llvm::Triple::thumb:
+  case llvm::Triple::thumbeb: {
+    std::string MArch(arm::getARMTargetCPU(Args, getToolChain().getTriple()));
     CmdArgs.push_back(Args.MakeArgString("-mcpu=" + MArch));
+    break;
   }
 
-  if (getToolChain().getArch() == llvm::Triple::mips ||
-      getToolChain().getArch() == llvm::Triple::mipsel ||
-      getToolChain().getArch() == llvm::Triple::mips64 ||
-      getToolChain().getArch() == llvm::Triple::mips64el) {
+  case llvm::Triple::mips:
+  case llvm::Triple::mipsel:
+  case llvm::Triple::mips64:
+  case llvm::Triple::mips64el: {
     StringRef CPUName;
     StringRef ABIName;
     getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
@@ -6038,17 +6411,23 @@
     else
       CmdArgs.push_back("-EL");
 
-    Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
-                                      options::OPT_fpic, options::OPT_fno_pic,
-                                      options::OPT_fPIE, options::OPT_fno_PIE,
-                                      options::OPT_fpie, options::OPT_fno_pie);
-    if (LastPICArg &&
-        (LastPICArg->getOption().matches(options::OPT_fPIC) ||
-         LastPICArg->getOption().matches(options::OPT_fpic) ||
-         LastPICArg->getOption().matches(options::OPT_fPIE) ||
-         LastPICArg->getOption().matches(options::OPT_fpie))) {
-      CmdArgs.push_back("-KPIC");
-    }
+    addAssemblerKPIC(Args, CmdArgs);
+    break;
+  }
+
+  case llvm::Triple::sparc:
+    CmdArgs.push_back("-32");
+    addAssemblerKPIC(Args, CmdArgs);
+    break;
+
+  case llvm::Triple::sparcv9:
+    CmdArgs.push_back("-64");
+    CmdArgs.push_back("-Av9");
+    addAssemblerKPIC(Args, CmdArgs);
+    break;
+
+  default:
+    break;
   }
 
   Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
@@ -6078,12 +6457,12 @@
   if (!D.SysRoot.empty())
     CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
 
+  CmdArgs.push_back("--eh-frame-hdr");
   if (Args.hasArg(options::OPT_static)) {
     CmdArgs.push_back("-Bstatic");
   } else {
     if (Args.hasArg(options::OPT_rdynamic))
       CmdArgs.push_back("-export-dynamic");
-    CmdArgs.push_back("--eh-frame-hdr");
     if (Args.hasArg(options::OPT_shared)) {
       CmdArgs.push_back("-Bshareable");
     } else {
@@ -6092,11 +6471,61 @@
     }
   }
 
-  // When building 32-bit code on NetBSD/amd64, we have to explicitly
-  // instruct ld in the base system to link 32-bit code.
-  if (getToolChain().getArch() == llvm::Triple::x86) {
+  // Many NetBSD architectures support more than one ABI.
+  // Determine the correct emulation for ld.
+  switch (getToolChain().getArch()) {
+  case llvm::Triple::x86:
     CmdArgs.push_back("-m");
     CmdArgs.push_back("elf_i386");
+    break;
+  case llvm::Triple::arm:
+  case llvm::Triple::armeb:
+  case llvm::Triple::thumb:
+  case llvm::Triple::thumbeb:
+    CmdArgs.push_back("-m");
+    switch (getToolChain().getTriple().getEnvironment()) {
+    case llvm::Triple::EABI:
+    case llvm::Triple::GNUEABI:
+      CmdArgs.push_back("armelf_nbsd_eabi");
+      break;
+    case llvm::Triple::EABIHF:
+    case llvm::Triple::GNUEABIHF:
+      CmdArgs.push_back("armelf_nbsd_eabihf");
+      break;
+    default:
+      CmdArgs.push_back("armelf_nbsd");
+      break;
+    }
+    break;
+  case llvm::Triple::mips64:
+  case llvm::Triple::mips64el:
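+    // Pick the ld emulation from the requested MIPS ABI: -mabi=32 selects the
+    // 32-bit emulations (elf32*), -mabi=64 the 64-bit ones (elf64*).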
+    if (mips::hasMipsAbiArg(Args, "32")) {
+      CmdArgs.push_back("-m");
+      if (getToolChain().getArch() == llvm::Triple::mips64)
+        CmdArgs.push_back("elf32btsmip");
+      else
+        CmdArgs.push_back("elf32ltsmip");
+    } else if (mips::hasMipsAbiArg(Args, "64")) {
+      CmdArgs.push_back("-m");
+      if (getToolChain().getArch() == llvm::Triple::mips64)
+        CmdArgs.push_back("elf64btsmip");
+      else
+        CmdArgs.push_back("elf64ltsmip");
+    }
+    break;
+
+  case llvm::Triple::sparc:
+    CmdArgs.push_back("-m");
+    CmdArgs.push_back("elf32_sparc");
+    break;
+
+  case llvm::Triple::sparcv9:
+    CmdArgs.push_back("-m");
+    CmdArgs.push_back("elf64_sparc");
+    break;
+
+  default:
+    break;
   }
 
   if (Output.isFilename()) {
@@ -6137,9 +6566,14 @@
   getToolChain().getTriple().getOSVersion(Major, Minor, Micro);
   bool useLibgcc = true;
   if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 23) || Major == 0) {
-    if (getToolChain().getArch() == llvm::Triple::x86 ||
-        getToolChain().getArch() == llvm::Triple::x86_64)
+    switch(getToolChain().getArch()) {
+    case llvm::Triple::x86:
+    case llvm::Triple::x86_64:
       useLibgcc = false;
+      break;
+    default:
+      break;
+    }
   }
 
   if (!Args.hasArg(options::OPT_nostdlib) &&
@@ -6181,7 +6615,7 @@
                                                                     "crtn.o")));
   }
 
-  addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+  addProfileRT(getToolChain(), Args, CmdArgs);
 
   const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("ld"));
   C.addCommand(new Command(JA, *this, Exec, CmdArgs));
@@ -6193,6 +6627,7 @@
                                       const ArgList &Args,
                                       const char *LinkingOutput) const {
   ArgStringList CmdArgs;
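+  // Set below for targets that need -KPIC passed to the assembler when PIC is
+  // enabled (see addAssemblerKPIC).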
+  bool NeedsKPIC = false;
 
   // Add --32/--64 to make sure we get the format we want.
   // This is incomplete
@@ -6210,21 +6645,41 @@
     CmdArgs.push_back("-many");
   } else if (getToolChain().getArch() == llvm::Triple::ppc64le) {
     CmdArgs.push_back("-a64");
-    CmdArgs.push_back("-mppc64le");
+    CmdArgs.push_back("-mppc64");
     CmdArgs.push_back("-many");
-  } else if (getToolChain().getArch() == llvm::Triple::arm) {
+    CmdArgs.push_back("-mlittle-endian");
+  } else if (getToolChain().getArch() == llvm::Triple::sparc) {
+    CmdArgs.push_back("-32");
+    CmdArgs.push_back("-Av8plusa");
+    NeedsKPIC = true;
+  } else if (getToolChain().getArch() == llvm::Triple::sparcv9) {
+    CmdArgs.push_back("-64");
+    CmdArgs.push_back("-Av9a");
+    NeedsKPIC = true;
+  } else if (getToolChain().getArch() == llvm::Triple::arm ||
+             getToolChain().getArch() == llvm::Triple::armeb) {
     StringRef MArch = getToolChain().getArchName();
     if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a")
       CmdArgs.push_back("-mfpu=neon");
-    if (MArch == "armv8" || MArch == "armv8a" || MArch == "armv8-a")
+    if (MArch == "armv8" || MArch == "armv8a" || MArch == "armv8-a" ||
+        MArch == "armebv8" || MArch == "armebv8a" || MArch == "armebv8-a")
       CmdArgs.push_back("-mfpu=crypto-neon-fp-armv8");
 
-    StringRef ARMFloatABI = getARMFloatABI(getToolChain().getDriver(), Args,
-                                           getToolChain().getTriple());
+    StringRef ARMFloatABI = tools::arm::getARMFloatABI(
+        getToolChain().getDriver(), Args, getToolChain().getTriple());
     CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=" + ARMFloatABI));
 
     Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
-    Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
+
+    // FIXME: Remove the krait check once GNU tools support the krait CPU.
+    // For now, replace it with -march=armv7-a to avoid a lower march from
+    // being picked in the absence of a CPU flag.
+    Arg *A;
+    if ((A = Args.getLastArg(options::OPT_mcpu_EQ)) &&
+        StringRef(A->getValue()) == "krait")
+      CmdArgs.push_back("-march=armv7-a");
+    else
+      Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
     Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
   } else if (getToolChain().getArch() == llvm::Triple::mips ||
              getToolChain().getArch() == llvm::Triple::mipsel ||
@@ -6251,13 +6706,7 @@
         CmdArgs.push_back(Args.MakeArgString("-mnan=2008"));
     }
 
-    if (Arg *A = Args.getLastArg(options::OPT_mfp32, options::OPT_mfp64)) {
-      if (A->getOption().matches(options::OPT_mfp32))
-        CmdArgs.push_back(Args.MakeArgString("-mfp32"));
-      else
-        CmdArgs.push_back(Args.MakeArgString("-mfp64"));
-    }
-
+    Args.AddLastArg(CmdArgs, options::OPT_mfp32, options::OPT_mfp64);
     Args.AddLastArg(CmdArgs, options::OPT_mips16, options::OPT_mno_mips16);
     Args.AddLastArg(CmdArgs, options::OPT_mmicromips,
                     options::OPT_mno_micromips);
@@ -6271,17 +6720,7 @@
         CmdArgs.push_back(Args.MakeArgString("-mmsa"));
     }
 
-    Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
-                                      options::OPT_fpic, options::OPT_fno_pic,
-                                      options::OPT_fPIE, options::OPT_fno_PIE,
-                                      options::OPT_fpie, options::OPT_fno_pie);
-    if (LastPICArg &&
-        (LastPICArg->getOption().matches(options::OPT_fPIC) ||
-         LastPICArg->getOption().matches(options::OPT_fpic) ||
-         LastPICArg->getOption().matches(options::OPT_fPIE) ||
-         LastPICArg->getOption().matches(options::OPT_fpie))) {
-      CmdArgs.push_back("-KPIC");
-    }
+    NeedsKPIC = true;
   } else if (getToolChain().getArch() == llvm::Triple::systemz) {
     // Always pass an -march option, since our default of z10 is later
     // than the GNU assembler's default.
@@ -6289,6 +6728,9 @@
     CmdArgs.push_back(Args.MakeArgString("-march=" + CPUName));
   }
 
+  if (NeedsKPIC)
+    addAssemblerKPIC(Args, CmdArgs);
+
   Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
                        options::OPT_Xassembler);
 
@@ -6314,7 +6756,7 @@
                    SplitDebugName(Args, Inputs));
 }
 
-static void AddLibgcc(llvm::Triple Triple, const Driver &D,
+static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
                       ArgStringList &CmdArgs, const ArgList &Args) {
   bool isAndroid = Triple.getEnvironment() == llvm::Triple::Android;
   bool StaticLibgcc = Args.hasArg(options::OPT_static_libgcc) ||
@@ -6347,31 +6789,39 @@
     CmdArgs.push_back("-ldl");
 }
 
-static bool hasMipsN32ABIArg(const ArgList &Args) {
-  Arg *A = Args.getLastArg(options::OPT_mabi_EQ);
-  return A && (A->getValue() == StringRef("n32"));
-}
-
 static StringRef getLinuxDynamicLinker(const ArgList &Args,
                                        const toolchains::Linux &ToolChain) {
-  if (ToolChain.getTriple().getEnvironment() == llvm::Triple::Android)
-    return "/system/bin/linker";
-  else if (ToolChain.getArch() == llvm::Triple::x86)
+  if (ToolChain.getTriple().getEnvironment() == llvm::Triple::Android) {
+    if (ToolChain.getTriple().isArch64Bit())
+      return "/system/bin/linker64";
+    else
+      return "/system/bin/linker";
+  } else if (ToolChain.getArch() == llvm::Triple::x86 ||
+             ToolChain.getArch() == llvm::Triple::sparc)
     return "/lib/ld-linux.so.2";
-  else if (ToolChain.getArch() == llvm::Triple::aarch64)
+  else if (ToolChain.getArch() == llvm::Triple::aarch64 ||
+           ToolChain.getArch() == llvm::Triple::arm64)
     return "/lib/ld-linux-aarch64.so.1";
+  else if (ToolChain.getArch() == llvm::Triple::aarch64_be)
+    return "/lib/ld-linux-aarch64_be.so.1";
   else if (ToolChain.getArch() == llvm::Triple::arm ||
            ToolChain.getArch() == llvm::Triple::thumb) {
     if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
       return "/lib/ld-linux-armhf.so.3";
     else
       return "/lib/ld-linux.so.3";
+  } else if (ToolChain.getArch() == llvm::Triple::armeb ||
+             ToolChain.getArch() == llvm::Triple::thumbeb) {
+    if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
+      return "/lib/ld-linux-armhf.so.3";        /* TODO: check which dynamic linker name.  */
+    else
+      return "/lib/ld-linux.so.3";              /* TODO: check which dynamic linker name.  */
   } else if (ToolChain.getArch() == llvm::Triple::mips ||
              ToolChain.getArch() == llvm::Triple::mipsel)
     return "/lib/ld.so.1";
   else if (ToolChain.getArch() == llvm::Triple::mips64 ||
            ToolChain.getArch() == llvm::Triple::mips64el) {
-    if (hasMipsN32ABIArg(Args))
+    if (mips::hasMipsAbiArg(Args, "n32"))
       return "/lib32/ld.so.1";
     else
       return "/lib64/ld.so.1";
@@ -6381,10 +6831,27 @@
            ToolChain.getArch() == llvm::Triple::ppc64le ||
            ToolChain.getArch() == llvm::Triple::systemz)
     return "/lib64/ld64.so.1";
+  else if (ToolChain.getArch() == llvm::Triple::sparcv9)
+    return "/lib64/ld-linux.so.2";
   else
     return "/lib64/ld-linux-x86-64.so.2";
 }
 
+static void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
+                           ArgStringList &CmdArgs, const ArgList &Args) {
+  // Link the runtime library selected via --rtlib (compiler-rt or libgcc).
+  ToolChain::RuntimeLibType RLT = TC.GetRuntimeLibType(Args);
+
+  switch (RLT) {
+  case ToolChain::RLT_CompilerRT:
+    addClangRTLinux(TC, Args, CmdArgs);
+    break;
+  case ToolChain::RLT_Libgcc:
+    AddLibgcc(TC.getTriple(), D, CmdArgs, Args);
+    break;
+  }
+}
+
 void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA,
                                   const InputInfo &Output,
                                   const InputInfoList &Inputs,
@@ -6395,10 +6862,9 @@
   const Driver &D = ToolChain.getDriver();
   const bool isAndroid =
     ToolChain.getTriple().getEnvironment() == llvm::Triple::Android;
-  const SanitizerArgs &Sanitize = ToolChain.getSanitizerArgs();
   const bool IsPIE =
     !Args.hasArg(options::OPT_shared) &&
-    (Args.hasArg(options::OPT_pie) || Sanitize.hasZeroBaseShadow());
+    (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault());
 
   ArgStringList CmdArgs;
 
@@ -6434,27 +6900,39 @@
   CmdArgs.push_back("-m");
   if (ToolChain.getArch() == llvm::Triple::x86)
     CmdArgs.push_back("elf_i386");
-  else if (ToolChain.getArch() == llvm::Triple::aarch64)
+  else if (ToolChain.getArch() == llvm::Triple::aarch64 ||
+           ToolChain.getArch() == llvm::Triple::arm64)
     CmdArgs.push_back("aarch64linux");
+  else if (ToolChain.getArch() == llvm::Triple::aarch64_be)
+    CmdArgs.push_back("aarch64_be_linux");
   else if (ToolChain.getArch() == llvm::Triple::arm
            ||  ToolChain.getArch() == llvm::Triple::thumb)
     CmdArgs.push_back("armelf_linux_eabi");
+  else if (ToolChain.getArch() == llvm::Triple::armeb
+           ||  ToolChain.getArch() == llvm::Triple::thumbeb)
+    CmdArgs.push_back("armebelf_linux_eabi"); /* TODO: check which NAME.  */
   else if (ToolChain.getArch() == llvm::Triple::ppc)
     CmdArgs.push_back("elf32ppclinux");
   else if (ToolChain.getArch() == llvm::Triple::ppc64)
     CmdArgs.push_back("elf64ppc");
+  else if (ToolChain.getArch() == llvm::Triple::ppc64le)
+    CmdArgs.push_back("elf64lppc");
+  else if (ToolChain.getArch() == llvm::Triple::sparc)
+    CmdArgs.push_back("elf32_sparc");
+  else if (ToolChain.getArch() == llvm::Triple::sparcv9)
+    CmdArgs.push_back("elf64_sparc");
   else if (ToolChain.getArch() == llvm::Triple::mips)
     CmdArgs.push_back("elf32btsmip");
   else if (ToolChain.getArch() == llvm::Triple::mipsel)
     CmdArgs.push_back("elf32ltsmip");
   else if (ToolChain.getArch() == llvm::Triple::mips64) {
-    if (hasMipsN32ABIArg(Args))
+    if (mips::hasMipsAbiArg(Args, "n32"))
       CmdArgs.push_back("elf32btsmipn32");
     else
       CmdArgs.push_back("elf64btsmip");
   }
   else if (ToolChain.getArch() == llvm::Triple::mips64el) {
-    if (hasMipsN32ABIArg(Args))
+    if (mips::hasMipsAbiArg(Args, "n32"))
       CmdArgs.push_back("elf32ltsmipn32");
     else
       CmdArgs.push_back("elf64ltsmip");
@@ -6465,8 +6943,10 @@
     CmdArgs.push_back("elf_x86_64");
 
   if (Args.hasArg(options::OPT_static)) {
-    if (ToolChain.getArch() == llvm::Triple::arm
-        || ToolChain.getArch() == llvm::Triple::thumb)
+    if (ToolChain.getArch() == llvm::Triple::arm ||
+        ToolChain.getArch() == llvm::Triple::armeb ||
+        ToolChain.getArch() == llvm::Triple::thumb ||
+        ToolChain.getArch() == llvm::Triple::thumbeb)
       CmdArgs.push_back("-Bstatic");
     else
       CmdArgs.push_back("-static");
@@ -6478,7 +6958,9 @@
   }
 
   if (ToolChain.getArch() == llvm::Triple::arm ||
+      ToolChain.getArch() == llvm::Triple::armeb ||
       ToolChain.getArch() == llvm::Triple::thumb ||
+      ToolChain.getArch() == llvm::Triple::thumbeb ||
       (!Args.hasArg(options::OPT_static) &&
        !Args.hasArg(options::OPT_shared))) {
     CmdArgs.push_back("-dynamic-linker");
@@ -6530,50 +7012,17 @@
        i != e; ++i)
     CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + *i));
 
-  // Tell the linker to load the plugin. This has to come before AddLinkerInputs
-  // as gold requires -plugin to come before any -plugin-opt that -Wl might
-  // forward.
-  if (D.IsUsingLTO(Args)) {
-    CmdArgs.push_back("-plugin");
-    std::string Plugin = ToolChain.getDriver().Dir + "/../lib/LLVMgold.so";
-    CmdArgs.push_back(Args.MakeArgString(Plugin));
-
-    // Try to pass driver level flags relevant to LTO code generation down to
-    // the plugin.
-
-    // Handle flags for selecting CPU variants.
-    std::string CPU = getCPUName(Args, ToolChain.getTriple());
-    if (!CPU.empty()) {
-      CmdArgs.push_back(
-                        Args.MakeArgString(Twine("-plugin-opt=mcpu=") +
-                                           CPU));
-    }
-  }
-
+  if (D.IsUsingLTO(Args))
+    AddGoldPlugin(ToolChain, Args, CmdArgs);
 
   if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
     CmdArgs.push_back("--no-demangle");
 
   AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
 
-  // Call these before we add the C++ ABI library.
-  if (Sanitize.needsUbsanRt())
-    addUbsanRTLinux(getToolChain(), Args, CmdArgs, D.CCCIsCXX(),
-                    Sanitize.needsAsanRt() || Sanitize.needsTsanRt() ||
-                    Sanitize.needsMsanRt() || Sanitize.needsLsanRt());
-  if (Sanitize.needsAsanRt())
-    addAsanRTLinux(getToolChain(), Args, CmdArgs);
-  if (Sanitize.needsTsanRt())
-    addTsanRTLinux(getToolChain(), Args, CmdArgs);
-  if (Sanitize.needsMsanRt())
-    addMsanRTLinux(getToolChain(), Args, CmdArgs);
-  if (Sanitize.needsLsanRt())
-    addLsanRTLinux(getToolChain(), Args, CmdArgs);
-  if (Sanitize.needsDfsanRt())
-    addDfsanRTLinux(getToolChain(), Args, CmdArgs);
-
+  addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
   // The profile runtime also needs access to system libraries.
-  addProfileRTLinux(getToolChain(), Args, CmdArgs);
+  addProfileRT(getToolChain(), Args, CmdArgs);
 
   if (D.CCCIsCXX() &&
       !Args.hasArg(options::OPT_nostdlib) &&
@@ -6593,19 +7042,36 @@
       if (Args.hasArg(options::OPT_static))
         CmdArgs.push_back("--start-group");
 
-      bool OpenMP = Args.hasArg(options::OPT_fopenmp);
-      if (OpenMP) {
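+      // Map -fopenmp to libgomp and honor an explicit -fopenmp=libgomp or
+      // -fopenmp=libiomp5; any other value is diagnosed as unsupported.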
+      LibOpenMP UsedOpenMPLib = LibUnknown;
+      if (Args.hasArg(options::OPT_fopenmp)) {
+        UsedOpenMPLib = LibGOMP;
+      } else if (const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ)) {
+        UsedOpenMPLib = llvm::StringSwitch<LibOpenMP>(A->getValue())
+            .Case("libgomp",  LibGOMP)
+            .Case("libiomp5", LibIOMP5)
+            .Default(LibUnknown);
+        if (UsedOpenMPLib == LibUnknown)
+          D.Diag(diag::err_drv_unsupported_option_argument)
+            << A->getOption().getName() << A->getValue();
+      }
+      switch (UsedOpenMPLib) {
+      case LibGOMP:
         CmdArgs.push_back("-lgomp");
 
-        // FIXME: Exclude this for platforms whith libgomp that doesn't require
-        // librt. Most modern Linux platfroms require it, but some may not.
+        // FIXME: Exclude this for platforms with libgomp that don't require
+        // librt. Most modern Linux platforms require it, but some may not.
         CmdArgs.push_back("-lrt");
+        break;
+      case LibIOMP5:
+        CmdArgs.push_back("-liomp5");
+        break;
+      case LibUnknown:
+        break;
       }
-
-      AddLibgcc(ToolChain.getTriple(), D, CmdArgs, Args);
+      AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
 
       if (Args.hasArg(options::OPT_pthread) ||
-          Args.hasArg(options::OPT_pthreads) || OpenMP)
+          Args.hasArg(options::OPT_pthreads) || UsedOpenMPLib != LibUnknown)
         CmdArgs.push_back("-lpthread");
 
       CmdArgs.push_back("-lc");
@@ -6613,7 +7079,7 @@
       if (Args.hasArg(options::OPT_static))
         CmdArgs.push_back("--end-group");
       else
-        AddLibgcc(ToolChain.getTriple(), D, CmdArgs, Args);
+        AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
     }
 
     if (!Args.hasArg(options::OPT_nostartfiles)) {
@@ -6687,7 +7153,7 @@
 
   AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
 
-  addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+  addProfileRT(getToolChain(), Args, CmdArgs);
 
   if (!Args.hasArg(options::OPT_nostdlib) &&
       !Args.hasArg(options::OPT_nodefaultlibs)) {
@@ -6889,7 +7355,7 @@
                             getToolChain().GetFilePath("crtn.o")));
   }
 
-  addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+  addProfileRT(getToolChain(), Args, CmdArgs);
 
   const char *Exec =
     Args.MakeArgString(getToolChain().GetProgramPath("ld"));
@@ -7038,8 +7504,15 @@
   if (Arg *A = Args.getLastArg(options::OPT_frtti, options::OPT_fno_rtti))
     CmdArgs.push_back(A->getOption().getID() == options::OPT_frtti ? "/GR"
                                                                    : "/GR-");
+  if (Arg *A = Args.getLastArg(options::OPT_ffunction_sections,
+                               options::OPT_fno_function_sections))
+    CmdArgs.push_back(A->getOption().getID() == options::OPT_ffunction_sections
+                          ? "/Gy"
+                          : "/Gy-");
   if (Args.hasArg(options::OPT_fsyntax_only))
     CmdArgs.push_back("/Zs");
+  if (Args.hasArg(options::OPT_g_Flag, options::OPT_gline_tables_only))
+    CmdArgs.push_back("/Z7");
 
   std::vector<std::string> Includes = Args.getAllArgValues(options::OPT_include);
   for (size_t I = 0, E = Includes.size(); I != E; ++I)
@@ -7093,9 +7566,15 @@
 
   CmdArgs.push_back("-c");
 
-  if (Args.hasArg(options::OPT_g_Group)) {
+  if (Args.hasArg(options::OPT_v))
+    CmdArgs.push_back("-v");
+
+  if (Args.hasArg(options::OPT_g_Group))
     CmdArgs.push_back("-g");
-  }
+
+  if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
+                   false))
+    CmdArgs.push_back("-fverbose-asm");
 
   Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
                        options::OPT_Xassembler);
@@ -7125,6 +7604,13 @@
     assert(Output.isNothing() && "Invalid output.");
   }
 
+  if (Args.hasArg(options::OPT_v))
+    CmdArgs.push_back("-v");
+
+  ExceptionSettings EH = exceptionSettings(Args, getToolChain().getTriple());
+  if (EH.ShouldUseExceptionTables)
+    CmdArgs.push_back("-fexceptions");
+
   AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
 
   const char *Exec =
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index d5b2848..575c988 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -25,7 +25,7 @@
   class Driver;
 
 namespace toolchains {
-  class Darwin;
+  class MachO;
 }
 
 namespace tools {
@@ -54,6 +54,8 @@
     void AddARMTargetArgs(const llvm::opt::ArgList &Args,
                           llvm::opt::ArgStringList &CmdArgs,
                           bool KernelOrKext) const;
+    void AddARM64TargetArgs(const llvm::opt::ArgList &Args,
+                            llvm::opt::ArgStringList &CmdArgs) const;
     void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
                            llvm::opt::ArgStringList &CmdArgs) const;
     void AddR600TargetArgs(const llvm::opt::ArgList &Args,
@@ -79,15 +81,14 @@
   public:
     Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC) {}
 
-    virtual bool hasGoodDiagnostics() const { return true; }
-    virtual bool hasIntegratedAssembler() const { return true; }
-    virtual bool hasIntegratedCPP() const { return true; }
+    bool hasGoodDiagnostics() const override { return true; }
+    bool hasIntegratedAssembler() const override { return true; }
+    bool hasIntegratedCPP() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
   /// \brief Clang integrated assembler tool.
@@ -96,15 +97,14 @@
     ClangAs(const ToolChain &TC) : Tool("clang::as",
                                         "clang integrated assembler", TC) {}
 
-    virtual bool hasGoodDiagnostics() const { return true; }
-    virtual bool hasIntegratedAssembler() const { return false; }
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasGoodDiagnostics() const override { return true; }
+    bool hasIntegratedAssembler() const override { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
   /// gcc - Generic GCC tool implementations.
@@ -114,11 +114,11 @@
     Common(const char *Name, const char *ShortName,
            const ToolChain &TC) : Tool(Name, ShortName, TC) {}
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output,
+                      const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
 
     /// RenderExtraToolArgs - Render any arguments necessary to force
     /// the particular tool mode.
@@ -132,23 +132,11 @@
     Preprocess(const ToolChain &TC) : Common("gcc::Preprocess",
                                              "gcc preprocessor", TC) {}
 
-    virtual bool hasGoodDiagnostics() const { return true; }
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasGoodDiagnostics() const override { return true; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void RenderExtraToolArgs(const JobAction &JA,
-                                     llvm::opt::ArgStringList &CmdArgs) const;
-  };
-
-  class LLVM_LIBRARY_VISIBILITY Precompile : public Common  {
-  public:
-    Precompile(const ToolChain &TC) : Common("gcc::Precompile",
-                                             "gcc precompile", TC) {}
-
-    virtual bool hasGoodDiagnostics() const { return true; }
-    virtual bool hasIntegratedCPP() const { return true; }
-
-    virtual void RenderExtraToolArgs(const JobAction &JA,
-                                     llvm::opt::ArgStringList &CmdArgs) const;
+    void RenderExtraToolArgs(const JobAction &JA,
+                             llvm::opt::ArgStringList &CmdArgs) const override;
   };
 
   class LLVM_LIBRARY_VISIBILITY Compile : public Common  {
@@ -156,22 +144,11 @@
     Compile(const ToolChain &TC) : Common("gcc::Compile",
                                           "gcc frontend", TC) {}
 
-    virtual bool hasGoodDiagnostics() const { return true; }
-    virtual bool hasIntegratedCPP() const { return true; }
+    bool hasGoodDiagnostics() const override { return true; }
+    bool hasIntegratedCPP() const override { return true; }
 
-    virtual void RenderExtraToolArgs(const JobAction &JA,
-                                     llvm::opt::ArgStringList &CmdArgs) const;
-  };
-
-  class LLVM_LIBRARY_VISIBILITY Assemble : public Common  {
-  public:
-    Assemble(const ToolChain &TC) : Common("gcc::Assemble",
-                                           "assembler (via gcc)", TC) {}
-
-    virtual bool hasIntegratedCPP() const { return false; }
-
-    virtual void RenderExtraToolArgs(const JobAction &JA,
-                                     llvm::opt::ArgStringList &CmdArgs) const;
+    void RenderExtraToolArgs(const JobAction &JA,
+                             llvm::opt::ArgStringList &CmdArgs) const override;
   };
 
   class LLVM_LIBRARY_VISIBILITY Link : public Common  {
@@ -179,11 +156,11 @@
     Link(const ToolChain &TC) : Common("gcc::Link",
                                        "linker (via gcc)", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void RenderExtraToolArgs(const JobAction &JA,
-                                     llvm::opt::ArgStringList &CmdArgs) const;
+    void RenderExtraToolArgs(const JobAction &JA,
+                             llvm::opt::ArgStringList &CmdArgs) const override;
   };
 } // end namespace gcc
 
@@ -195,15 +172,14 @@
     Assemble(const ToolChain &TC) : Tool("hexagon::Assemble",
       "hexagon-as", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void RenderExtraToolArgs(const JobAction &JA,
-                                     llvm::opt::ArgStringList &CmdArgs) const;
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void RenderExtraToolArgs(const JobAction &JA,
+                             llvm::opt::ArgStringList &CmdArgs) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
   class LLVM_LIBRARY_VISIBILITY Link : public Tool {
@@ -211,111 +187,118 @@
     Link(const ToolChain &TC) : Tool("hexagon::Link",
       "hexagon-ld", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
     virtual void RenderExtraToolArgs(const JobAction &JA,
                                      llvm::opt::ArgStringList &CmdArgs) const;
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace hexagon.
 
+namespace arm {
+  StringRef getARMTargetCPU(const llvm::opt::ArgList &Args,
+                            const llvm::Triple &Triple);
+  const char* getARMCPUForMArch(const llvm::opt::ArgList &Args,
+                                const llvm::Triple &Triple);
+  const char* getLLVMArchSuffixForARM(StringRef CPU);
+}
+
+namespace mips {
+  bool hasMipsAbiArg(const llvm::opt::ArgList &Args, const char *Value);
+}
 
 namespace darwin {
-  llvm::Triple::ArchType getArchTypeForDarwinArchName(StringRef Str);
+  llvm::Triple::ArchType getArchTypeForMachOArchName(StringRef Str);
+  void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str);
 
-  class LLVM_LIBRARY_VISIBILITY DarwinTool : public Tool {
+  class LLVM_LIBRARY_VISIBILITY MachOTool : public Tool {
     virtual void anchor();
   protected:
-    void AddDarwinArch(const llvm::opt::ArgList &Args,
+    void AddMachOArch(const llvm::opt::ArgList &Args,
                        llvm::opt::ArgStringList &CmdArgs) const;
 
-    const toolchains::Darwin &getDarwinToolChain() const {
-      return reinterpret_cast<const toolchains::Darwin&>(getToolChain());
+    const toolchains::MachO &getMachOToolChain() const {
+      return reinterpret_cast<const toolchains::MachO&>(getToolChain());
     }
 
   public:
-    DarwinTool(const char *Name, const char *ShortName,
+    MachOTool(const char *Name, const char *ShortName,
                const ToolChain &TC) : Tool(Name, ShortName, TC) {}
   };
 
-  class LLVM_LIBRARY_VISIBILITY Assemble : public DarwinTool  {
+  class LLVM_LIBRARY_VISIBILITY Assemble : public MachOTool  {
   public:
-    Assemble(const ToolChain &TC) : DarwinTool("darwin::Assemble",
-                                               "assembler", TC) {}
+    Assemble(const ToolChain &TC) : MachOTool("darwin::Assemble",
+                                              "assembler", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
-  class LLVM_LIBRARY_VISIBILITY Link : public DarwinTool  {
+  class LLVM_LIBRARY_VISIBILITY Link : public MachOTool  {
     bool NeedsTempPath(const InputInfoList &Inputs) const;
     void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
                      llvm::opt::ArgStringList &CmdArgs,
                      const InputInfoList &Inputs) const;
 
   public:
-    Link(const ToolChain &TC) : DarwinTool("darwin::Link", "linker", TC) {}
+    Link(const ToolChain &TC) : MachOTool("darwin::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
-  class LLVM_LIBRARY_VISIBILITY Lipo : public DarwinTool  {
+  class LLVM_LIBRARY_VISIBILITY Lipo : public MachOTool  {
   public:
-    Lipo(const ToolChain &TC) : DarwinTool("darwin::Lipo", "lipo", TC) {}
+    Lipo(const ToolChain &TC) : MachOTool("darwin::Lipo", "lipo", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
-  class LLVM_LIBRARY_VISIBILITY Dsymutil : public DarwinTool  {
+  class LLVM_LIBRARY_VISIBILITY Dsymutil : public MachOTool  {
   public:
-    Dsymutil(const ToolChain &TC) : DarwinTool("darwin::Dsymutil",
-                                               "dsymutil", TC) {}
+    Dsymutil(const ToolChain &TC) : MachOTool("darwin::Dsymutil",
+                                              "dsymutil", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isDsymutilJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isDsymutilJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output,
+                      const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
-  class LLVM_LIBRARY_VISIBILITY VerifyDebug : public DarwinTool  {
+  class LLVM_LIBRARY_VISIBILITY VerifyDebug : public MachOTool  {
   public:
-    VerifyDebug(const ToolChain &TC) : DarwinTool("darwin::VerifyDebug",
-                                                  "dwarfdump", TC) {}
+    VerifyDebug(const ToolChain &TC) : MachOTool("darwin::VerifyDebug",
+                                                 "dwarfdump", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
 }
@@ -327,26 +310,25 @@
     Assemble(const ToolChain &TC) : Tool("openbsd::Assemble", "assembler",
                                          TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output,
+                      const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
   public:
     Link(const ToolChain &TC) : Tool("openbsd::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace openbsd
 
@@ -357,26 +339,24 @@
     Assemble(const ToolChain &TC) : Tool("bitrig::Assemble", "assembler",
                                          TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
   public:
     Link(const ToolChain &TC) : Tool("bitrig::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace bitrig
 
@@ -387,26 +367,24 @@
     Assemble(const ToolChain &TC) : Tool("freebsd::Assemble", "assembler",
                                          TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
   public:
     Link(const ToolChain &TC) : Tool("freebsd::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace freebsd
 
@@ -418,13 +396,12 @@
     Assemble(const ToolChain &TC)
       : Tool("netbsd::Assemble", "assembler", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
 
@@ -432,14 +409,13 @@
     Link(const ToolChain &TC)
       : Tool("netbsd::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace netbsd
 
@@ -449,26 +425,26 @@
   public:
     Assemble(const ToolChain &TC) : Tool("GNU::Assemble", "assembler", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output,
+                      const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
   public:
     Link(const ToolChain &TC) : Tool("GNU::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output,
+                      const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 }
   /// minix -- Directly call GNU Binutils assembler and linker
@@ -478,26 +454,26 @@
     Assemble(const ToolChain &TC) : Tool("minix::Assemble", "assembler",
                                          TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output,
+                      const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
   public:
     Link(const ToolChain &TC) : Tool("minix::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output,
+                      const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace minix
 
@@ -508,26 +484,24 @@
     Assemble(const ToolChain &TC) : Tool("solaris::Assemble", "assembler",
                                          TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
   public:
     Link(const ToolChain &TC) : Tool("solaris::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace solaris
 
@@ -538,26 +512,24 @@
     Assemble(const ToolChain &TC) : Tool("auroraux::Assemble", "assembler",
                                          TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
   public:
     Link(const ToolChain &TC) : Tool("auroraux::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace auroraux
 
@@ -568,26 +540,25 @@
     Assemble(const ToolChain &TC) : Tool("dragonfly::Assemble", "assembler",
                                          TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
+    bool hasIntegratedCPP() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
   class LLVM_LIBRARY_VISIBILITY Link : public Tool  {
   public:
     Link(const ToolChain &TC) : Tool("dragonfly::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output,
+                      const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace dragonfly
 
@@ -597,29 +568,27 @@
   public:
     Link(const ToolChain &TC) : Tool("visualstudio::Link", "linker", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
   class LLVM_LIBRARY_VISIBILITY Compile : public Tool {
   public:
     Compile(const ToolChain &TC) : Tool("visualstudio::Compile", "compiler", TC) {}
 
-    virtual bool hasIntegratedAssembler() const { return true; }
-    virtual bool hasIntegratedCPP() const { return true; }
-    virtual bool isLinkJob() const { return false; }
+    bool hasIntegratedAssembler() const override { return true; }
+    bool hasIntegratedCPP() const override { return true; }
+    bool isLinkJob() const override { return false; }
 
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
 
     Command *GetCommand(Compilation &C, const JobAction &JA,
                         const InputInfo &Output,
@@ -629,6 +598,10 @@
   };
 } // end namespace visualstudio
 
+namespace arm {
+  StringRef getARMFloatABI(const Driver &D, const llvm::opt::ArgList &Args,
+                         const llvm::Triple &Triple);
+}
 namespace XCore {
   // For XCore, we do not need to instantiate tools for PreProcess, PreCompile and Compile.
   // We simply use "clang -cc1" for those actions.
@@ -637,12 +610,11 @@
     Assemble(const ToolChain &TC) : Tool("XCore::Assemble",
       "XCore-as", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    bool hasIntegratedCPP() const override { return false; }
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 
   class LLVM_LIBRARY_VISIBILITY Link : public Tool {
@@ -650,13 +622,12 @@
     Link(const ToolChain &TC) : Tool("XCore::Link",
       "XCore-ld", TC) {}
 
-    virtual bool hasIntegratedCPP() const { return false; }
-    virtual bool isLinkJob() const { return true; }
-    virtual void ConstructJob(Compilation &C, const JobAction &JA,
-                              const InputInfo &Output,
-                              const InputInfoList &Inputs,
-                              const llvm::opt::ArgList &TCArgs,
-                              const char *LinkingOutput) const;
+    bool hasIntegratedCPP() const override { return false; }
+    bool isLinkJob() const override { return true; }
+    void ConstructJob(Compilation &C, const JobAction &JA,
+                      const InputInfo &Output, const InputInfoList &Inputs,
+                      const llvm::opt::ArgList &TCArgs,
+                      const char *LinkingOutput) const override;
   };
 } // end namespace XCore.
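The header changes above consistently replace 'virtual' with the C++11 'override' keyword on every overridden Tool method. A minimal standalone sketch of why that matters follows; Base and Derived are hypothetical names, not Clang classes, and this is only an illustration of the compile-time check, not part of the patch.

    // Illustration only: 'override' turns a signature mismatch into an error.
    struct Base {
      virtual bool hasIntegratedCPP() const { return true; }
      virtual ~Base() {}
    };

    struct Derived : Base {
      // Dropping the 'const' here would now be rejected by the compiler
      // instead of silently declaring a new, unrelated virtual function.
      bool hasIntegratedCPP() const override { return false; }
    };

    int main() {
      Derived D;
      Base &B = D;
      return B.hasIntegratedCPP() ? 1 : 0; // returns 0: the override is called
    }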
 
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
index d947ae7..3538dbc 100644
--- a/lib/Driver/Types.cpp
+++ b/lib/Driver/Types.cpp
@@ -174,6 +174,8 @@
            .Case("F95", TY_Fortran)
            .Case("mii", TY_PP_ObjCXX)
            .Case("pcm", TY_ModuleFile)
+           .Case("pch", TY_PCH)
+           .Case("gch", TY_PCH)
            .Default(TY_INVALID);
 }
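For readers unfamiliar with the pattern touched above: llvm::StringSwitch maps string keys to values through a chain of .Case() calls and falls back to .Default() for anything unknown, which is how the new "pch" and "gch" extensions resolve to the precompiled-header type. A small self-contained sketch, with FileType and lookupExtension as illustrative stand-ins rather than Clang's real names:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    enum FileType { FT_PCH, FT_ModuleFile, FT_Invalid };

    // Unknown extensions fall through to the Default case.
    static FileType lookupExtension(llvm::StringRef Ext) {
      return llvm::StringSwitch<FileType>(Ext)
          .Case("pcm", FT_ModuleFile)
          .Case("pch", FT_PCH)
          .Case("gch", FT_PCH)
          .Default(FT_Invalid);
    }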
 
diff --git a/lib/Driver/WindowsToolChain.cpp b/lib/Driver/WindowsToolChain.cpp
index 2b6320a..a32a7bf 100644
--- a/lib/Driver/WindowsToolChain.cpp
+++ b/lib/Driver/WindowsToolChain.cpp
@@ -43,7 +43,7 @@
 }
 
 Tool *Windows::buildAssembler() const {
-  if (getTriple().getEnvironment() == llvm::Triple::MachO)
+  if (getTriple().isOSBinFormatMachO())
     return new tools::darwin::Assemble(*this);
   getDriver().Diag(clang::diag::err_no_external_assembler);
   return NULL;
@@ -324,7 +324,6 @@
                        VSDir + "\\VC\\PlatformSDK\\Include");
     return;
   }
-#endif // _MSC_VER
 
   // As a fallback, select default install paths.
   const StringRef Paths[] = {
@@ -335,6 +334,7 @@
     "C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include"
   };
   addSystemIncludes(DriverArgs, CC1Args, Paths);
+#endif // _MSC_VER
 }
 
 void Windows::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
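The buildAssembler() change above keys the check off the object file format instead of the triple's environment component. A short standalone check of that API, assuming an LLVM of this vintage where Triple::isOSBinFormatMachO() is available; the triples are illustrative:

    #include "llvm/ADT/Triple.h"
    #include <cassert>

    int main() {
      // Darwin triples report Mach-O even without an explicit environment.
      assert(llvm::Triple("x86_64-apple-darwin12").isOSBinFormatMachO());
      // A Windows triple does not, so the driver keeps emitting the
      // "no external assembler" diagnostic on that path.
      assert(!llvm::Triple("x86_64-pc-win32").isOSBinFormatMachO());
      return 0;
    }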
diff --git a/lib/Edit/CMakeLists.txt b/lib/Edit/CMakeLists.txt
index cce1c19..08495f8 100644
--- a/lib/Edit/CMakeLists.txt
+++ b/lib/Edit/CMakeLists.txt
@@ -1,19 +1,13 @@
+set(LLVM_LINK_COMPONENTS
+  Support
+  )
+
 add_clang_library(clangEdit
   Commit.cpp
   EditedSource.cpp
   RewriteObjCFoundationAPI.cpp
-  )
 
-add_dependencies(clangEdit
-  ClangAttrClasses
-  ClangAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangEdit
+  LINK_LIBS
   clangBasic
   clangAST
   clangLex
diff --git a/lib/Edit/EditedSource.cpp b/lib/Edit/EditedSource.cpp
index 34b5e62..8fa2ee5 100644
--- a/lib/Edit/EditedSource.cpp
+++ b/lib/Edit/EditedSource.cpp
@@ -72,13 +72,11 @@
     return true;
   }
 
-  Twine concat;
   if (beforePreviousInsertions)
-    concat = Twine(text) + FA.Text;
+    FA.Text = copyString(Twine(text) + FA.Text);
   else
-    concat = Twine(FA.Text) +  text;
+    FA.Text = copyString(Twine(FA.Text) + text);
 
-  FA.Text = copyString(concat);
   return true;
 }
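The EditedSource change above folds the concatenation straight into copyString() instead of holding it in a named Twine. A Twine only references its operands, so storing one beyond the full expression risks dangling references; the sketch below shows the safe pattern of materializing the result immediately (join is a hypothetical helper, not Clang code):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Twine.h"
    #include <string>

    // Concatenate and copy out within a single expression; no Twine outlives it.
    static std::string join(llvm::StringRef A, llvm::StringRef B) {
      llvm::SmallString<64> Buf;
      return (llvm::Twine(A) + B).toStringRef(Buf).str();
    }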
 
diff --git a/lib/Edit/RewriteObjCFoundationAPI.cpp b/lib/Edit/RewriteObjCFoundationAPI.cpp
index f4206fb..879c17a 100644
--- a/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1033,6 +1033,7 @@
     case CK_IntegralComplexToReal:
     case CK_IntegralComplexToBoolean:
     case CK_AtomicToNonAtomic:
+    case CK_AddressSpaceConversion:
       needsCast = true;
       break;
 
diff --git a/lib/Format/Android.mk b/lib/Format/Android.mk
new file mode 100644
index 0000000..302f28f
--- /dev/null
+++ b/lib/Format/Android.mk
@@ -0,0 +1,41 @@
+LOCAL_PATH:= $(call my-dir)
+
+# For the host only
+# =====================================================
+include $(CLEAR_VARS)
+include $(CLEAR_TBLGEN_VARS)
+
+TBLGEN_TABLES := \
+  AttrHasAttributeImpl.inc \
+  DiagnosticASTKinds.inc \
+  DiagnosticAnalysisKinds.inc \
+  DiagnosticCommentKinds.inc \
+  DiagnosticCommonKinds.inc \
+  DiagnosticDriverKinds.inc \
+  DiagnosticFrontendKinds.inc \
+  DiagnosticGroups.inc \
+  DiagnosticIndexName.inc \
+  DiagnosticLexKinds.inc \
+  DiagnosticParseKinds.inc \
+  DiagnosticSemaKinds.inc \
+  DiagnosticSerializationKinds.inc \
+  arm_neon.inc
+
+clang_format_SRC_FILES := \
+  BreakableToken.cpp \
+  ContinuationIndenter.cpp \
+  Format.cpp \
+  FormatToken.cpp \
+  TokenAnnotator.cpp \
+  UnwrappedLineParser.cpp \
+  WhitespaceManager.cpp
+
+LOCAL_SRC_FILES := $(clang_format_SRC_FILES)
+
+LOCAL_MODULE:= libclangFormat
+LOCAL_MODULE_TAGS := optional
+
+include $(CLANG_HOST_BUILD_MK)
+include $(CLANG_VERSION_INC_MK)
+include $(CLANG_TBLGEN_RULES_MK)
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/lib/Format/BreakableToken.cpp b/lib/Format/BreakableToken.cpp
index d720ce9..d43a774 100644
--- a/lib/Format/BreakableToken.cpp
+++ b/lib/Format/BreakableToken.cpp
@@ -92,9 +92,7 @@
     return BreakableToken::Split(StringRef::npos, 0);
   if (ColumnLimit <= UsedColumns)
     return BreakableToken::Split(StringRef::npos, 0);
-  unsigned MaxSplit = std::min<unsigned>(
-      ColumnLimit - UsedColumns,
-      encoding::columnWidthWithTabs(Text, UsedColumns, TabWidth, Encoding) - 1);
+  unsigned MaxSplit = ColumnLimit - UsedColumns;
   StringRef::size_type SpaceOffset = 0;
   StringRef::size_type SlashOffset = 0;
   StringRef::size_type WordStartOffset = 0;
@@ -110,7 +108,7 @@
           Text.substr(0, Advance), UsedColumns + Chars, TabWidth, Encoding);
     }
 
-    if (Chars > MaxSplit)
+    if (Chars > MaxSplit || Text.size() == Advance)
       break;
 
     if (IsBlank(Text[0]))
@@ -151,7 +149,7 @@
     encoding::Encoding Encoding, const FormatStyle &Style)
     : BreakableToken(Tok, IndentLevel, InPPDirective, Encoding, Style),
       StartColumn(StartColumn), Prefix(Prefix), Postfix(Postfix) {
-  assert(Tok.TokenText.startswith(Prefix) && Tok.TokenText.endswith(Postfix));
+  assert(Tok.TokenText.endswith(Postfix));
   Line = Tok.TokenText.substr(
       Prefix.size(), Tok.TokenText.size() - Prefix.size() - Postfix.size());
 }
@@ -174,24 +172,38 @@
 void BreakableStringLiteral::insertBreak(unsigned LineIndex,
                                          unsigned TailOffset, Split Split,
                                          WhitespaceManager &Whitespaces) {
+  unsigned LeadingSpaces = StartColumn;
+  // The '@' of an ObjC string literal (@"Test") does not become part of the
+  // string token.
+  // FIXME: It might be a cleaner solution to merge the tokens as a
+  // precomputation step.
+  if (Prefix.startswith("@"))
+    --LeadingSpaces;
   Whitespaces.replaceWhitespaceInToken(
       Tok, Prefix.size() + TailOffset + Split.first, Split.second, Postfix,
-      Prefix, InPPDirective, 1, IndentLevel, StartColumn);
+      Prefix, InPPDirective, 1, IndentLevel, LeadingSpaces);
 }
 
-static StringRef getLineCommentPrefix(StringRef Comment) {
-  static const char *const KnownPrefixes[] = { "/// ", "///", "// ", "//" };
-  for (size_t i = 0, e = llvm::array_lengthof(KnownPrefixes); i != e; ++i)
-    if (Comment.startswith(KnownPrefixes[i]))
-      return KnownPrefixes[i];
-  return "";
+static StringRef getLineCommentIndentPrefix(StringRef Comment) {
+  static const char *const KnownPrefixes[] = { "///", "//" };
+  StringRef LongestPrefix;
+  for (StringRef KnownPrefix : KnownPrefixes) {
+    if (Comment.startswith(KnownPrefix)) {
+      size_t PrefixLength = KnownPrefix.size();
+      while (PrefixLength < Comment.size() && Comment[PrefixLength] == ' ')
+        ++PrefixLength;
+      if (PrefixLength > LongestPrefix.size())
+        LongestPrefix = Comment.substr(0, PrefixLength);
+    }
+  }
+  return LongestPrefix;
 }
 
 BreakableLineComment::BreakableLineComment(
     const FormatToken &Token, unsigned IndentLevel, unsigned StartColumn,
     bool InPPDirective, encoding::Encoding Encoding, const FormatStyle &Style)
     : BreakableSingleLineToken(Token, IndentLevel, StartColumn,
-                               getLineCommentPrefix(Token.TokenText), "",
+                               getLineCommentIndentPrefix(Token.TokenText), "",
                                InPPDirective, Encoding, Style) {
   OriginalPrefix = Prefix;
   if (Token.TokenText.size() > Prefix.size() &&
@@ -337,7 +349,7 @@
   LeadingWhitespace[LineIndex] =
       Lines[LineIndex].begin() - Lines[LineIndex - 1].end();
 
-  // Adjust the start column uniformly accross all lines.
+  // Adjust the start column uniformly across all lines.
   StartOfLineColumn[LineIndex] = std::max<int>(
       0,
       encoding::columnWidthWithTabs(Whitespace, 0, Style.TabWidth, Encoding) +
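The new getLineCommentIndentPrefix() above keeps the trailing spaces after the longest known comment marker, so re-broken line comments preserve their original text indentation. A standalone sketch of that behavior, with the same logic copied out of context purely for illustration:

    #include "llvm/ADT/StringRef.h"
    #include <cassert>

    static llvm::StringRef getIndentPrefix(llvm::StringRef Comment) {
      static const char *const KnownPrefixes[] = { "///", "//" };
      llvm::StringRef Longest;
      for (const char *P : KnownPrefixes) {
        llvm::StringRef Known(P);
        if (Comment.startswith(Known)) {
          size_t Len = Known.size();
          while (Len < Comment.size() && Comment[Len] == ' ')
            ++Len;
          if (Len > Longest.size())
            Longest = Comment.substr(0, Len);
        }
      }
      return Longest;
    }

    int main() {
      assert(getIndentPrefix("//   aligned comment") == "//   ");
      assert(getIndentPrefix("/// doc comment") == "/// ");
      return 0;
    }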
diff --git a/lib/Format/BreakableToken.h b/lib/Format/BreakableToken.h
index b965190..e294a50 100644
--- a/lib/Format/BreakableToken.h
+++ b/lib/Format/BreakableToken.h
@@ -90,10 +90,9 @@
 /// \c getSplit() needs to be implemented by child classes.
 class BreakableSingleLineToken : public BreakableToken {
 public:
-  virtual unsigned getLineCount() const;
-  virtual unsigned getLineLengthAfterSplit(unsigned LineIndex,
-                                           unsigned TailOffset,
-                                           StringRef::size_type Length) const;
+  unsigned getLineCount() const override;
+  unsigned getLineLengthAfterSplit(unsigned LineIndex, unsigned TailOffset,
+                                   StringRef::size_type Length) const override;
 
 protected:
   BreakableSingleLineToken(const FormatToken &Tok, unsigned IndentLevel,
@@ -123,13 +122,12 @@
                          StringRef Postfix, bool InPPDirective,
                          encoding::Encoding Encoding, const FormatStyle &Style);
 
-  virtual Split getSplit(unsigned LineIndex, unsigned TailOffset,
-                         unsigned ColumnLimit) const;
-  virtual void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
-                           WhitespaceManager &Whitespaces);
-  virtual void replaceWhitespace(unsigned LineIndex, unsigned TailOffset,
-                                 Split Split,
-                                 WhitespaceManager &Whitespaces) {}
+  Split getSplit(unsigned LineIndex, unsigned TailOffset,
+                 unsigned ColumnLimit) const override;
+  void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
+                   WhitespaceManager &Whitespaces) override;
+  void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
+                         WhitespaceManager &Whitespaces) override {}
 };
 
 class BreakableLineComment : public BreakableSingleLineToken {
@@ -142,15 +140,14 @@
                        unsigned StartColumn, bool InPPDirective,
                        encoding::Encoding Encoding, const FormatStyle &Style);
 
-  virtual Split getSplit(unsigned LineIndex, unsigned TailOffset,
-                         unsigned ColumnLimit) const;
-  virtual void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
-                           WhitespaceManager &Whitespaces);
-  virtual void replaceWhitespace(unsigned LineIndex, unsigned TailOffset,
-                                 Split Split,
-                                 WhitespaceManager &Whitespaces);
-  virtual void replaceWhitespaceBefore(unsigned LineIndex,
-                                       WhitespaceManager &Whitespaces);
+  Split getSplit(unsigned LineIndex, unsigned TailOffset,
+                 unsigned ColumnLimit) const override;
+  void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
+                   WhitespaceManager &Whitespaces) override;
+  void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
+                         WhitespaceManager &Whitespaces) override;
+  void replaceWhitespaceBefore(unsigned LineIndex,
+                               WhitespaceManager &Whitespaces) override;
 
 private:
   // The prefix without an additional space if one was added.
@@ -170,19 +167,17 @@
                         bool FirstInLine, bool InPPDirective,
                         encoding::Encoding Encoding, const FormatStyle &Style);
 
-  virtual unsigned getLineCount() const;
-  virtual unsigned getLineLengthAfterSplit(unsigned LineIndex,
-                                           unsigned TailOffset,
-                                           StringRef::size_type Length) const;
-  virtual Split getSplit(unsigned LineIndex, unsigned TailOffset,
-                         unsigned ColumnLimit) const;
-  virtual void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
-                           WhitespaceManager &Whitespaces);
-  virtual void replaceWhitespace(unsigned LineIndex, unsigned TailOffset,
-                                 Split Split,
-                                 WhitespaceManager &Whitespaces);
-  virtual void replaceWhitespaceBefore(unsigned LineIndex,
-                                       WhitespaceManager &Whitespaces);
+  unsigned getLineCount() const override;
+  unsigned getLineLengthAfterSplit(unsigned LineIndex, unsigned TailOffset,
+                                   StringRef::size_type Length) const override;
+  Split getSplit(unsigned LineIndex, unsigned TailOffset,
+                 unsigned ColumnLimit) const override;
+  void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
+                   WhitespaceManager &Whitespaces) override;
+  void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
+                         WhitespaceManager &Whitespaces) override;
+  void replaceWhitespaceBefore(unsigned LineIndex,
+                               WhitespaceManager &Whitespaces) override;
 
 private:
   // Rearranges the whitespace between Lines[LineIndex-1] and Lines[LineIndex],
diff --git a/lib/Format/CMakeLists.txt b/lib/Format/CMakeLists.txt
index e3ef5bd..47e15bd 100644
--- a/lib/Format/CMakeLists.txt
+++ b/lib/Format/CMakeLists.txt
@@ -8,23 +8,9 @@
   TokenAnnotator.cpp
   UnwrappedLineParser.cpp
   WhitespaceManager.cpp
-  )
 
-add_dependencies(clangFormat
-  ClangAttrClasses
-  ClangAttrList
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticFrontend
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangFormat
+  LINK_LIBS
   clangBasic
-  clangFrontend
-  clangAST
-  clangASTMatchers
-  clangRewriteCore
-  clangRewriteFrontend
+  clangLex
   clangTooling
   )
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index 971acc2..5a3dee6 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -63,7 +63,8 @@
                                            bool BinPackInconclusiveFunctions)
     : Style(Style), SourceMgr(SourceMgr), Whitespaces(Whitespaces),
       Encoding(Encoding),
-      BinPackInconclusiveFunctions(BinPackInconclusiveFunctions) {}
+      BinPackInconclusiveFunctions(BinPackInconclusiveFunctions),
+      CommentPragmasRegex(Style.CommentPragmas) {}
 
 LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
                                                 const AnnotatedLine *Line,
@@ -107,7 +108,8 @@
   //   SomeParameter, OtherParameter).DoSomething(
   //   ...
   // As they hide "DoSomething" and are generally bad for readability.
-  if (Previous.opensScope() && State.LowestLevelOnLine < State.StartOfLineLevel)
+  if (Previous.opensScope() && Previous.isNot(tok::l_brace) &&
+      State.LowestLevelOnLine < State.StartOfLineLevel)
     return false;
   if (Current.isMemberAccess() && State.Stack.back().ContainsUnwrappedBuilder)
     return false;
@@ -136,13 +138,19 @@
   if (Style.AlwaysBreakBeforeMultilineStrings &&
       State.Column > State.Stack.back().Indent && // Breaking saves columns.
       !Previous.isOneOf(tok::kw_return, tok::lessless, tok::at) &&
-      Previous.Type != TT_InlineASMColon && NextIsMultilineString(State))
+      Previous.Type != TT_InlineASMColon && nextIsMultilineString(State))
     return true;
   if (((Previous.Type == TT_DictLiteral && Previous.is(tok::l_brace)) ||
        Previous.Type == TT_ArrayInitializerLSquare) &&
       getLengthToMatchingParen(Previous) + State.Column > getColumnLimit(State))
     return true;
+  if (Current.Type == TT_CtorInitializerColon &&
+      (!Style.AllowShortFunctionsOnASingleLine ||
+       Style.BreakConstructorInitializersBeforeComma || Style.ColumnLimit != 0))
+    return true;
 
+  if (State.Column < getNewLineColumn(State))
+    return false;
   if (!Style.BreakBeforeBinaryOperators) {
     // If we need to break somewhere inside the LHS of a binary expression, we
     // should also break after the operator. Otherwise, the formatting would
@@ -173,19 +181,17 @@
   }
 
   // Same as above, but for the first "<<" operator.
-  if (Current.is(tok::lessless) && State.Stack.back().BreakBeforeParameter &&
+  if (Current.is(tok::lessless) && Current.Type != TT_OverloadedOperator &&
+      State.Stack.back().BreakBeforeParameter &&
       State.Stack.back().FirstLessLess == 0)
     return true;
 
-  // FIXME: Comparing LongestObjCSelectorName to 0 is a hacky way of finding
-  // out whether it is the first parameter. Clean this up.
   if (Current.Type == TT_ObjCSelectorName &&
-      Current.LongestObjCSelectorName == 0 &&
+      State.Stack.back().ObjCSelectorNameFound &&
       State.Stack.back().BreakBeforeParameter)
     return true;
-  if ((Current.Type == TT_CtorInitializerColon ||
-       (Previous.ClosesTemplateDeclaration && State.ParenLevel == 0 &&
-        !Current.isTrailingComment())))
+  if (Previous.ClosesTemplateDeclaration && State.ParenLevel == 0 &&
+      !Current.isTrailingComment())
     return true;
 
   if ((Current.Type == TT_StartOfName || Current.is(tok::kw_operator)) &&
@@ -197,6 +203,14 @@
        (State.Stack.back().BreakBeforeParameter &&
         State.Stack.back().ContainsUnwrappedBuilder)))
     return true;
+
+  // The following could be precomputed as they do not depend on the state.
+  // However, as they should take effect only if the UnwrappedLine does not fit
+  // into the ColumnLimit, they are checked here in the ContinuationIndenter.
+  if (Previous.BlockKind == BK_Block && Previous.is(tok::l_brace) &&
+      !Current.isOneOf(tok::r_brace, tok::comment))
+    return true;
+
   return false;
 }
 
@@ -205,8 +219,8 @@
                                                unsigned ExtraSpaces) {
   const FormatToken &Current = *State.NextToken;
 
-  if (State.Stack.size() == 0 ||
-      (Current.Type == TT_ImplicitStringLiteral &&
+  assert(!State.Stack.empty());
+  if ((Current.Type == TT_ImplicitStringLiteral &&
        (Current.Previous->Tok.getIdentifierInfo() == NULL ||
         Current.Previous->Tok.getIdentifierInfo()->getPPKeywordID() ==
             tok::pp_not_keyword))) {
@@ -215,8 +229,8 @@
                                State.NextToken->WhitespaceRange.getEnd()) -
                            SourceMgr.getSpellingColumnNumber(
                                State.NextToken->WhitespaceRange.getBegin());
-    State.Column += WhitespaceLength + State.NextToken->ColumnWidth;
-    State.NextToken = State.NextToken->Next;
+    State.Column += WhitespaceLength;
+    moveStateToNextToken(State, DryRun, /*Newline=*/false);
     return 0;
   }
 
@@ -255,9 +269,12 @@
     Whitespaces.replaceWhitespace(Current, /*Newlines=*/0, /*IndentLevel=*/0,
                                   Spaces, State.Column + Spaces);
 
-  if (Current.Type == TT_ObjCSelectorName && State.Stack.back().ColonPos == 0) {
-    if (State.Stack.back().Indent + Current.LongestObjCSelectorName >
-        State.Column + Spaces + Current.ColumnWidth)
+  if (Current.Type == TT_ObjCSelectorName &&
+      !State.Stack.back().ObjCSelectorNameFound) {
+    if (Current.LongestObjCSelectorName == 0)
+      State.Stack.back().AlignColons = false;
+    else if (State.Stack.back().Indent + Current.LongestObjCSelectorName >
+             State.Column + Spaces + Current.ColumnWidth)
       State.Stack.back().ColonPos =
           State.Stack.back().Indent + Current.LongestObjCSelectorName;
     else
@@ -265,7 +282,7 @@
   }
 
   if (Previous.opensScope() && Previous.Type != TT_ObjCMethodExpr &&
-      Current.Type != TT_LineComment)
+      (Current.Type != TT_LineComment || Previous.BlockKind == BK_BracedInit))
     State.Stack.back().Indent = State.Column + Spaces;
   if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
     State.Stack.back().NoLineBreak = true;
@@ -277,11 +294,12 @@
     // Treat the condition inside an if as if it was a second function
     // parameter, i.e. let nested calls have a continuation indent.
     State.Stack.back().LastSpace = State.Column + 1; // 1 is length of "(".
-  else if (Previous.is(tok::comma) || Previous.Type == TT_ObjCMethodExpr)
+  else if (Current.isNot(tok::comment) &&
+           (Previous.is(tok::comma) ||
+            (Previous.is(tok::colon) && Previous.Type == TT_ObjCMethodExpr)))
     State.Stack.back().LastSpace = State.Column;
   else if ((Previous.Type == TT_BinaryOperator ||
             Previous.Type == TT_ConditionalExpr ||
-            Previous.Type == TT_UnaryOperator ||
             Previous.Type == TT_CtorInitializerColon) &&
            (Previous.getPrecedence() != prec::Assignment ||
             Current.StartsBinaryExpression))
@@ -313,16 +331,15 @@
                                                  bool DryRun) {
   FormatToken &Current = *State.NextToken;
   const FormatToken &Previous = *State.NextToken->Previous;
-  // If we are continuing an expression, we want to use the continuation indent.
-  unsigned ContinuationIndent =
-      std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
-      Style.ContinuationIndentWidth;
+
   // Extra penalty that needs to be added because of the way certain line
   // breaks are chosen.
   unsigned Penalty = 0;
 
-  const FormatToken *PreviousNonComment =
-      State.NextToken->getPreviousNonComment();
+  const FormatToken *PreviousNonComment = Current.getPreviousNonComment();
+  const FormatToken *NextNonComment = Previous.getNextNonComment();
+  if (!NextNonComment)
+    NextNonComment = &Current;
   // The first line break on any ParenLevel causes an extra penalty in order
   // prefer similar line breaks.
   if (!State.Stack.back().ContainsLineBreak)
@@ -332,79 +349,46 @@
   Penalty += State.NextToken->SplitPenalty;
 
   // Breaking before the first "<<" is generally not desirable if the LHS is
-  // short.
-  if (Current.is(tok::lessless) && State.Stack.back().FirstLessLess == 0 &&
-      State.Column <= Style.ColumnLimit / 2)
+  // short. Also always add the penalty if the LHS is split over multiple lines
+  // to avoid unnecessary line breaks that just work around this penalty.
+  if (NextNonComment->is(tok::lessless) &&
+      State.Stack.back().FirstLessLess == 0 &&
+      (State.Column <= Style.ColumnLimit / 3 ||
+       State.Stack.back().BreakBeforeParameter))
     Penalty += Style.PenaltyBreakFirstLessLess;
 
-  if (Current.is(tok::l_brace) && Current.BlockKind == BK_Block) {
-    State.Column = State.FirstIndent;
-  } else if (Current.isOneOf(tok::r_brace, tok::r_square)) {
-    if (Current.closesBlockTypeList(Style) ||
-        (Current.MatchingParen &&
-         Current.MatchingParen->BlockKind == BK_BracedInit))
-      State.Column = State.Stack[State.Stack.size() - 2].LastSpace;
-    else
-      State.Column = State.FirstIndent;
-  } else if (Current.is(tok::string_literal) &&
-             State.StartOfStringLiteral != 0) {
-    State.Column = State.StartOfStringLiteral;
-    State.Stack.back().BreakBeforeParameter = true;
-  } else if (Current.is(tok::lessless) &&
-             State.Stack.back().FirstLessLess != 0) {
-    State.Column = State.Stack.back().FirstLessLess;
-  } else if (Current.isMemberAccess()) {
-    if (State.Stack.back().CallContinuation == 0) {
-      State.Column = ContinuationIndent;
+  State.Column = getNewLineColumn(State);
+  if (NextNonComment->isMemberAccess()) {
+    if (State.Stack.back().CallContinuation == 0)
       State.Stack.back().CallContinuation = State.Column;
-    } else {
-      State.Column = State.Stack.back().CallContinuation;
+  } else if (NextNonComment->Type == TT_ObjCSelectorName) {
+    if (!State.Stack.back().ObjCSelectorNameFound) {
+      if (NextNonComment->LongestObjCSelectorName == 0) {
+        State.Stack.back().AlignColons = false;
+      } else {
+        State.Stack.back().ColonPos =
+            State.Stack.back().Indent + NextNonComment->LongestObjCSelectorName;
+      }
+    } else if (State.Stack.back().AlignColons &&
+               State.Stack.back().ColonPos <= NextNonComment->ColumnWidth) {
+      State.Stack.back().ColonPos = State.Column + NextNonComment->ColumnWidth;
     }
-  } else if (State.Stack.back().QuestionColumn != 0 &&
-             (Current.Type == TT_ConditionalExpr ||
-              Previous.Type == TT_ConditionalExpr)) {
-    State.Column = State.Stack.back().QuestionColumn;
-  } else if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0) {
-    State.Column = State.Stack.back().VariablePos;
-  } else if ((PreviousNonComment &&
-              PreviousNonComment->ClosesTemplateDeclaration) ||
-             ((Current.Type == TT_StartOfName ||
-               Current.is(tok::kw_operator)) &&
-              State.ParenLevel == 0 &&
-              (!Style.IndentFunctionDeclarationAfterType ||
-               State.Line->StartsDefinition))) {
-    State.Column = State.Stack.back().Indent;
-  } else if (Current.Type == TT_ObjCSelectorName) {
-    if (State.Stack.back().ColonPos == 0) {
-      State.Stack.back().ColonPos =
-          State.Stack.back().Indent + Current.LongestObjCSelectorName;
-      State.Column = State.Stack.back().ColonPos - Current.ColumnWidth;
-    } else if (State.Stack.back().ColonPos > Current.ColumnWidth) {
-      State.Column = State.Stack.back().ColonPos - Current.ColumnWidth;
-    } else {
-      State.Column = State.Stack.back().Indent;
-      State.Stack.back().ColonPos = State.Column + Current.ColumnWidth;
-    }
-  } else if (Current.Type == TT_ArraySubscriptLSquare) {
-    if (State.Stack.back().StartOfArraySubscripts != 0)
-      State.Column = State.Stack.back().StartOfArraySubscripts;
-    else
-      State.Column = ContinuationIndent;
-  } else if (Current.Type == TT_StartOfName ||
-             Previous.isOneOf(tok::coloncolon, tok::equal) ||
-             Previous.Type == TT_ObjCMethodExpr) {
-    State.Column = ContinuationIndent;
-  } else if (Current.Type == TT_CtorInitializerColon) {
-    State.Column = State.FirstIndent + Style.ConstructorInitializerIndentWidth;
-  } else if (Current.Type == TT_CtorInitializerComma) {
-    State.Column = State.Stack.back().Indent;
-  } else {
-    State.Column = State.Stack.back().Indent;
-    // Ensure that we fall back to the continuation indent width instead of just
-    // flushing continuations left.
-    if (State.Column == State.FirstIndent &&
-        PreviousNonComment->isNot(tok::r_brace))
-      State.Column += Style.ContinuationIndentWidth;
+  } else if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
+             (PreviousNonComment->Type == TT_ObjCMethodExpr ||
+              PreviousNonComment->Type == TT_DictLiteral)) {
+    // FIXME: This is hacky, find a better way. The problem is that in an ObjC
+    // method expression, the block should be aligned to the line starting it,
+    // e.g.:
+    //   [aaaaaaaaaaaaaaa aaaaaaaaa: // break for some reason
+    //                        ^(int *i) {
+    //                            // ...
+    //                        }];
+    // Thus, we set LastSpace of the next higher ParenLevel, to which we move
+    // when we consume all of the "}"'s FakeRParens at the "{".
+    if (State.Stack.size() > 1)
+      State.Stack[State.Stack.size() - 2].LastSpace =
+          std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
+          Style.ContinuationIndentWidth;
   }
 
   if ((Previous.isOneOf(tok::comma, tok::semi) &&
@@ -413,7 +397,7 @@
     State.Stack.back().BreakBeforeParameter = false;
   if (Previous.Type == TT_TemplateCloser && State.ParenLevel == 0)
     State.Stack.back().BreakBeforeParameter = false;
-  if (Current.is(tok::question) ||
+  if (NextNonComment->is(tok::question) ||
       (PreviousNonComment && PreviousNonComment->is(tok::question)))
     State.Stack.back().BreakBeforeParameter = true;
 
@@ -429,8 +413,6 @@
 
   if (!Current.isTrailingComment())
     State.Stack.back().LastSpace = State.Column;
-  if (Current.isMemberAccess())
-    State.Stack.back().LastSpace += Current.ColumnWidth;
   State.StartOfLineLevel = State.ParenLevel;
   State.LowestLevelOnLine = State.ParenLevel;
 
@@ -443,7 +425,7 @@
       !PreviousNonComment->isOneOf(tok::comma, tok::semi) &&
       PreviousNonComment->Type != TT_TemplateCloser &&
       PreviousNonComment->Type != TT_BinaryOperator &&
-      Current.Type != TT_BinaryOperator && 
+      Current.Type != TT_BinaryOperator &&
       !PreviousNonComment->opensScope())
     State.Stack.back().BreakBeforeParameter = true;
 
@@ -465,6 +447,99 @@
   return Penalty;
 }
 
+unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
+  if (!State.NextToken || !State.NextToken->Previous)
+    return 0;
+  FormatToken &Current = *State.NextToken;
+  const FormatToken &Previous = *State.NextToken->Previous;
+  // If we are continuing an expression, we want to use the continuation indent.
+  unsigned ContinuationIndent =
+      std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
+      Style.ContinuationIndentWidth;
+  const FormatToken *PreviousNonComment = Current.getPreviousNonComment();
+  const FormatToken *NextNonComment = Previous.getNextNonComment();
+  if (!NextNonComment)
+    NextNonComment = &Current;
+  if (NextNonComment->is(tok::l_brace) &&
+      NextNonComment->BlockKind == BK_Block)
+    return State.ParenLevel == 0 ? State.FirstIndent
+                                 : State.Stack.back().Indent;
+  if (Current.isOneOf(tok::r_brace, tok::r_square)) {
+    if (Current.closesBlockTypeList(Style) ||
+        (Current.MatchingParen &&
+         Current.MatchingParen->BlockKind == BK_BracedInit))
+      return State.Stack[State.Stack.size() - 2].LastSpace;
+    else
+      return State.FirstIndent;
+  }
+  if (NextNonComment->isStringLiteral() && State.StartOfStringLiteral != 0)
+    return State.StartOfStringLiteral;
+  if (NextNonComment->is(tok::lessless) &&
+      State.Stack.back().FirstLessLess != 0)
+    return State.Stack.back().FirstLessLess;
+  if (NextNonComment->isMemberAccess()) {
+    if (State.Stack.back().CallContinuation == 0) {
+      return ContinuationIndent;
+    } else {
+      return State.Stack.back().CallContinuation;
+    }
+  }
+  if (State.Stack.back().QuestionColumn != 0 &&
+      (NextNonComment->Type == TT_ConditionalExpr ||
+       Previous.Type == TT_ConditionalExpr))
+    return State.Stack.back().QuestionColumn;
+  if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
+    return State.Stack.back().VariablePos;
+  if ((PreviousNonComment && (PreviousNonComment->ClosesTemplateDeclaration ||
+                              PreviousNonComment->Type == TT_AttributeParen)) ||
+      ((NextNonComment->Type == TT_StartOfName ||
+        NextNonComment->is(tok::kw_operator)) &&
+       State.ParenLevel == 0 && (!Style.IndentFunctionDeclarationAfterType ||
+                                 State.Line->StartsDefinition)))
+    return std::max(State.Stack.back().LastSpace, State.Stack.back().Indent);
+  if (NextNonComment->Type == TT_ObjCSelectorName) {
+    if (!State.Stack.back().ObjCSelectorNameFound) {
+      if (NextNonComment->LongestObjCSelectorName == 0) {
+        return State.Stack.back().Indent;
+      } else {
+        return State.Stack.back().Indent +
+               NextNonComment->LongestObjCSelectorName -
+               NextNonComment->ColumnWidth;
+      }
+    } else if (!State.Stack.back().AlignColons) {
+      return State.Stack.back().Indent;
+    } else if (State.Stack.back().ColonPos > NextNonComment->ColumnWidth) {
+      return State.Stack.back().ColonPos - NextNonComment->ColumnWidth;
+    } else {
+      return State.Stack.back().Indent;
+    }
+  }
+  if (NextNonComment->Type == TT_ArraySubscriptLSquare) {
+    if (State.Stack.back().StartOfArraySubscripts != 0)
+      return State.Stack.back().StartOfArraySubscripts;
+    else
+      return ContinuationIndent;
+  }
+  if (NextNonComment->Type == TT_StartOfName ||
+      Previous.isOneOf(tok::coloncolon, tok::equal)) {
+    return ContinuationIndent;
+  }
+  if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
+      (PreviousNonComment->Type == TT_ObjCMethodExpr ||
+       PreviousNonComment->Type == TT_DictLiteral))
+    return ContinuationIndent;
+  if (NextNonComment->Type == TT_CtorInitializerColon)
+    return State.FirstIndent + Style.ConstructorInitializerIndentWidth;
+  if (NextNonComment->Type == TT_CtorInitializerComma)
+    return State.Stack.back().Indent;
+  if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
+      PreviousNonComment->isNot(tok::r_brace))
+    // Ensure that we fall back to the continuation indent width instead of
+    // just flushing continuations left.
+    return State.Stack.back().Indent + Style.ContinuationIndentWidth;
+  return State.Stack.back().Indent;
+}
+
 unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
                                                     bool DryRun, bool Newline) {
   const FormatToken &Current = *State.NextToken;
@@ -472,7 +547,8 @@
 
   if (Current.Type == TT_InheritanceColon)
     State.Stack.back().AvoidBinPacking = true;
-  if (Current.is(tok::lessless) && State.Stack.back().FirstLessLess == 0)
+  if (Current.is(tok::lessless) && Current.Type != TT_OverloadedOperator &&
+      State.Stack.back().FirstLessLess == 0)
     State.Stack.back().FirstLessLess = State.Column;
   if (Current.Type == TT_ArraySubscriptLSquare &&
       State.Stack.back().StartOfArraySubscripts == 0)
@@ -488,6 +564,8 @@
   if (Current.isMemberAccess())
     State.Stack.back().StartOfFunctionCall =
         Current.LastInChainOfCalls ? 0 : State.Column + Current.ColumnWidth;
+  if (Current.Type == TT_ObjCSelectorName)
+    State.Stack.back().ObjCSelectorNameFound = true;
   if (Current.Type == TT_CtorInitializerColon) {
     // Indent 2 from the column, so:
     // SomeClass::SomeClass()
@@ -531,6 +609,18 @@
           std::max(std::max(State.Column, NewParenState.Indent),
                    State.Stack.back().LastSpace);
 
+    // Don't allow the RHS of an operator to be split over multiple lines unless
+    // there is a line-break right after the operator.
+    // Exclude relational operators, as there, it is always more desirable to
+    // have the LHS 'left' of the RHS.
+    // FIXME: Implement this for '<<' and BreakBeforeBinaryOperators.
+    if (!Newline && Previous && Previous->Type == TT_BinaryOperator &&
+        !Previous->isOneOf(tok::lessless, tok::question, tok::colon) &&
+        Previous->getPrecedence() > prec::Assignment &&
+        Previous->getPrecedence() != prec::Relational &&
+        !Style.BreakBeforeBinaryOperators)
+      NewParenState.NoLineBreak = true;
+
     // Do not indent relative to the fake parentheses inserted for "." or "->".
     // This is a special case to make the following to statements consistent:
     //   OuterFunction(InnerFunctionCall( // break
@@ -539,6 +629,7 @@
     //       ParameterToInnerFunction));
     if (*I > prec::Unknown)
       NewParenState.LastSpace = std::max(NewParenState.LastSpace, State.Column);
+    NewParenState.StartOfFunctionCall = State.Column;
 
     // Always indent conditional expressions. Never indent expression where
     // the 'operator' is ',', ';' or an assignment (i.e. *I <=
@@ -566,7 +657,7 @@
       if (Current.MatchingParen && Current.BlockKind == BK_Block) {
         // If this is an l_brace starting a nested block, we pretend (wrt. to
         // indentation) that we already consumed the corresponding r_brace.
-        // Thus, we remove all ParenStates caused bake fake parentheses that end
+        // Thus, we remove all ParenStates caused by fake parentheses that end
         // at the r_brace. The net effect of this is that we don't indent
         // relative to the l_brace, if the nested block is the last parameter of
         // a function. For example, this formats:
@@ -577,20 +668,32 @@
         //
         // instead of:
         //   SomeFunction(a, [] {
-        //                        f();  // break
-        //                      });
-        for (unsigned i = 0; i != Current.MatchingParen->FakeRParens; ++i)
+        //                     f();  // break
+        //                   });
+        for (unsigned i = 0; i != Current.MatchingParen->FakeRParens; ++i) {
+          assert(State.Stack.size() > 1);
+          if (State.Stack.size() == 1) {
+            // Do not pop the last element.
+            break;
+          }
           State.Stack.pop_back();
-        NewIndent = State.Stack.back().LastSpace + Style.IndentWidth;
+        }
+        // For some reason, ObjC blocks are indented like continuations.
+        NewIndent =
+            State.Stack.back().LastSpace + (Current.Type == TT_ObjCBlockLBrace
+                                                ? Style.ContinuationIndentWidth
+                                                : Style.IndentWidth);
         ++NewIndentLevel;
         BreakBeforeParameter = true;
       } else {
         NewIndent = State.Stack.back().LastSpace;
         if (Current.opensBlockTypeList(Style)) {
           NewIndent += Style.IndentWidth;
+          NewIndent = std::min(State.Column + 2, NewIndent);
           ++NewIndentLevel;
         } else {
           NewIndent += Style.ContinuationIndentWidth;
+          NewIndent = std::min(State.Column + 1, NewIndent);
         }
       }
       const FormatToken *NextNoComment = Current.getNextNonComment();
@@ -649,15 +752,20 @@
     // as they will have been removed early (see above).
     for (unsigned i = 0, e = Current.FakeRParens; i != e; ++i) {
       unsigned VariablePos = State.Stack.back().VariablePos;
+      assert(State.Stack.size() > 1);
+      if (State.Stack.size() == 1) {
+        // Do not pop the last element.
+        break;
+      }
       State.Stack.pop_back();
       State.Stack.back().VariablePos = VariablePos;
     }
   }
 
-  if (Current.is(tok::string_literal) && State.StartOfStringLiteral == 0) {
+  if (Current.isStringLiteral() && State.StartOfStringLiteral == 0) {
     State.StartOfStringLiteral = State.Column;
-  } else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash,
-                              tok::string_literal)) {
+  } else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
+             !Current.isStringLiteral()) {
     State.StartOfStringLiteral = 0;
   }
 
@@ -669,13 +777,15 @@
     Penalty += Style.PenaltyExcessCharacter * ExcessCharacters;
   }
 
+  if (Current.Role)
+    Current.Role->formatFromToken(State, this, DryRun);
   // If the previous has a special role, let it consume tokens as appropriate.
   // It is necessary to start at the previous token for the only implemented
   // role (comma separated list). That way, the decision whether or not to break
   // after the "{" is already done and both options are tried and evaluated.
   // FIXME: This is ugly, find a better way.
   if (Previous && Previous->Role)
-    Penalty += Previous->Role->format(State, this, DryRun);
+    Penalty += Previous->Role->formatAfterToken(State, this, DryRun);
 
   return Penalty;
 }
@@ -727,19 +837,14 @@
   if (Current.Type == TT_ImplicitStringLiteral)
     return 0;
 
-  if (!Current.isOneOf(tok::string_literal, tok::wide_string_literal,
-                       tok::utf8_string_literal, tok::utf16_string_literal,
-                       tok::utf32_string_literal, tok::comment))
+  if (!Current.isStringLiteral() && !Current.is(tok::comment))
     return 0;
 
-  llvm::OwningPtr<BreakableToken> Token;
+  std::unique_ptr<BreakableToken> Token;
   unsigned StartColumn = State.Column - Current.ColumnWidth;
   unsigned ColumnLimit = getColumnLimit(State);
 
-  if (Current.isOneOf(tok::string_literal, tok::wide_string_literal,
-                      tok::utf8_string_literal, tok::utf16_string_literal,
-                      tok::utf32_string_literal) &&
-      Current.Type != TT_ImplicitStringLiteral) {
+  if (Current.isStringLiteral()) {
     // Don't break string literals inside preprocessor directives (except for
     // #define directives, as their contents are stored in separate lines and
     // are not affected by this check).
@@ -755,13 +860,20 @@
     StringRef Text = Current.TokenText;
     StringRef Prefix;
     StringRef Postfix;
+    bool IsNSStringLiteral = false;
     // FIXME: Handle whitespace between '_T', '(', '"..."', and ')'.
     // FIXME: Store Prefix and Suffix (or PrefixLength and SuffixLength to
     // reduce the overhead) for each FormatToken, which is a string, so that we
     // don't run multiple checks here on the hot path.
+    if (Text.startswith("\"") && Current.Previous &&
+        Current.Previous->is(tok::at)) {
+      IsNSStringLiteral = true;
+      Prefix = "@\"";
+    }
     if ((Text.endswith(Postfix = "\"") &&
-         (Text.startswith(Prefix = "\"") || Text.startswith(Prefix = "u\"") ||
-          Text.startswith(Prefix = "U\"") || Text.startswith(Prefix = "u8\"") ||
+         (IsNSStringLiteral || Text.startswith(Prefix = "\"") ||
+          Text.startswith(Prefix = "u\"") || Text.startswith(Prefix = "U\"") ||
+          Text.startswith(Prefix = "u8\"") ||
           Text.startswith(Prefix = "L\""))) ||
         (Text.startswith(Prefix = "_T(\"") && Text.endswith(Postfix = "\")")) ||
         getRawStringLiteralPrefixPostfix(Text, Prefix, Postfix)) {
@@ -772,12 +884,16 @@
       return 0;
     }
   } else if (Current.Type == TT_BlockComment && Current.isTrailingComment()) {
+    if (CommentPragmasRegex.match(Current.TokenText.substr(2)))
+      return 0;
     Token.reset(new BreakableBlockComment(
         Current, State.Line->Level, StartColumn, Current.OriginalColumn,
         !Current.Previous, State.Line->InPPDirective, Encoding, Style));
   } else if (Current.Type == TT_LineComment &&
              (Current.Previous == NULL ||
               Current.Previous->Type != TT_ImplicitStringLiteral)) {
+    if (CommentPragmasRegex.match(Current.TokenText.substr(2)))
+      return 0;
     Token.reset(new BreakableLineComment(Current, State.Line->Level,
                                          StartColumn, /*InPPDirective=*/false,
                                          Encoding, Style));
@@ -848,8 +964,8 @@
         State.Stack[i].BreakBeforeParameter = true;
     }
 
-    Penalty += Current.is(tok::string_literal) ? Style.PenaltyBreakString
-                                               : Style.PenaltyBreakComment;
+    Penalty += Current.isStringLiteral() ? Style.PenaltyBreakString
+                                         : Style.PenaltyBreakComment;
 
     State.Stack.back().LastSpace = StartColumn;
   }
@@ -861,18 +977,19 @@
   return Style.ColumnLimit - (State.Line->InPPDirective ? 2 : 0);
 }
 
-bool ContinuationIndenter::NextIsMultilineString(const LineState &State) {
+bool ContinuationIndenter::nextIsMultilineString(const LineState &State) {
   const FormatToken &Current = *State.NextToken;
-  if (!Current.is(tok::string_literal))
+  if (!Current.isStringLiteral())
     return false;
   // We never consider raw string literals "multiline" for the purpose of
-  // AlwaysBreakBeforeMultilineStrings implementation.
+  // AlwaysBreakBeforeMultilineStrings implementation as they are special-cased
+  // (see TokenAnnotator::mustBreakBefore()).
   if (Current.TokenText.startswith("R\""))
     return false;
   if (Current.IsMultiline)
     return true;
   if (Current.getNextNonComment() &&
-      Current.getNextNonComment()->is(tok::string_literal))
+      Current.getNextNonComment()->isStringLiteral())
     return true; // Implicit concatenation.
   if (State.Column + Current.ColumnWidth + Current.UnbreakableTailLength >
       Style.ColumnLimit)
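
A rough illustration of what nextIsMultilineString() treats as "multiline" (the snippet below is hypothetical input, not part of the patch): an implicitly concatenated literal qualifies because the next non-comment token is another string literal, so with AlwaysBreakBeforeMultilineStrings enabled the formatter prefers to break right after the '=' instead of between the fragments.

    // Classified as multiline via the implicit-concatenation check above.
    const char *Message =
        "implicitly concatenated pieces "
        "are treated as one multiline string";
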
diff --git a/lib/Format/ContinuationIndenter.h b/lib/Format/ContinuationIndenter.h
index b317565..9a3c118 100644
--- a/lib/Format/ContinuationIndenter.h
+++ b/lib/Format/ContinuationIndenter.h
@@ -18,6 +18,7 @@
 
 #include "Encoding.h"
 #include "clang/Format/Format.h"
+#include "llvm/Support/Regex.h"
 
 namespace clang {
 class SourceManager;
@@ -103,6 +104,9 @@
   /// \c Replacement.
   unsigned addTokenOnNewLine(LineState &State, bool DryRun);
 
+  /// \brief Calculate the new column for a line wrap before the next token.
+  unsigned getNewLineColumn(const LineState &State);
+
   /// \brief Adds a multiline token to the \p State.
   ///
   /// \returns Extra penalty for the first line of the literal: last line is
@@ -115,13 +119,14 @@
   ///
   /// This includes implicitly concatenated strings, strings that will be broken
   /// by clang-format and string literals with escaped newlines.
-  bool NextIsMultilineString(const LineState &State);
+  bool nextIsMultilineString(const LineState &State);
 
   FormatStyle Style;
   SourceManager &SourceMgr;
   WhitespaceManager &Whitespaces;
   encoding::Encoding Encoding;
   bool BinPackInconclusiveFunctions;
+  llvm::Regex CommentPragmasRegex;
 };
 
 struct ParenState {
@@ -133,7 +138,8 @@
         NoLineBreak(NoLineBreak), ColonPos(0), StartOfFunctionCall(0),
         StartOfArraySubscripts(0), NestedNameSpecifierContinuation(0),
         CallContinuation(0), VariablePos(0), ContainsLineBreak(false),
-        ContainsUnwrappedBuilder(0) {}
+        ContainsUnwrappedBuilder(0), AlignColons(true),
+        ObjCSelectorNameFound(false) {}
 
   /// \brief The position to which a specific parenthesis level needs to be
   /// indented.
@@ -210,6 +216,20 @@
   /// builder-type call on one line.
   bool ContainsUnwrappedBuilder;
 
+  /// \brief \c true if the colons of the current ObjC method expression should
+  /// be aligned.
+  ///
+  /// Not considered for memoization as it will always have the same value at
+  /// the same token.
+  bool AlignColons;
+
+  /// \brief \c true if at least one selector name was found in the current
+  /// ObjC method expression.
+  ///
+  /// Not considered for memoization as it will always have the same value at
+  /// the same token.
+  bool ObjCSelectorNameFound;
+
   bool operator<(const ParenState &Other) const {
     if (Indent != Other.Indent)
       return Indent < Other.Indent;
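
The CommentPragmasRegex member declared above backs the new CommentPragmas style option (default pattern "^ IWYU pragma:", set later in Format.cpp). When the comment text after the leading "//" matches, the comment-breaking code in ContinuationIndenter.cpp returns early, so such comments are never split. A hypothetical example, assuming the default pattern:

    #include <map>     // IWYU pragma: keep
    #include <vector>  // IWYU pragma: export, left unsplit even past the column limit
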
diff --git a/lib/Format/Encoding.h b/lib/Format/Encoding.h
index 356334d..dba5174 100644
--- a/lib/Format/Encoding.h
+++ b/lib/Format/Encoding.h
@@ -64,6 +64,10 @@
 inline unsigned columnWidth(StringRef Text, Encoding Encoding) {
   if (Encoding == Encoding_UTF8) {
     int ContentWidth = llvm::sys::unicode::columnWidthUTF8(Text);
+    // FIXME: Figure out the correct way to handle this in the presence of both
+    // printable and unprintable multi-byte UTF-8 characters. Falling back to
+    // returning the number of bytes may cause problems, as columnWidth suddenly
+    // becomes non-additive.
     if (ContentWidth >= 0)
       return ContentWidth;
   }
@@ -81,9 +85,7 @@
     StringRef::size_type TabPos = Tail.find('\t');
     if (TabPos == StringRef::npos)
       return TotalWidth + columnWidth(Tail, Encoding);
-    int Width = columnWidth(Tail.substr(0, TabPos), Encoding);
-    assert(Width >= 0);
-    TotalWidth += Width;
+    TotalWidth += columnWidth(Tail.substr(0, TabPos), Encoding);
     TotalWidth += TabWidth - (TotalWidth + StartColumn) % TabWidth;
     Tail = Tail.substr(TabPos + 1);
   }
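
To make the tab arithmetic above concrete, here is a hypothetical, ASCII-only re-implementation (every non-tab byte counted as one column, which matches what columnWidth() returns for plain ASCII); the function name is invented and the code is an illustration, not part of clang.

    #include <cassert>
    #include <string>

    // Standalone sketch of the tab advance used by columnWidthWithTabs: a tab
    // jumps to the next multiple of TabWidth, counted from StartColumn.
    static unsigned asciiColumnWidthWithTabs(const std::string &Text,
                                             unsigned StartColumn,
                                             unsigned TabWidth) {
      unsigned TotalWidth = 0;
      for (char C : Text) {
        if (C == '\t')
          TotalWidth += TabWidth - (TotalWidth + StartColumn) % TabWidth;
        else
          TotalWidth += 1;
      }
      return TotalWidth;
    }

    int main() {
      // "ab\tc" at column 0 with TabWidth 8: 'a' and 'b' take two columns, the
      // tab advances six more to the next tab stop, and 'c' adds one, so 9.
      assert(asciiColumnWidthWithTabs("ab\tc", /*StartColumn=*/0,
                                      /*TabWidth=*/8) == 9);
      return 0;
    }
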
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 01c122e..cb07cb5 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -26,71 +26,93 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/Allocator.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/YAMLTraits.h"
 #include "llvm/Support/Path.h"
+#include "llvm/Support/YAMLTraits.h"
 #include <queue>
 #include <string>
 
+using clang::format::FormatStyle;
+
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(std::string)
+
 namespace llvm {
 namespace yaml {
+template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
+  static void enumeration(IO &IO, FormatStyle::LanguageKind &Value) {
+    IO.enumCase(Value, "Cpp", FormatStyle::LK_Cpp);
+    IO.enumCase(Value, "JavaScript", FormatStyle::LK_JavaScript);
+    IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
+  }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::LanguageStandard> {
+  static void enumeration(IO &IO, FormatStyle::LanguageStandard &Value) {
+    IO.enumCase(Value, "Cpp03", FormatStyle::LS_Cpp03);
+    IO.enumCase(Value, "C++03", FormatStyle::LS_Cpp03);
+    IO.enumCase(Value, "Cpp11", FormatStyle::LS_Cpp11);
+    IO.enumCase(Value, "C++11", FormatStyle::LS_Cpp11);
+    IO.enumCase(Value, "Auto", FormatStyle::LS_Auto);
+  }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::UseTabStyle> {
+  static void enumeration(IO &IO, FormatStyle::UseTabStyle &Value) {
+    IO.enumCase(Value, "Never", FormatStyle::UT_Never);
+    IO.enumCase(Value, "false", FormatStyle::UT_Never);
+    IO.enumCase(Value, "Always", FormatStyle::UT_Always);
+    IO.enumCase(Value, "true", FormatStyle::UT_Always);
+    IO.enumCase(Value, "ForIndentation", FormatStyle::UT_ForIndentation);
+  }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::BraceBreakingStyle> {
+  static void enumeration(IO &IO, FormatStyle::BraceBreakingStyle &Value) {
+    IO.enumCase(Value, "Attach", FormatStyle::BS_Attach);
+    IO.enumCase(Value, "Linux", FormatStyle::BS_Linux);
+    IO.enumCase(Value, "Stroustrup", FormatStyle::BS_Stroustrup);
+    IO.enumCase(Value, "Allman", FormatStyle::BS_Allman);
+    IO.enumCase(Value, "GNU", FormatStyle::BS_GNU);
+  }
+};
+
 template <>
-struct ScalarEnumerationTraits<clang::format::FormatStyle::LanguageStandard> {
+struct ScalarEnumerationTraits<FormatStyle::NamespaceIndentationKind> {
   static void enumeration(IO &IO,
-                          clang::format::FormatStyle::LanguageStandard &Value) {
-    IO.enumCase(Value, "Cpp03", clang::format::FormatStyle::LS_Cpp03);
-    IO.enumCase(Value, "C++03", clang::format::FormatStyle::LS_Cpp03);
-    IO.enumCase(Value, "Cpp11", clang::format::FormatStyle::LS_Cpp11);
-    IO.enumCase(Value, "C++11", clang::format::FormatStyle::LS_Cpp11);
-    IO.enumCase(Value, "Auto", clang::format::FormatStyle::LS_Auto);
+                          FormatStyle::NamespaceIndentationKind &Value) {
+    IO.enumCase(Value, "None", FormatStyle::NI_None);
+    IO.enumCase(Value, "Inner", FormatStyle::NI_Inner);
+    IO.enumCase(Value, "All", FormatStyle::NI_All);
   }
 };
 
 template <>
-struct ScalarEnumerationTraits<clang::format::FormatStyle::UseTabStyle> {
+struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
   static void enumeration(IO &IO,
-                          clang::format::FormatStyle::UseTabStyle &Value) {
-    IO.enumCase(Value, "Never", clang::format::FormatStyle::UT_Never);
-    IO.enumCase(Value, "false", clang::format::FormatStyle::UT_Never);
-    IO.enumCase(Value, "Always", clang::format::FormatStyle::UT_Always);
-    IO.enumCase(Value, "true", clang::format::FormatStyle::UT_Always);
-    IO.enumCase(Value, "ForIndentation",
-                clang::format::FormatStyle::UT_ForIndentation);
+                          FormatStyle::SpaceBeforeParensOptions &Value) {
+    IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
+    IO.enumCase(Value, "ControlStatements",
+                FormatStyle::SBPO_ControlStatements);
+    IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
+
+    // For backward compatibility.
+    IO.enumCase(Value, "false", FormatStyle::SBPO_Never);
+    IO.enumCase(Value, "true", FormatStyle::SBPO_ControlStatements);
   }
 };
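
A hypothetical before/after sketch of the SpaceBeforeParensOptions values enumerated above (the function names are invented and the described behavior is an interpretation of the option, not text from the patch): SBPO_ControlStatements only inserts the space after control-statement keywords, while SBPO_Always, which the new GNU style uses, also puts one before ordinary call parentheses.

    bool Condition = true;
    void handleValue(int) {}

    void spaceBeforeParensDemo(int Value) {
      // SBPO_ControlStatements: space after 'if', none before the call parens.
      if (Condition)
        handleValue(Value);

      // SBPO_Always: a space before the call parens as well.
      if (Condition)
        handleValue (Value);
    }
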
 
-template <>
-struct ScalarEnumerationTraits<clang::format::FormatStyle::BraceBreakingStyle> {
-  static void
-  enumeration(IO &IO, clang::format::FormatStyle::BraceBreakingStyle &Value) {
-    IO.enumCase(Value, "Attach", clang::format::FormatStyle::BS_Attach);
-    IO.enumCase(Value, "Linux", clang::format::FormatStyle::BS_Linux);
-    IO.enumCase(Value, "Stroustrup", clang::format::FormatStyle::BS_Stroustrup);
-    IO.enumCase(Value, "Allman", clang::format::FormatStyle::BS_Allman);
-  }
-};
+template <> struct MappingTraits<FormatStyle> {
+  static void mapping(IO &IO, FormatStyle &Style) {
+    // When reading, read the language first; we need it for getPredefinedStyle.
+    IO.mapOptional("Language", Style.Language);
 
-template <>
-struct ScalarEnumerationTraits<
-    clang::format::FormatStyle::NamespaceIndentationKind> {
-  static void
-  enumeration(IO &IO,
-              clang::format::FormatStyle::NamespaceIndentationKind &Value) {
-    IO.enumCase(Value, "None", clang::format::FormatStyle::NI_None);
-    IO.enumCase(Value, "Inner", clang::format::FormatStyle::NI_Inner);
-    IO.enumCase(Value, "All", clang::format::FormatStyle::NI_All);
-  }
-};
-
-template <> struct MappingTraits<clang::format::FormatStyle> {
-  static void mapping(llvm::yaml::IO &IO, clang::format::FormatStyle &Style) {
     if (IO.outputting()) {
       StringRef StylesArray[] = { "LLVM",    "Google", "Chromium",
-                                  "Mozilla", "WebKit" };
+                                  "Mozilla", "WebKit", "GNU" };
       ArrayRef<StringRef> Styles(StylesArray);
       for (size_t i = 0, e = Styles.size(); i < e; ++i) {
         StringRef StyleName(Styles[i]);
-        clang::format::FormatStyle PredefinedStyle;
-        if (clang::format::getPredefinedStyle(StyleName, &PredefinedStyle) &&
+        FormatStyle PredefinedStyle;
+        if (getPredefinedStyle(StyleName, Style.Language, &PredefinedStyle) &&
             Style == PredefinedStyle) {
           IO.mapOptional("# BasedOnStyle", StyleName);
           break;
@@ -99,11 +121,16 @@
     } else {
       StringRef BasedOnStyle;
       IO.mapOptional("BasedOnStyle", BasedOnStyle);
-      if (!BasedOnStyle.empty())
-        if (!clang::format::getPredefinedStyle(BasedOnStyle, &Style)) {
+      if (!BasedOnStyle.empty()) {
+        FormatStyle::LanguageKind OldLanguage = Style.Language;
+        FormatStyle::LanguageKind Language =
+            ((FormatStyle *)IO.getContext())->Language;
+        if (!getPredefinedStyle(BasedOnStyle, Language, &Style)) {
           IO.setError(Twine("Unknown value for BasedOnStyle: ", BasedOnStyle));
           return;
         }
+        Style.Language = OldLanguage;
+      }
     }
 
     IO.mapOptional("AccessModifierOffset", Style.AccessModifierOffset);
@@ -117,6 +144,8 @@
                    Style.AllowShortIfStatementsOnASingleLine);
     IO.mapOptional("AllowShortLoopsOnASingleLine",
                    Style.AllowShortLoopsOnASingleLine);
+    IO.mapOptional("AllowShortFunctionsOnASingleLine",
+                   Style.AllowShortFunctionsOnASingleLine);
     IO.mapOptional("AlwaysBreakTemplateDeclarations",
                    Style.AlwaysBreakTemplateDeclarations);
     IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
@@ -136,7 +165,10 @@
                    Style.ExperimentalAutoDetectBinPacking);
     IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
     IO.mapOptional("MaxEmptyLinesToKeep", Style.MaxEmptyLinesToKeep);
+    IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
+                   Style.KeepEmptyLinesAtTheStartOfBlocks);
     IO.mapOptional("NamespaceIndentation", Style.NamespaceIndentation);
+    IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
     IO.mapOptional("ObjCSpaceBeforeProtocolList",
                    Style.ObjCSpaceBeforeProtocolList);
     IO.mapOptional("PenaltyBreakBeforeFirstCallParameter",
@@ -164,11 +196,46 @@
     IO.mapOptional("SpaceInEmptyParentheses", Style.SpaceInEmptyParentheses);
     IO.mapOptional("SpacesInCStyleCastParentheses",
                    Style.SpacesInCStyleCastParentheses);
-    IO.mapOptional("SpaceAfterControlStatementKeyword",
-                   Style.SpaceAfterControlStatementKeyword);
+    IO.mapOptional("SpacesInContainerLiterals",
+                   Style.SpacesInContainerLiterals);
     IO.mapOptional("SpaceBeforeAssignmentOperators",
                    Style.SpaceBeforeAssignmentOperators);
     IO.mapOptional("ContinuationIndentWidth", Style.ContinuationIndentWidth);
+    IO.mapOptional("CommentPragmas", Style.CommentPragmas);
+    IO.mapOptional("ForEachMacros", Style.ForEachMacros);
+
+    // For backward compatibility.
+    if (!IO.outputting()) {
+      IO.mapOptional("SpaceAfterControlStatementKeyword",
+                     Style.SpaceBeforeParens);
+    }
+    IO.mapOptional("SpaceBeforeParens", Style.SpaceBeforeParens);
+  }
+};
+
+// Allows reading a vector<FormatStyle> while keeping default values.
+// IO.getContext() should contain a pointer to the FormatStyle structure that
+// will be used to get default values for missing keys.
+// If the first element has no Language specified, it will be treated as the
+// default one for the following elements.
+template <> struct DocumentListTraits<std::vector<FormatStyle> > {
+  static size_t size(IO &IO, std::vector<FormatStyle> &Seq) {
+    return Seq.size();
+  }
+  static FormatStyle &element(IO &IO, std::vector<FormatStyle> &Seq,
+                              size_t Index) {
+    if (Index >= Seq.size()) {
+      assert(Index == Seq.size());
+      FormatStyle Template;
+      if (Seq.size() > 0 && Seq[0].Language == FormatStyle::LK_None) {
+        Template = Seq[0];
+      } else {
+        Template = *((const FormatStyle*)IO.getContext());
+        Template.Language = FormatStyle::LK_None;
+      }
+      Seq.resize(Index + 1, Template);
+    }
+    return Seq[Index];
   }
 };
 }
@@ -177,19 +244,14 @@
 namespace clang {
 namespace format {
 
-void setDefaultPenalties(FormatStyle &Style) {
-  Style.PenaltyBreakComment = 60;
-  Style.PenaltyBreakFirstLessLess = 120;
-  Style.PenaltyBreakString = 1000;
-  Style.PenaltyExcessCharacter = 1000000;
-}
-
 FormatStyle getLLVMStyle() {
   FormatStyle LLVMStyle;
+  LLVMStyle.Language = FormatStyle::LK_Cpp;
   LLVMStyle.AccessModifierOffset = -2;
   LLVMStyle.AlignEscapedNewlinesLeft = false;
   LLVMStyle.AlignTrailingComments = true;
   LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
+  LLVMStyle.AllowShortFunctionsOnASingleLine = true;
   LLVMStyle.AllowShortIfStatementsOnASingleLine = false;
   LLVMStyle.AllowShortLoopsOnASingleLine = false;
   LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
@@ -200,86 +262,84 @@
   LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
   LLVMStyle.BreakConstructorInitializersBeforeComma = false;
   LLVMStyle.ColumnLimit = 80;
+  LLVMStyle.CommentPragmas = "^ IWYU pragma:";
   LLVMStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = false;
   LLVMStyle.ConstructorInitializerIndentWidth = 4;
-  LLVMStyle.Cpp11BracedListStyle = false;
+  LLVMStyle.ContinuationIndentWidth = 4;
+  LLVMStyle.Cpp11BracedListStyle = true;
   LLVMStyle.DerivePointerBinding = false;
   LLVMStyle.ExperimentalAutoDetectBinPacking = false;
+  LLVMStyle.ForEachMacros.push_back("foreach");
+  LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
+  LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
   LLVMStyle.IndentCaseLabels = false;
   LLVMStyle.IndentFunctionDeclarationAfterType = false;
   LLVMStyle.IndentWidth = 2;
   LLVMStyle.TabWidth = 8;
   LLVMStyle.MaxEmptyLinesToKeep = 1;
+  LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
   LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
+  LLVMStyle.ObjCSpaceAfterProperty = false;
   LLVMStyle.ObjCSpaceBeforeProtocolList = true;
   LLVMStyle.PointerBindsToType = false;
   LLVMStyle.SpacesBeforeTrailingComments = 1;
-  LLVMStyle.Standard = FormatStyle::LS_Cpp03;
+  LLVMStyle.Standard = FormatStyle::LS_Cpp11;
   LLVMStyle.UseTab = FormatStyle::UT_Never;
   LLVMStyle.SpacesInParentheses = false;
   LLVMStyle.SpaceInEmptyParentheses = false;
+  LLVMStyle.SpacesInContainerLiterals = true;
   LLVMStyle.SpacesInCStyleCastParentheses = false;
-  LLVMStyle.SpaceAfterControlStatementKeyword = true;
+  LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
   LLVMStyle.SpaceBeforeAssignmentOperators = true;
-  LLVMStyle.ContinuationIndentWidth = 4;
   LLVMStyle.SpacesInAngles = false;
 
-  setDefaultPenalties(LLVMStyle);
+  LLVMStyle.PenaltyBreakComment = 300;
+  LLVMStyle.PenaltyBreakFirstLessLess = 120;
+  LLVMStyle.PenaltyBreakString = 1000;
+  LLVMStyle.PenaltyExcessCharacter = 1000000;
   LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
   LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
 
   return LLVMStyle;
 }
 
-FormatStyle getGoogleStyle() {
-  FormatStyle GoogleStyle;
+FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
+  FormatStyle GoogleStyle = getLLVMStyle();
+  GoogleStyle.Language = Language;
+
   GoogleStyle.AccessModifierOffset = -1;
   GoogleStyle.AlignEscapedNewlinesLeft = true;
-  GoogleStyle.AlignTrailingComments = true;
-  GoogleStyle.AllowAllParametersOfDeclarationOnNextLine = true;
   GoogleStyle.AllowShortIfStatementsOnASingleLine = true;
   GoogleStyle.AllowShortLoopsOnASingleLine = true;
   GoogleStyle.AlwaysBreakBeforeMultilineStrings = true;
   GoogleStyle.AlwaysBreakTemplateDeclarations = true;
-  GoogleStyle.BinPackParameters = true;
-  GoogleStyle.BreakBeforeBinaryOperators = false;
-  GoogleStyle.BreakBeforeTernaryOperators = true;
-  GoogleStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
-  GoogleStyle.BreakConstructorInitializersBeforeComma = false;
-  GoogleStyle.ColumnLimit = 80;
   GoogleStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = true;
-  GoogleStyle.ConstructorInitializerIndentWidth = 4;
-  GoogleStyle.Cpp11BracedListStyle = true;
   GoogleStyle.DerivePointerBinding = true;
-  GoogleStyle.ExperimentalAutoDetectBinPacking = false;
   GoogleStyle.IndentCaseLabels = true;
   GoogleStyle.IndentFunctionDeclarationAfterType = true;
-  GoogleStyle.IndentWidth = 2;
-  GoogleStyle.TabWidth = 8;
-  GoogleStyle.MaxEmptyLinesToKeep = 1;
-  GoogleStyle.NamespaceIndentation = FormatStyle::NI_None;
+  GoogleStyle.KeepEmptyLinesAtTheStartOfBlocks = false;
+  GoogleStyle.ObjCSpaceAfterProperty = false;
   GoogleStyle.ObjCSpaceBeforeProtocolList = false;
   GoogleStyle.PointerBindsToType = true;
   GoogleStyle.SpacesBeforeTrailingComments = 2;
   GoogleStyle.Standard = FormatStyle::LS_Auto;
-  GoogleStyle.UseTab = FormatStyle::UT_Never;
-  GoogleStyle.SpacesInParentheses = false;
-  GoogleStyle.SpaceInEmptyParentheses = false;
-  GoogleStyle.SpacesInCStyleCastParentheses = false;
-  GoogleStyle.SpaceAfterControlStatementKeyword = true;
-  GoogleStyle.SpaceBeforeAssignmentOperators = true;
-  GoogleStyle.ContinuationIndentWidth = 4;
-  GoogleStyle.SpacesInAngles = false;
 
-  setDefaultPenalties(GoogleStyle);
   GoogleStyle.PenaltyReturnTypeOnItsOwnLine = 200;
   GoogleStyle.PenaltyBreakBeforeFirstCallParameter = 1;
 
+  if (Language == FormatStyle::LK_JavaScript) {
+    GoogleStyle.BreakBeforeTernaryOperators = false;
+    GoogleStyle.MaxEmptyLinesToKeep = 2;
+    GoogleStyle.SpacesInContainerLiterals = false;
+  } else if (Language == FormatStyle::LK_Proto) {
+    GoogleStyle.AllowShortFunctionsOnASingleLine = false;
+  }
+
   return GoogleStyle;
 }
 
-FormatStyle getChromiumStyle() {
-  FormatStyle ChromiumStyle = getGoogleStyle();
+FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
+  FormatStyle ChromiumStyle = getGoogleStyle(Language);
   ChromiumStyle.AllowAllParametersOfDeclarationOnNextLine = false;
   ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
   ChromiumStyle.AllowShortLoopsOnASingleLine = false;
@@ -292,12 +352,15 @@
 FormatStyle getMozillaStyle() {
   FormatStyle MozillaStyle = getLLVMStyle();
   MozillaStyle.AllowAllParametersOfDeclarationOnNextLine = false;
+  MozillaStyle.Cpp11BracedListStyle = false;
   MozillaStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = true;
   MozillaStyle.DerivePointerBinding = true;
   MozillaStyle.IndentCaseLabels = true;
+  MozillaStyle.ObjCSpaceAfterProperty = true;
   MozillaStyle.ObjCSpaceBeforeProtocolList = false;
   MozillaStyle.PenaltyReturnTypeOnItsOwnLine = 200;
   MozillaStyle.PointerBindsToType = true;
+  MozillaStyle.Standard = FormatStyle::LS_Cpp03;
   return MozillaStyle;
 }
 
@@ -308,36 +371,94 @@
   Style.BreakBeforeBinaryOperators = true;
   Style.BreakBeforeBraces = FormatStyle::BS_Stroustrup;
   Style.BreakConstructorInitializersBeforeComma = true;
+  Style.Cpp11BracedListStyle = false;
   Style.ColumnLimit = 0;
   Style.IndentWidth = 4;
   Style.NamespaceIndentation = FormatStyle::NI_Inner;
+  Style.ObjCSpaceAfterProperty = true;
   Style.PointerBindsToType = true;
+  Style.Standard = FormatStyle::LS_Cpp03;
   return Style;
 }
 
-bool getPredefinedStyle(StringRef Name, FormatStyle *Style) {
-  if (Name.equals_lower("llvm"))
-    *Style = getLLVMStyle();
-  else if (Name.equals_lower("chromium"))
-    *Style = getChromiumStyle();
-  else if (Name.equals_lower("mozilla"))
-    *Style = getMozillaStyle();
-  else if (Name.equals_lower("google"))
-    *Style = getGoogleStyle();
-  else if (Name.equals_lower("webkit"))
-    *Style = getWebKitStyle();
-  else
-    return false;
+FormatStyle getGNUStyle() {
+  FormatStyle Style = getLLVMStyle();
+  Style.BreakBeforeBinaryOperators = true;
+  Style.BreakBeforeBraces = FormatStyle::BS_GNU;
+  Style.BreakBeforeTernaryOperators = true;
+  Style.Cpp11BracedListStyle = false;
+  Style.ColumnLimit = 79;
+  Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
+  Style.Standard = FormatStyle::LS_Cpp03;
+  return Style;
+}
 
+bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
+                        FormatStyle *Style) {
+  if (Name.equals_lower("llvm")) {
+    *Style = getLLVMStyle();
+  } else if (Name.equals_lower("chromium")) {
+    *Style = getChromiumStyle(Language);
+  } else if (Name.equals_lower("mozilla")) {
+    *Style = getMozillaStyle();
+  } else if (Name.equals_lower("google")) {
+    *Style = getGoogleStyle(Language);
+  } else if (Name.equals_lower("webkit")) {
+    *Style = getWebKitStyle();
+  } else if (Name.equals_lower("gnu")) {
+    *Style = getGNUStyle();
+  } else {
+    return false;
+  }
+
+  Style->Language = Language;
   return true;
 }
 
 llvm::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
+  assert(Style);
+  FormatStyle::LanguageKind Language = Style->Language;
+  assert(Language != FormatStyle::LK_None);
   if (Text.trim().empty())
     return llvm::make_error_code(llvm::errc::invalid_argument);
+
+  std::vector<FormatStyle> Styles;
   llvm::yaml::Input Input(Text);
-  Input >> *Style;
-  return Input.error();
+  // DocumentListTraits<vector<FormatStyle>> uses the context to get default
+  // values for fields whose keys are missing from the configuration.
+  // MappingTraits also uses the context to get the language in order to find
+  // the correct base style.
+  Input.setContext(Style);
+  Input >> Styles;
+  if (Input.error())
+    return Input.error();
+
+  for (unsigned i = 0; i < Styles.size(); ++i) {
+    // Ensures that only the first configuration can skip the Language option.
+    if (Styles[i].Language == FormatStyle::LK_None && i != 0)
+      return llvm::make_error_code(llvm::errc::invalid_argument);
+    // Ensure that each language is configured at most once.
+    for (unsigned j = 0; j < i; ++j) {
+      if (Styles[i].Language == Styles[j].Language) {
+        DEBUG(llvm::dbgs()
+              << "Duplicate languages in the config file on positions " << j
+              << " and " << i << "\n");
+        return llvm::make_error_code(llvm::errc::invalid_argument);
+      }
+    }
+  }
+  // Look for a suitable configuration starting from the end, so we can
+  // find the configuration for the specific language first, and the default
+  // configuration (which can only be at slot 0) after it.
+  for (int i = Styles.size() - 1; i >= 0; --i) {
+    if (Styles[i].Language == Language ||
+        Styles[i].Language == FormatStyle::LK_None) {
+      *Style = Styles[i];
+      Style->Language = Language;
+      return llvm::make_error_code(llvm::errc::success);
+    }
+  }
+  return llvm::make_error_code(llvm::errc::not_supported);
 }
 
 std::string configurationAsText(const FormatStyle &Style) {
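
A minimal usage sketch of the multi-section configuration handling in parseConfiguration() above. parseConfiguration, getLLVMStyle and FormatStyle::LK_JavaScript are the entities from this patch; the helper name and the concrete YAML keys and values are invented. The language-less first section supplies defaults, and the lookup walks from the end so the section whose Language matches the requested one wins.

    #include "clang/Format/Format.h"
    using namespace clang::format;

    // Hypothetical helper: extract the JavaScript section of a two-section
    // configuration, falling back to the language-less defaults for any keys
    // the JavaScript section does not set.
    static bool loadJavaScriptStyle(FormatStyle *Result) {
      *Result = getLLVMStyle();
      Result->Language = FormatStyle::LK_JavaScript; // language to look up
      const char *Config = "BasedOnStyle: Google\n"  // default section
                           "IndentWidth: 2\n"
                           "---\n"                   // second YAML document
                           "Language: JavaScript\n"
                           "ColumnLimit: 100\n";
      llvm::error_code EC = parseConfiguration(Config, Result);
      return !EC; // llvm::errc::success when a matching section was found
    }
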
@@ -381,14 +502,14 @@
   /// \brief Calculates how many lines can be merged into 1 starting at \p I.
   unsigned
   tryFitMultipleLinesInOne(unsigned Indent,
-                           SmallVectorImpl<AnnotatedLine *>::const_iterator &I,
+                           SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                            SmallVectorImpl<AnnotatedLine *>::const_iterator E) {
     // We can never merge stuff if there are trailing line comments.
-    AnnotatedLine *TheLine = *I;
+    const AnnotatedLine *TheLine = *I;
     if (TheLine->Last->Type == TT_LineComment)
       return 0;
 
-    if (Indent > Style.ColumnLimit)
+    if (Style.ColumnLimit > 0 && Indent > Style.ColumnLimit)
       return 0;
 
     unsigned Limit =
@@ -402,16 +523,46 @@
     if (I + 1 == E || I[1]->Type == LT_Invalid)
       return 0;
 
+    if (TheLine->Last->Type == TT_FunctionLBrace &&
+        TheLine->First != TheLine->Last) {
+      return Style.AllowShortFunctionsOnASingleLine
+                 ? tryMergeSimpleBlock(I, E, Limit)
+                 : 0;
+    }
     if (TheLine->Last->is(tok::l_brace)) {
-      return tryMergeSimpleBlock(I, E, Limit);
-    } else if (Style.AllowShortIfStatementsOnASingleLine &&
-               TheLine->First->is(tok::kw_if)) {
-      return tryMergeSimpleControlStatement(I, E, Limit);
-    } else if (Style.AllowShortLoopsOnASingleLine &&
-               TheLine->First->isOneOf(tok::kw_for, tok::kw_while)) {
-      return tryMergeSimpleControlStatement(I, E, Limit);
-    } else if (TheLine->InPPDirective && (TheLine->First->HasUnescapedNewline ||
-                                          TheLine->First->IsFirst)) {
+      return Style.BreakBeforeBraces == FormatStyle::BS_Attach
+                 ? tryMergeSimpleBlock(I, E, Limit)
+                 : 0;
+    }
+    if (I[1]->First->Type == TT_FunctionLBrace &&
+        Style.BreakBeforeBraces != FormatStyle::BS_Attach) {
+      // Check for Limit <= 2 to account for the " {".
+      if (Limit <= 2 || (Style.ColumnLimit == 0 && containsMustBreak(TheLine)))
+        return 0;
+      Limit -= 2;
+
+      unsigned MergedLines = 0;
+      if (Style.AllowShortFunctionsOnASingleLine) {
+        MergedLines = tryMergeSimpleBlock(I + 1, E, Limit);
+        // If we managed to merge the block, count the function header, which is
+        // on a separate line.
+        if (MergedLines > 0)
+          ++MergedLines;
+      }
+      return MergedLines;
+    }
+    if (TheLine->First->is(tok::kw_if)) {
+      return Style.AllowShortIfStatementsOnASingleLine
+                 ? tryMergeSimpleControlStatement(I, E, Limit)
+                 : 0;
+    }
+    if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while)) {
+      return Style.AllowShortLoopsOnASingleLine
+                 ? tryMergeSimpleControlStatement(I, E, Limit)
+                 : 0;
+    }
+    if (TheLine->InPPDirective &&
+        (TheLine->First->HasUnescapedNewline || TheLine->First->IsFirst)) {
       return tryMergeSimplePPDirective(I, E, Limit);
     }
     return 0;
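
A rough before/after illustration of the AllowShortFunctionsOnASingleLine dispatch above (the class is hypothetical, and the merge only happens when the result still fits within the column limit):

    class Rectangle {
      int Width, Height;

    public:
      // With AllowShortFunctionsOnASingleLine (enabled in the LLVM defaults
      // set later in this patch), the trivial body is merged with its header:
      int area() const { return Width * Height; }

      // With the option off, the same definition keeps its body on a separate
      // line:
      //   int area() const {
      //     return Width * Height;
      //   }
    };
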
@@ -419,7 +570,7 @@
 
 private:
   unsigned
-  tryMergeSimplePPDirective(SmallVectorImpl<AnnotatedLine *>::const_iterator &I,
+  tryMergeSimplePPDirective(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                             SmallVectorImpl<AnnotatedLine *>::const_iterator E,
                             unsigned Limit) {
     if (Limit == 0)
@@ -434,23 +585,25 @@
   }
 
   unsigned tryMergeSimpleControlStatement(
-      SmallVectorImpl<AnnotatedLine *>::const_iterator &I,
+      SmallVectorImpl<AnnotatedLine *>::const_iterator I,
       SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) {
     if (Limit == 0)
       return 0;
-    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman &&
+    if ((Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
+         Style.BreakBeforeBraces == FormatStyle::BS_GNU) &&
         I[1]->First->is(tok::l_brace))
       return 0;
     if (I[1]->InPPDirective != (*I)->InPPDirective ||
         (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline))
       return 0;
+    Limit = limitConsideringMacros(I + 1, E, Limit);
     AnnotatedLine &Line = **I;
     if (Line.Last->isNot(tok::r_paren))
       return 0;
     if (1 + I[1]->Last->TotalLength > Limit)
       return 0;
     if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for,
-                                   tok::kw_while) ||
+                             tok::kw_while) ||
         I[1]->First->Type == TT_LineComment)
       return 0;
     // Only inline simple if's (no nested if or else).
@@ -461,13 +614,9 @@
   }
 
   unsigned
-  tryMergeSimpleBlock(SmallVectorImpl<AnnotatedLine *>::const_iterator &I,
+  tryMergeSimpleBlock(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                       SmallVectorImpl<AnnotatedLine *>::const_iterator E,
                       unsigned Limit) {
-    // No merging if the brace already is on the next line.
-    if (Style.BreakBeforeBraces != FormatStyle::BS_Attach)
-      return 0;
-
     // First, check that the current line allows merging. This is the case if
     // we're not in a control flow statement and the last token is an opening
     // brace.
@@ -491,6 +640,7 @@
       // Check that we still have three lines and they fit into the limit.
       if (I + 2 == E || I[2]->Type == LT_Invalid)
         return 0;
+      Limit = limitConsideringMacros(I + 2, E, Limit);
 
       if (!nextTwoLinesFitInto(I, Limit))
         return 0;
@@ -516,34 +666,51 @@
     return 0;
   }
 
+  /// Returns the modified column limit for \p I if it is inside a macro and
+  /// needs a trailing '\'.
+  unsigned
+  limitConsideringMacros(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+                         SmallVectorImpl<AnnotatedLine *>::const_iterator E,
+                         unsigned Limit) {
+    if (I[0]->InPPDirective && I + 1 != E &&
+        !I[1]->First->HasUnescapedNewline && !I[1]->First->is(tok::eof)) {
+      return Limit < 2 ? 0 : Limit - 2;
+    }
+    return Limit;
+  }
+
   bool nextTwoLinesFitInto(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                            unsigned Limit) {
     return 1 + I[1]->Last->TotalLength + 1 + I[2]->Last->TotalLength <= Limit;
   }
 
+  bool containsMustBreak(const AnnotatedLine *Line) {
+    for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
+      if (Tok->MustBreakBefore)
+        return true;
+    }
+    return false;
+  }
+
   const FormatStyle &Style;
 };
 
 class UnwrappedLineFormatter {
 public:
-  UnwrappedLineFormatter(SourceManager &SourceMgr,
-                         SmallVectorImpl<CharSourceRange> &Ranges,
-                         ContinuationIndenter *Indenter,
+  UnwrappedLineFormatter(ContinuationIndenter *Indenter,
                          WhitespaceManager *Whitespaces,
                          const FormatStyle &Style)
-      : SourceMgr(SourceMgr), Ranges(Ranges), Indenter(Indenter),
-        Whitespaces(Whitespaces), Style(Style), Joiner(Style) {}
+      : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style),
+        Joiner(Style) {}
 
   unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines, bool DryRun,
-                  int AdditionalIndent = 0) {
+                  int AdditionalIndent = 0, bool FixBadIndentation = false) {
     assert(!Lines.empty());
     unsigned Penalty = 0;
     std::vector<int> IndentForLevel;
     for (unsigned i = 0, e = Lines[0]->Level; i != e; ++i)
       IndentForLevel.push_back(Style.IndentWidth * i + AdditionalIndent);
-    bool PreviousLineWasTouched = false;
     const AnnotatedLine *PreviousLine = NULL;
-    bool FormatPPDirective = false;
     for (SmallVectorImpl<AnnotatedLine *>::const_iterator I = Lines.begin(),
                                                           E = Lines.end();
          I != E; ++I) {
@@ -551,21 +718,30 @@
       const FormatToken *FirstTok = TheLine.First;
       int Offset = getIndentOffset(*FirstTok);
 
-      // Check whether this line is part of a formatted preprocessor directive.
-      if (FirstTok->HasUnescapedNewline)
-        FormatPPDirective = false;
-      if (!FormatPPDirective && TheLine.InPPDirective &&
-          (touchesLine(TheLine) || touchesPPDirective(I + 1, E)))
-        FormatPPDirective = true;
-
       // Determine indent and try to merge multiple unwrapped lines.
-      while (IndentForLevel.size() <= TheLine.Level)
-        IndentForLevel.push_back(-1);
-      IndentForLevel.resize(TheLine.Level + 1);
-      unsigned Indent = getIndent(IndentForLevel, TheLine.Level);
+      unsigned Indent;
+      if (TheLine.InPPDirective) {
+        Indent = TheLine.Level * Style.IndentWidth;
+      } else {
+        while (IndentForLevel.size() <= TheLine.Level)
+          IndentForLevel.push_back(-1);
+        IndentForLevel.resize(TheLine.Level + 1);
+        Indent = getIndent(IndentForLevel, TheLine.Level);
+      }
+      unsigned LevelIndent = Indent;
       if (static_cast<int>(Indent) + Offset >= 0)
         Indent += Offset;
+
+      // Merge multiple lines if possible.
       unsigned MergedLines = Joiner.tryFitMultipleLinesInOne(Indent, I, E);
+      if (MergedLines > 0 && Style.ColumnLimit == 0) {
+        // Disallow line merging if there is a break at the start of one of the
+        // input lines.
+        for (unsigned i = 0; i < MergedLines; ++i) {
+          if (I[i + 1]->First->NewlinesBefore > 0)
+            MergedLines = 0;
+        }
+      }
       if (!DryRun) {
         for (unsigned i = 0; i < MergedLines; ++i) {
           join(*I[i], *I[i + 1]);
@@ -573,18 +749,18 @@
       }
       I += MergedLines;
 
-      bool WasMoved = PreviousLineWasTouched && FirstTok->NewlinesBefore == 0;
+      bool FixIndentation =
+          FixBadIndentation && (LevelIndent != FirstTok->OriginalColumn);
       if (TheLine.First->is(tok::eof)) {
-        if (PreviousLineWasTouched && !DryRun) {
+        if (PreviousLine && PreviousLine->Affected && !DryRun) {
+          // Remove the file's trailing whitespace.
           unsigned Newlines = std::min(FirstTok->NewlinesBefore, 1u);
           Whitespaces->replaceWhitespace(*TheLine.First, Newlines,
                                          /*IndentLevel=*/0, /*Spaces=*/0,
                                          /*TargetColumn=*/0);
         }
       } else if (TheLine.Type != LT_Invalid &&
-                 (WasMoved || FormatPPDirective || touchesLine(TheLine))) {
-        unsigned LevelIndent =
-            getIndent(IndentForLevel, TheLine.Level);
+                 (TheLine.Affected || FixIndentation)) {
         if (FirstTok->WhitespaceRange.isValid()) {
           if (!DryRun)
             formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level,
@@ -606,6 +782,7 @@
           while (State.NextToken != NULL)
             Indenter->addTokenToState(State, /*Newline=*/false, DryRun);
         } else if (Style.ColumnLimit == 0) {
+          // FIXME: Implement nested blocks for ColumnLimit = 0.
           NoColumnLimitFormatter Formatter(Indenter);
           if (!DryRun)
             Formatter.format(Indent, &TheLine);
@@ -613,8 +790,10 @@
           Penalty += format(TheLine, Indent, DryRun);
         }
 
-        IndentForLevel[TheLine.Level] = LevelIndent;
-        PreviousLineWasTouched = true;
+        if (!TheLine.InPPDirective)
+          IndentForLevel[TheLine.Level] = LevelIndent;
+      } else if (TheLine.ChildrenAffected) {
+        format(TheLine.Children, DryRun);
       } else {
         // Format the first token if necessary, and notify the WhitespaceManager
         // about the unchanged whitespace.
@@ -623,9 +802,9 @@
               (Tok->NewlinesBefore > 0 || Tok->IsFirst)) {
             unsigned LevelIndent = Tok->OriginalColumn;
             if (!DryRun) {
-              // Remove trailing whitespace of the previous line if it was
-              // touched.
-              if (PreviousLineWasTouched || touchesEmptyLineBefore(TheLine)) {
+              // Remove trailing whitespace of the previous line.
+              if ((PreviousLine && PreviousLine->Affected) ||
+                  TheLine.LeadingEmptyLinesAffected) {
                 formatFirstToken(*Tok, PreviousLine, TheLine.Level, LevelIndent,
                                  TheLine.InPPDirective);
               } else {
@@ -635,16 +814,12 @@
 
             if (static_cast<int>(LevelIndent) - Offset >= 0)
               LevelIndent -= Offset;
-            if (Tok->isNot(tok::comment))
+            if (Tok->isNot(tok::comment) && !TheLine.InPPDirective)
               IndentForLevel[TheLine.Level] = LevelIndent;
           } else if (!DryRun) {
             Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective);
           }
         }
-        // If we did not reformat this unwrapped line, the column at the end of
-        // the last token is unchanged - thus, we can calculate the end of the
-        // last token.
-        PreviousLineWasTouched = false;
       }
       if (!DryRun) {
         for (FormatToken *Tok = TheLine.First; Tok != NULL; Tok = Tok->Next) {
@@ -723,6 +898,12 @@
     if (Newlines == 0 && !RootToken.IsFirst)
       Newlines = 1;
 
+    // Remove empty lines after "{".
+    if (!Style.KeepEmptyLinesAtTheStartOfBlocks && PreviousLine &&
+        PreviousLine->Last->is(tok::l_brace) &&
+        PreviousLine->First->isNot(tok::kw_namespace))
+      Newlines = 1;
+
     // Insert extra new line before access specifiers.
     if (PreviousLine && PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) &&
         RootToken.isAccessSpecifier() && RootToken.NewlinesBefore == 1)
@@ -732,9 +913,9 @@
     if (PreviousLine && PreviousLine->First->isAccessSpecifier())
       Newlines = std::min(1u, Newlines);
 
-    Whitespaces->replaceWhitespace(
-        RootToken, Newlines, IndentLevel, Indent, Indent,
-        InPPDirective && !RootToken.HasUnescapedNewline);
+    Whitespaces->replaceWhitespace(RootToken, Newlines, IndentLevel, Indent,
+                                   Indent, InPPDirective &&
+                                               !RootToken.HasUnescapedNewline);
   }
 
   /// \brief Get the indent of \p Level from \p IndentForLevel.
@@ -753,6 +934,8 @@
   void join(AnnotatedLine &A, const AnnotatedLine &B) {
     assert(!A.Last->Next);
     assert(!B.First->Previous);
+    if (B.Affected)
+      A.Affected = true;
     A.Last->Next = B.First;
     B.First->Previous = A.Last;
     B.First->CanBreakBefore = true;
@@ -768,48 +951,6 @@
     return Style.ColumnLimit - (InPPDirective ? 2 : 0);
   }
 
-  bool touchesRanges(const CharSourceRange &Range) {
-    for (SmallVectorImpl<CharSourceRange>::const_iterator I = Ranges.begin(),
-                                                          E = Ranges.end();
-         I != E; ++I) {
-      if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), I->getBegin()) &&
-          !SourceMgr.isBeforeInTranslationUnit(I->getEnd(), Range.getBegin()))
-        return true;
-    }
-    return false;
-  }
-
-  bool touchesLine(const AnnotatedLine &TheLine) {
-    const FormatToken *First = TheLine.First;
-    const FormatToken *Last = TheLine.Last;
-    CharSourceRange LineRange = CharSourceRange::getCharRange(
-        First->WhitespaceRange.getBegin().getLocWithOffset(
-            First->LastNewlineOffset),
-        Last->getStartOfNonWhitespace().getLocWithOffset(
-            Last->TokenText.size() - 1));
-    return touchesRanges(LineRange);
-  }
-
-  bool touchesPPDirective(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
-                          SmallVectorImpl<AnnotatedLine *>::const_iterator E) {
-    for (; I != E; ++I) {
-      if ((*I)->First->HasUnescapedNewline)
-        return false;
-      if (touchesLine(**I))
-        return true;
-    }
-    return false;
-  }
-
-  bool touchesEmptyLineBefore(const AnnotatedLine &TheLine) {
-    const FormatToken *First = TheLine.First;
-    CharSourceRange LineRange = CharSourceRange::getCharRange(
-        First->WhitespaceRange.getBegin(),
-        First->WhitespaceRange.getBegin().getLocWithOffset(
-            First->LastNewlineOffset));
-    return touchesRanges(LineRange);
-  }
-
   /// \brief Analyze the entire solution space starting from \p InitialState.
   ///
   /// This implements a variant of Dijkstra's algorithm on the graph that spans
@@ -955,13 +1096,14 @@
     if (NewLine) {
       int AdditionalIndent = State.Stack.back().Indent -
                              Previous.Children[0]->Level * Style.IndentWidth;
-      Penalty += format(Previous.Children, DryRun, AdditionalIndent);
+      Penalty += format(Previous.Children, DryRun, AdditionalIndent,
+                        /*FixBadIndentation=*/true);
       return true;
     }
 
     // Cannot merge multiple statements into a single line.
     if (Previous.Children.size() > 1)
-      return false; 
+      return false;
 
     // We can't put the closing "}" on a line with a trailing comment.
     if (Previous.Children[0]->Last->isTrailingComment())
@@ -979,8 +1121,6 @@
     return true;
   }
 
-  SourceManager &SourceMgr;
-  SmallVectorImpl<CharSourceRange> &Ranges;
   ContinuationIndenter *Indenter;
   WhitespaceManager *Whitespaces;
   FormatStyle Style;
@@ -997,13 +1137,17 @@
         TrailingWhitespace(0), Lex(Lex), SourceMgr(SourceMgr), Style(Style),
         IdentTable(getFormattingLangOpts()), Encoding(Encoding) {
     Lex.SetKeepWhitespaceMode(true);
+
+    for (const std::string& ForEachMacro : Style.ForEachMacros)
+      ForEachMacros.push_back(&IdentTable.get(ForEachMacro));
+    std::sort(ForEachMacros.begin(), ForEachMacros.end());
   }
 
   ArrayRef<FormatToken *> lex() {
     assert(Tokens.empty());
     do {
       Tokens.push_back(getNextToken());
-      maybeJoinPreviousTokens();
+      tryMergePreviousTokens();
     } while (Tokens.back()->Tok.isNot(tok::eof));
     return Tokens;
   }
@@ -1011,23 +1155,65 @@
   IdentifierTable &getIdentTable() { return IdentTable; }
 
 private:
-  void maybeJoinPreviousTokens() {
-    if (Tokens.size() < 4)
+  void tryMergePreviousTokens() {
+    if (tryMerge_TMacro())
       return;
+
+    if (Style.Language == FormatStyle::LK_JavaScript) {
+      static tok::TokenKind JSIdentity[] = { tok::equalequal, tok::equal };
+      static tok::TokenKind JSNotIdentity[] = { tok::exclaimequal, tok::equal };
+      static tok::TokenKind JSShiftEqual[] = { tok::greater, tok::greater,
+                                               tok::greaterequal };
+      // FIXME: We probably need to change the token type to mimic an operator
+      // with the correct priority.
+      if (tryMergeTokens(JSIdentity))
+        return;
+      if (tryMergeTokens(JSNotIdentity))
+        return;
+      if (tryMergeTokens(JSShiftEqual))
+        return;
+    }
+  }
+
+  bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds) {
+    if (Tokens.size() < Kinds.size())
+      return false;
+
+    SmallVectorImpl<FormatToken *>::const_iterator First =
+        Tokens.end() - Kinds.size();
+    if (!First[0]->is(Kinds[0]))
+      return false;
+    unsigned AddLength = 0;
+    for (unsigned i = 1; i < Kinds.size(); ++i) {
+      if (!First[i]->is(Kinds[i]) || First[i]->WhitespaceRange.getBegin() !=
+                                         First[i]->WhitespaceRange.getEnd())
+        return false;
+      AddLength += First[i]->TokenText.size();
+    }
+    Tokens.resize(Tokens.size() - Kinds.size() + 1);
+    First[0]->TokenText = StringRef(First[0]->TokenText.data(),
+                                    First[0]->TokenText.size() + AddLength);
+    First[0]->ColumnWidth += AddLength;
+    return true;
+  }
+
+  bool tryMerge_TMacro() {
+    if (Tokens.size() < 4)
+      return false;
     FormatToken *Last = Tokens.back();
     if (!Last->is(tok::r_paren))
-      return;
+      return false;
 
     FormatToken *String = Tokens[Tokens.size() - 2];
     if (!String->is(tok::string_literal) || String->IsMultiline)
-      return;
+      return false;
 
     if (!Tokens[Tokens.size() - 3]->is(tok::l_paren))
-      return;
+      return false;
 
     FormatToken *Macro = Tokens[Tokens.size() - 4];
     if (Macro->TokenText != "_T")
-      return;
+      return false;
 
     const char *Start = Macro->TokenText.data();
     const char *End = Last->TokenText.data() + Last->TokenText.size();
@@ -1043,6 +1229,7 @@
     Tokens.pop_back();
     Tokens.pop_back();
     Tokens.back() = String;
+    return true;
   }
 
   FormatToken *getNextToken() {
@@ -1174,6 +1361,10 @@
       Column = FormatTok->LastLineColumnWidth;
     }
 
+    FormatTok->IsForEachMacro =
+        std::binary_search(ForEachMacros.begin(), ForEachMacros.end(),
+                           FormatTok->Tok.getIdentifierInfo());
+
     return FormatTok;
   }
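
The IsForEachMacro flag computed above is driven by the new ForEachMacros option (defaults foreach, Q_FOREACH and BOOST_FOREACH, set in getLLVMStyle() earlier in this patch); presumably later stages use the flag to lay such invocations out like for/while headers rather than ordinary function calls. A sketch with a hypothetical macro, which would only be tagged if "MY_FOREACH" were listed in ForEachMacros:

    #include <vector>

    // Hypothetical project macro; clang-format only treats it specially if it
    // is listed in the ForEachMacros style option.
    #define MY_FOREACH(Decl, Range) for (Decl : Range)

    int sum(const std::vector<int> &Numbers) {
      int Total = 0;
      MY_FOREACH (int Value, Numbers)
        Total += Value;
      return Total;
    }
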
 
@@ -1189,6 +1380,7 @@
   encoding::Encoding Encoding;
   llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
   SmallVector<FormatToken *, 16> Tokens;
+  SmallVector<IdentifierInfo*, 8> ForEachMacros;
 
   void readRawToken(FormatToken &Tok) {
     Lex.LexFromRawLexer(Tok.Tok);
@@ -1196,14 +1388,31 @@
                               Tok.Tok.getLength());
     // For formatting, treat unterminated string literals like normal string
     // literals.
-    if (Tok.is(tok::unknown) && !Tok.TokenText.empty() &&
-        Tok.TokenText[0] == '"') {
-      Tok.Tok.setKind(tok::string_literal);
-      Tok.IsUnterminatedLiteral = true;
+    if (Tok.is(tok::unknown)) {
+      if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
+        Tok.Tok.setKind(tok::string_literal);
+        Tok.IsUnterminatedLiteral = true;
+      } else if (Style.Language == FormatStyle::LK_JavaScript &&
+                 Tok.TokenText == "''") {
+        Tok.Tok.setKind(tok::char_constant);
+      }
     }
   }
 };
 
+static StringRef getLanguageName(FormatStyle::LanguageKind Language) {
+  switch (Language) {
+  case FormatStyle::LK_Cpp:
+    return "C++";
+  case FormatStyle::LK_JavaScript:
+    return "JavaScript";
+  case FormatStyle::LK_Proto:
+    return "Proto";
+  default:
+    return "Unknown";
+  }
+}
+
 class Formatter : public UnwrappedLineConsumer {
 public:
   Formatter(const FormatStyle &Style, Lexer &Lex, SourceManager &SourceMgr,
@@ -1216,6 +1425,8 @@
                        << (Encoding == encoding::Encoding_UTF8 ? "UTF8"
                                                                : "unknown")
                        << "\n");
+    DEBUG(llvm::dbgs() << "Language: " << getLanguageName(Style.Language)
+                       << "\n");
   }
 
   tooling::Replacements format() {
@@ -1261,17 +1472,152 @@
     for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
       Annotator.calculateFormattingInformation(*AnnotatedLines[i]);
     }
+    computeAffectedLines(AnnotatedLines.begin(), AnnotatedLines.end());
 
     Annotator.setCommentLineLevels(AnnotatedLines);
     ContinuationIndenter Indenter(Style, SourceMgr, Whitespaces, Encoding,
                                   BinPackInconclusiveFunctions);
-    UnwrappedLineFormatter Formatter(SourceMgr, Ranges, &Indenter, &Whitespaces,
-                                     Style);
+    UnwrappedLineFormatter Formatter(&Indenter, &Whitespaces, Style);
     Formatter.format(AnnotatedLines, /*DryRun=*/false);
     return Whitespaces.generateReplacements();
   }
 
 private:
+  // Determines which lines are affected by the SourceRanges given as input.
+  // Returns \c true if at least one line between I and E or one of their
+  // children is affected.
+  bool computeAffectedLines(SmallVectorImpl<AnnotatedLine *>::iterator I,
+                            SmallVectorImpl<AnnotatedLine *>::iterator E) {
+    bool SomeLineAffected = false;
+    const AnnotatedLine *PreviousLine = NULL;
+    while (I != E) {
+      AnnotatedLine *Line = *I;
+      Line->LeadingEmptyLinesAffected = affectsLeadingEmptyLines(*Line->First);
+
+      // If a line is part of a preprocessor directive, it needs to be formatted
+      // if any token within the directive is affected.
+      if (Line->InPPDirective) {
+        FormatToken *Last = Line->Last;
+        SmallVectorImpl<AnnotatedLine *>::iterator PPEnd = I + 1;
+        while (PPEnd != E && !(*PPEnd)->First->HasUnescapedNewline) {
+          Last = (*PPEnd)->Last;
+          ++PPEnd;
+        }
+
+        if (affectsTokenRange(*Line->First, *Last,
+                              /*IncludeLeadingNewlines=*/false)) {
+          SomeLineAffected = true;
+          markAllAsAffected(I, PPEnd);
+        }
+        I = PPEnd;
+        continue;
+      }
+
+      if (nonPPLineAffected(Line, PreviousLine))
+        SomeLineAffected = true;
+
+      PreviousLine = Line;
+      ++I;
+    }
+    return SomeLineAffected;
+  }
+
+  // Determines whether 'Line' is affected by the SourceRanges given as input.
+  // Returns \c true if the line or one of its children is affected.
+  bool nonPPLineAffected(AnnotatedLine *Line,
+                         const AnnotatedLine *PreviousLine) {
+    bool SomeLineAffected = false;
+    Line->ChildrenAffected =
+        computeAffectedLines(Line->Children.begin(), Line->Children.end());
+    if (Line->ChildrenAffected)
+      SomeLineAffected = true;
+
+    // Stores whether one of the line's tokens is directly affected.
+    bool SomeTokenAffected = false;
+    // Stores whether we need to look at the leading newlines of the next token
+    // in order to determine whether it was affected.
+    bool IncludeLeadingNewlines = false;
+
+    // Stores whether the first child line of any of this line's tokens is
+    // affected.
+    bool SomeFirstChildAffected = false;
+
+    for (FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
+      // Determine whether 'Tok' was affected.
+      if (affectsTokenRange(*Tok, *Tok, IncludeLeadingNewlines))
+        SomeTokenAffected = true;
+
+      // Determine whether the first child of 'Tok' was affected.
+      if (!Tok->Children.empty() && Tok->Children.front()->Affected)
+        SomeFirstChildAffected = true;
+
+      IncludeLeadingNewlines = Tok->Children.empty();
+    }
+
+    // Was this line moved, i.e. has it previously been on the same line as an
+    // affected line?
+    bool LineMoved = PreviousLine && PreviousLine->Affected &&
+                     Line->First->NewlinesBefore == 0;
+
+    bool IsContinuedComment = Line->First->is(tok::comment) &&
+                              Line->First->Next == NULL &&
+                              Line->First->NewlinesBefore < 2 && PreviousLine &&
+                              PreviousLine->Affected &&
+                              PreviousLine->Last->is(tok::comment);
+
+    if (SomeTokenAffected || SomeFirstChildAffected || LineMoved ||
+        IsContinuedComment) {
+      Line->Affected = true;
+      SomeLineAffected = true;
+    }
+    return SomeLineAffected;
+  }
+
+  // Marks all lines between I and E as well as all their children as affected.
+  void markAllAsAffected(SmallVectorImpl<AnnotatedLine *>::iterator I,
+                         SmallVectorImpl<AnnotatedLine *>::iterator E) {
+    while (I != E) {
+      (*I)->Affected = true;
+      markAllAsAffected((*I)->Children.begin(), (*I)->Children.end());
+      ++I;
+    }
+  }
+
+  // Returns true if the range from 'First' to 'Last' intersects with one of the
+  // input ranges.
+  bool affectsTokenRange(const FormatToken &First, const FormatToken &Last,
+                         bool IncludeLeadingNewlines) {
+    SourceLocation Start = First.WhitespaceRange.getBegin();
+    if (!IncludeLeadingNewlines)
+      Start = Start.getLocWithOffset(First.LastNewlineOffset);
+    SourceLocation End = Last.getStartOfNonWhitespace();
+    if (Last.TokenText.size() > 0)
+      End = End.getLocWithOffset(Last.TokenText.size() - 1);
+    CharSourceRange Range = CharSourceRange::getCharRange(Start, End);
+    return affectsCharSourceRange(Range);
+  }
+
+  // Returns true if one of the input ranges intersects the leading empty lines
+  // before 'Tok'.
+  bool affectsLeadingEmptyLines(const FormatToken &Tok) {
+    CharSourceRange EmptyLineRange = CharSourceRange::getCharRange(
+        Tok.WhitespaceRange.getBegin(),
+        Tok.WhitespaceRange.getBegin().getLocWithOffset(Tok.LastNewlineOffset));
+    return affectsCharSourceRange(EmptyLineRange);
+  }
+
+  // Returns true if 'Range' intersects with one of the input ranges.
+  bool affectsCharSourceRange(const CharSourceRange &Range) {
+    for (SmallVectorImpl<CharSourceRange>::const_iterator I = Ranges.begin(),
+                                                          E = Ranges.end();
+         I != E; ++I) {
+      if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), I->getBegin()) &&
+          !SourceMgr.isBeforeInTranslationUnit(I->getEnd(), Range.getBegin()))
+        return true;
+    }
+    return false;
+  }
+
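For reference, the check in affectsCharSourceRange above is the usual closed-interval
overlap test: two ranges intersect exactly when neither one ends before the other
begins. A minimal sketch over plain offsets (rangesOverlap is a hypothetical helper
used only for illustration, not part of this patch):

    // Two closed ranges [ABegin, AEnd] and [BBegin, BEnd] overlap iff
    // neither range ends strictly before the other one starts.
    // (Illustrative helper, not part of the patch.)
    static bool rangesOverlap(unsigned ABegin, unsigned AEnd,
                              unsigned BBegin, unsigned BEnd) {
      return !(AEnd < BBegin) && !(BEnd < ABegin);
    }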
   static bool inputUsesCRLF(StringRef Text) {
     return Text.count('\r') * 2 > Text.count('\n');
   }
@@ -1330,12 +1676,12 @@
         HasBinPackedFunction || !HasOnePerLineFunction;
   }
 
-  virtual void consumeUnwrappedLine(const UnwrappedLine &TheLine) {
+  void consumeUnwrappedLine(const UnwrappedLine &TheLine) override {
     assert(!UnwrappedLines.empty());
     UnwrappedLines.back().push_back(TheLine);
   }
 
-  virtual void finishRun() {
+  void finishRun() override {
     UnwrappedLines.push_back(SmallVector<UnwrappedLine, 16>());
   }
 
@@ -1407,11 +1753,25 @@
     "parameters, e.g.:\n"
     "  -style=\"{BasedOnStyle: llvm, IndentWidth: 8}\"";
 
-FormatStyle getStyle(StringRef StyleName, StringRef FileName) {
-  // Fallback style in case the rest of this function can't determine a style.
-  StringRef FallbackStyle = "LLVM";
-  FormatStyle Style;
-  getPredefinedStyle(FallbackStyle, &Style);
+static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
+  if (FileName.endswith_lower(".js")) {
+    return FormatStyle::LK_JavaScript;
+  } else if (FileName.endswith_lower(".proto") ||
+             FileName.endswith_lower(".protodevel")) {
+    return FormatStyle::LK_Proto;
+  }
+  return FormatStyle::LK_Cpp;
+}
+
+FormatStyle getStyle(StringRef StyleName, StringRef FileName,
+                     StringRef FallbackStyle) {
+  FormatStyle Style = getLLVMStyle();
+  Style.Language = getLanguageByFileName(FileName);
+  if (!getPredefinedStyle(FallbackStyle, Style.Language, &Style)) {
+    llvm::errs() << "Invalid fallback style \"" << FallbackStyle
+                 << "\" using LLVM style\n";
+    return Style;
+  }
 
   if (StyleName.startswith("{")) {
     // Parse YAML/JSON style from the command line.
@@ -1423,12 +1783,14 @@
   }
 
   if (!StyleName.equals_lower("file")) {
-    if (!getPredefinedStyle(StyleName, &Style))
+    if (!getPredefinedStyle(StyleName, Style.Language, &Style))
       llvm::errs() << "Invalid value for -style, using " << FallbackStyle
                    << " style\n";
     return Style;
   }
 
+  // Look for .clang-format/_clang-format file in the file's parent directories.
+  SmallString<128> UnsuitableConfigFiles;
   SmallString<128> Path(FileName);
   llvm::sys::fs::make_absolute(Path);
   for (StringRef Directory = Path; !Directory.empty();
@@ -1453,16 +1815,22 @@
     }
 
     if (IsFile) {
-      OwningPtr<llvm::MemoryBuffer> Text;
+      std::unique_ptr<llvm::MemoryBuffer> Text;
       if (llvm::error_code ec =
               llvm::MemoryBuffer::getFile(ConfigFile.c_str(), Text)) {
         llvm::errs() << ec.message() << "\n";
-        continue;
+        break;
       }
       if (llvm::error_code ec = parseConfiguration(Text->getBuffer(), &Style)) {
+        if (ec == llvm::errc::not_supported) {
+          if (!UnsuitableConfigFiles.empty())
+            UnsuitableConfigFiles.append(", ");
+          UnsuitableConfigFiles.append(ConfigFile);
+          continue;
+        }
         llvm::errs() << "Error reading " << ConfigFile << ": " << ec.message()
                      << "\n";
-        continue;
+        break;
       }
       DEBUG(llvm::dbgs() << "Using configuration file " << ConfigFile << "\n");
       return Style;
@@ -1470,6 +1838,11 @@
   }
   llvm::errs() << "Can't find usable .clang-format, using " << FallbackStyle
                << " style\n";
+  if (!UnsuitableConfigFiles.empty()) {
+    llvm::errs() << "Configuration file(s) do(es) not support "
+                 << getLanguageName(Style.Language) << ": "
+                 << UnsuitableConfigFiles << "\n";
+  }
   return Style;
 }
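A minimal caller sketch for the new three-argument getStyle(), assuming the matching
declaration in clang/Format/Format.h is updated to this signature; the helper name and
file path below are made up for illustration. The file name selects the language
(.js -> LK_JavaScript, .proto/.protodevel -> LK_Proto, anything else -> LK_Cpp), and
FallbackStyle is only consulted when -style=file finds no usable
.clang-format/_clang-format file.

    #include "clang/Format/Format.h"

    // Hypothetical helper: resolves the style for a JavaScript file, falling
    // back to the predefined Google style when no configuration file is found.
    clang::format::FormatStyle styleForMainJs() {
      return clang::format::getStyle(/*StyleName=*/"file",
                                     /*FileName=*/"app/main.js",
                                     /*FallbackStyle=*/"Google");
    }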
 
diff --git a/lib/Format/FormatToken.cpp b/lib/Format/FormatToken.cpp
index 8ac704a..c147dbb 100644
--- a/lib/Format/FormatToken.cpp
+++ b/lib/Format/FormatToken.cpp
@@ -22,15 +22,44 @@
 namespace clang {
 namespace format {
 
+// FIXME: This is copy&pasted from Sema. Put it in a common place and remove
+// duplication.
+bool FormatToken::isSimpleTypeSpecifier() const {
+  switch (Tok.getKind()) {
+  case tok::kw_short:
+  case tok::kw_long:
+  case tok::kw___int64:
+  case tok::kw___int128:
+  case tok::kw_signed:
+  case tok::kw_unsigned:
+  case tok::kw_void:
+  case tok::kw_char:
+  case tok::kw_int:
+  case tok::kw_half:
+  case tok::kw_float:
+  case tok::kw_double:
+  case tok::kw_wchar_t:
+  case tok::kw_bool:
+  case tok::kw___underlying_type:
+  case tok::annot_typename:
+  case tok::kw_char16_t:
+  case tok::kw_char32_t:
+  case tok::kw_typeof:
+  case tok::kw_decltype:
+    return true;
+  default:
+    return false;
+  }
+}
+
 TokenRole::~TokenRole() {}
 
 void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {}
 
-unsigned CommaSeparatedList::format(LineState &State,
-                                    ContinuationIndenter *Indenter,
-                                    bool DryRun) {
-  if (!State.NextToken->Previous || !State.NextToken->Previous->Previous ||
-      Commas.size() <= 2)
+unsigned CommaSeparatedList::formatAfterToken(LineState &State,
+                                              ContinuationIndenter *Indenter,
+                                              bool DryRun) {
+  if (!State.NextToken->Previous || !State.NextToken->Previous->Previous)
     return 0;
 
   // Ensure that we start on the opening brace.
@@ -48,8 +77,11 @@
 
   // Find the best ColumnFormat, i.e. the best number of columns to use.
   const ColumnFormat *Format = getColumnFormat(RemainingCodePoints);
+  // If no ColumnFormat can be used, the braced list would generally be
+  // bin-packed. Add a severe penalty to this so that column layouts are
+  // preferred if possible.
   if (!Format)
-    return 0;
+    return 10000;
 
   // Format the entire list.
   unsigned Penalty = 0;
@@ -79,6 +111,14 @@
   return Penalty;
 }
 
+unsigned CommaSeparatedList::formatFromToken(LineState &State,
+                                             ContinuationIndenter *Indenter,
+                                             bool DryRun) {
+  if (HasNestedBracedList)
+    State.Stack.back().AvoidBinPacking = true;
+  return 0;
+}
+
 // Returns the lengths in code points between Begin and End (both included),
 // assuming that the entire sequence is put on a single line.
 static unsigned CodePointsBetween(const FormatToken *Begin,
@@ -99,7 +139,6 @@
   // trailing comments which are otherwise ignored for column alignment.
   SmallVector<unsigned, 8> EndOfLineItemLength;
 
-  bool HasNestedBracedList = false;
   for (unsigned i = 0, e = Commas.size() + 1; i != e; ++i) {
     // Skip comments on their own line.
     while (ItemBegin->HasUnescapedNewline && ItemBegin->isTrailingComment())
@@ -139,6 +178,13 @@
     ItemBegin = ItemEnd->Next;
   }
 
+  // If this doesn't have a nested list, we require at least 6 elements in
+  // order to create a column layout. If it has a nested list, a column layout
+  // ensures one list element per line.
+  if (HasNestedBracedList || Commas.size() < 5 ||
+      Token->NestingLevel != 0)
+    return;
+
   // We can never place more than ColumnLimit / 3 items in a row (because of the
   // spaces and the comma).
   for (unsigned Columns = 1; Columns <= Style.ColumnLimit / 3; ++Columns) {
@@ -175,11 +221,6 @@
     if (Format.TotalWidth > Style.ColumnLimit)
       continue;
 
-    // If this braced list has nested braced list, we format it either with one
-    // element per line or with all elements on one line.
-    if (HasNestedBracedList && Columns > 1 && Format.LineCount > 1)
-      continue;
-
     Formats.push_back(Format);
   }
 }
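Illustration only (not part of the patch): with the rules above, a flat braced list
needs at least six elements at nesting level 0 before a column layout is attempted,
and formatAfterToken() now adds a severe penalty whenever no column format fits, so
solutions that keep a column layout possible are preferred. The intended result is
roughly:

    // Eligible for a column layout: at least six elements, nesting level 0.
    static const int Numbers[] = {1111111, 2222222, 3333333,
                                  4444444, 5555555, 6666666,
                                  7777777, 8888888, 9999999};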
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index 2145ee2..a5aaa6f 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -19,7 +19,7 @@
 #include "clang/Basic/OperatorPrecedence.h"
 #include "clang/Format/Format.h"
 #include "clang/Lex/Lexer.h"
-#include "llvm/ADT/OwningPtr.h"
+#include <memory>
 
 namespace clang {
 namespace format {
@@ -27,6 +27,7 @@
 enum TokenType {
   TT_ArrayInitializerLSquare,
   TT_ArraySubscriptLSquare,
+  TT_AttributeParen,
   TT_BinaryOperator,
   TT_BitFieldColon,
   TT_BlockComment,
@@ -39,10 +40,12 @@
   TT_ImplicitStringLiteral,
   TT_InlineASMColon,
   TT_InheritanceColon,
+  TT_FunctionLBrace,
   TT_FunctionTypeLParen,
   TT_LambdaLSquare,
   TT_LineComment,
   TT_ObjCBlockLParen,
+  TT_ObjCBlockLBrace,
   TT_ObjCDecl,
   TT_ObjCForIn,
   TT_ObjCMethodExpr,
@@ -57,6 +60,7 @@
   TT_StartOfName,
   TT_TemplateCloser,
   TT_TemplateOpener,
+  TT_TrailingAnnotation,
   TT_TrailingReturnArrow,
   TT_TrailingUnaryOperator,
   TT_UnaryOperator,
@@ -87,7 +91,7 @@
 class AnnotatedLine;
 
 /// \brief A wrapper around a \c Token storing information about the
-/// whitespace characters preceeding it.
+/// whitespace characters preceding it.
 struct FormatToken {
   FormatToken()
       : NewlinesBefore(0), HasUnescapedNewline(false), LastNewlineOffset(0),
@@ -96,11 +100,11 @@
         BlockKind(BK_Unknown), Type(TT_Unknown), SpacesRequiredBefore(0),
         CanBreakBefore(false), ClosesTemplateDeclaration(false),
         ParameterCount(0), PackingKind(PPK_Inconclusive), TotalLength(0),
-        UnbreakableTailLength(0), BindingStrength(0), SplitPenalty(0),
-        LongestObjCSelectorName(0), FakeRParens(0),
+        UnbreakableTailLength(0), BindingStrength(0), NestingLevel(0),
+        SplitPenalty(0), LongestObjCSelectorName(0), FakeRParens(0),
         StartsBinaryExpression(false), EndsBinaryExpression(false),
         LastInChainOfCalls(false), PartOfMultiVariableDeclStmt(false),
-        MatchingParen(NULL), Previous(NULL), Next(NULL),
+        IsForEachMacro(false), MatchingParen(NULL), Previous(NULL), Next(NULL),
         Decision(FD_Unformatted), Finalized(false) {}
 
   /// \brief The \c Token.
@@ -116,7 +120,7 @@
   /// Token.
   bool HasUnescapedNewline;
 
-  /// \brief The range of the whitespace immediately preceeding the \c Token.
+  /// \brief The range of the whitespace immediately preceding the \c Token.
   SourceRange WhitespaceRange;
 
   /// \brief The offset just past the last '\n' in this token's leading
@@ -184,7 +188,7 @@
 
   /// \brief A token can have a special role that can carry extra information
   /// about the token's formatting.
-  llvm::OwningPtr<TokenRole> Role;
+  std::unique_ptr<TokenRole> Role;
 
   /// \brief If this is an opening parenthesis, how are the parameters packed?
   ParameterPackingKind PackingKind;
@@ -206,11 +210,18 @@
   /// operator precedence, parenthesis nesting, etc.
   unsigned BindingStrength;
 
+  /// \brief The nesting level of this token, i.e. the number of surrounding (),
+  /// [], {} or <>.
+  unsigned NestingLevel;
+
   /// \brief Penalty for inserting a line break before this token.
   unsigned SplitPenalty;
 
   /// \brief If this is the first ObjC selector name in an ObjC method
   /// definition or call, this contains the length of the longest name.
+  ///
+  /// This being set to 0 means that the selectors should not be colon-aligned,
+  /// e.g. because several of them are block-type.
   unsigned LongestObjCSelectorName;
 
   /// \brief Stores the number of required fake parentheses and the
@@ -236,6 +247,9 @@
   /// Only set if \c Type == \c TT_StartOfName.
   bool PartOfMultiVariableDeclStmt;
 
+  /// \brief Is this a foreach macro?
+  bool IsForEachMacro;
+
   bool is(tok::TokenKind Kind) const { return Tok.is(Kind); }
 
   bool isOneOf(tok::TokenKind K1, tok::TokenKind K2) const {
@@ -260,6 +274,7 @@
   }
 
   bool isNot(tok::TokenKind Kind) const { return Tok.isNot(Kind); }
+  bool isStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); }
 
   bool isObjCAtKeyword(tok::ObjCKeywordKind Kind) const {
     return Tok.isObjCAtKeyword(Kind);
@@ -270,6 +285,9 @@
            (!ColonRequired || (Next && Next->is(tok::colon)));
   }
 
+  /// \brief Determine whether the token is a simple-type-specifier.
+  bool isSimpleTypeSpecifier() const;
+
   bool isObjCAccessSpecifier() const {
     return is(tok::at) && Next && (Next->isObjCAtKeyword(tok::objc_public) ||
                                    Next->isObjCAtKeyword(tok::objc_protected) ||
@@ -388,10 +406,21 @@
 
   /// \brief Apply the special formatting that the given role demands.
   ///
+  /// Assumes that the token having this role is already formatted.
+  ///
   /// Continues formatting from \p State leaving indentation to \p Indenter and
   /// returns the total penalty that this formatting incurs.
-  virtual unsigned format(LineState &State, ContinuationIndenter *Indenter,
-                          bool DryRun) {
+  virtual unsigned formatFromToken(LineState &State,
+                                   ContinuationIndenter *Indenter,
+                                   bool DryRun) {
+    return 0;
+  }
+
+  /// \brief Same as \c formatFromToken, but assumes that the first token has
+  /// already been set, thereby deciding on the first line break.
+  virtual unsigned formatAfterToken(LineState &State,
+                                    ContinuationIndenter *Indenter,
+                                    bool DryRun) {
     return 0;
   }
 
@@ -404,15 +433,19 @@
 
 class CommaSeparatedList : public TokenRole {
 public:
-  CommaSeparatedList(const FormatStyle &Style) : TokenRole(Style) {}
+  CommaSeparatedList(const FormatStyle &Style)
+      : TokenRole(Style), HasNestedBracedList(false) {}
 
-  virtual void precomputeFormattingInfos(const FormatToken *Token);
+  void precomputeFormattingInfos(const FormatToken *Token) override;
 
-  virtual unsigned format(LineState &State, ContinuationIndenter *Indenter,
-                          bool DryRun);
+  unsigned formatAfterToken(LineState &State, ContinuationIndenter *Indenter,
+                            bool DryRun) override;
+
+  unsigned formatFromToken(LineState &State, ContinuationIndenter *Indenter,
+                           bool DryRun) override;
 
   /// \brief Adds \p Token as the next comma to the \c CommaSeparated list.
-  virtual void CommaFound(const FormatToken *Token) { Commas.push_back(Token); }
+  void CommaFound(const FormatToken *Token) override { Commas.push_back(Token);}
 
 private:
   /// \brief A struct that holds information on how to format a given list with
@@ -444,6 +477,8 @@
 
   /// \brief Precomputed formats that can be used for this list.
   SmallVector<ColumnFormat, 4> Formats;
+
+  bool HasNestedBracedList;
 };
 
 } // namespace format
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index 074e1d7..0034235 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -34,6 +34,7 @@
       : Style(Style), Line(Line), CurrentToken(Line.First),
         KeywordVirtualFound(false), AutoFound(false), Ident_in(Ident_in) {
     Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
+    resetTokenMetadata(CurrentToken);
   }
 
 private:
@@ -43,6 +44,11 @@
     ScopedContextCreator ContextCreator(*this, tok::less, 10);
     FormatToken *Left = CurrentToken->Previous;
     Contexts.back().IsExpression = false;
+    // If there's a template keyword before the opening angle bracket, this is a
+    // template parameter, not an argument.
+    Contexts.back().InTemplateArgument =
+        Left->Previous != NULL && Left->Previous->Tok.isNot(tok::kw_template);
+
     while (CurrentToken != NULL) {
       if (CurrentToken->is(tok::greater)) {
         Left->MatchingParen = CurrentToken;
@@ -61,7 +67,11 @@
       // parameters.
       // FIXME: This is getting out of hand, write a decent parser.
       if (CurrentToken->Previous->isOneOf(tok::pipepipe, tok::ampamp) &&
-          (CurrentToken->Previous->Type == TT_BinaryOperator ||
+          ((CurrentToken->Previous->Type == TT_BinaryOperator &&
+            // Top-level bool expressions do not make much sense; if we're at
+            // the top level, the context stack contains only the base context
+            // and the context for the current opening angle bracket.
+            Contexts.size() > 2) ||
            Contexts[Contexts.size() - 2].IsExpression) &&
           Line.First->isNot(tok::kw_template))
         return false;
@@ -84,7 +94,7 @@
     bool StartsObjCMethodExpr = false;
     FormatToken *Left = CurrentToken->Previous;
     if (CurrentToken->is(tok::caret)) {
-      // ^( starts a block.
+      // (^ can start a block type.
       Left->Type = TT_ObjCBlockLParen;
     } else if (FormatToken *MaybeSel = Left->Previous) {
       // @selector( starts a selector.
@@ -94,8 +104,10 @@
       }
     }
 
-    if (Left->Previous && Left->Previous->isOneOf(tok::kw_static_assert,
-                                                  tok::kw_if, tok::kw_while)) {
+    if (Left->Previous &&
+        (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_if,
+                                 tok::kw_while, tok::l_paren, tok::comma) ||
+         Left->Previous->Type == TT_BinaryOperator)) {
       // static_assert, if and while usually contain expressions.
       Contexts.back().IsExpression = true;
     } else if (Left->Previous && Left->Previous->is(tok::r_square) &&
@@ -103,6 +115,15 @@
                Left->Previous->MatchingParen->Type == TT_LambdaLSquare) {
       // This is a parameter list of a lambda expression.
       Contexts.back().IsExpression = false;
+    } else if (Contexts[Contexts.size() - 2].CaretFound) {
+      // This is the parameter list of an ObjC block.
+      Contexts.back().IsExpression = false;
+    } else if (Left->Previous && Left->Previous->is(tok::kw___attribute)) {
+      Left->Type = TT_AttributeParen;
+    } else if (Left->Previous && Left->Previous->IsForEachMacro) {
+      // The first argument to a foreach macro is a declaration.
+      Contexts.back().IsForEachMacro = true;
+      Contexts.back().IsExpression = false;
     }
 
     if (StartsObjCMethodExpr) {
@@ -153,6 +174,9 @@
           }
         }
 
+        if (Left->Type == TT_AttributeParen)
+          CurrentToken->Type = TT_AttributeParen;
+
         if (!HasMultipleLines)
           Left->PackingKind = PPK_Inconclusive;
         else if (HasMultipleParametersOnALine)
@@ -165,11 +189,16 @@
       }
       if (CurrentToken->isOneOf(tok::r_square, tok::r_brace))
         return false;
+      else if (CurrentToken->is(tok::l_brace))
+        Left->Type = TT_Unknown; // Not TT_ObjCBlockLParen
       updateParameterCount(Left, CurrentToken);
       if (CurrentToken->is(tok::comma) && CurrentToken->Next &&
           !CurrentToken->Next->HasUnescapedNewline &&
           !CurrentToken->Next->isTrailingComment())
         HasMultipleParametersOnALine = true;
+      if (CurrentToken->isOneOf(tok::kw_const, tok::kw_auto) ||
+          CurrentToken->isSimpleTypeSpecifier())
+        Contexts.back().IsExpression = false;
       if (!consumeToken())
         return false;
       if (CurrentToken && CurrentToken->HasUnescapedNewline)
@@ -226,9 +255,12 @@
         }
         Left->MatchingParen = CurrentToken;
         CurrentToken->MatchingParen = Left;
-        if (Contexts.back().FirstObjCSelectorName != NULL)
+        if (Contexts.back().FirstObjCSelectorName != NULL) {
           Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
               Contexts.back().LongestObjCSelectorName;
+          if (Contexts.back().NumBlockParameters > 1)
+            Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName = 0;
+        }
         next();
         return true;
       }
@@ -237,6 +269,7 @@
       if (CurrentToken->is(tok::colon))
         ColonFound = true;
       if (CurrentToken->is(tok::comma) &&
+          Style.Language != FormatStyle::LK_Proto &&
           (Left->Type == TT_ArraySubscriptLSquare ||
            (Left->Type == TT_ObjCMethodExpr && !ColonFound)))
         Left->Type = TT_ArrayInitializerLSquare;
@@ -250,6 +283,11 @@
   bool parseBrace() {
     if (CurrentToken != NULL) {
       FormatToken *Left = CurrentToken->Previous;
+
+      if (Contexts.back().CaretFound)
+        Left->Type = TT_ObjCBlockLBrace;
+      Contexts.back().CaretFound = false;
+
       ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
       Contexts.back().ColonIsDictLiteral = true;
 
@@ -263,7 +301,8 @@
         if (CurrentToken->isOneOf(tok::r_paren, tok::r_square))
           return false;
         updateParameterCount(Left, CurrentToken);
-        if (CurrentToken->is(tok::colon))
+        if (CurrentToken->is(tok::colon) &&
+            Style.Language != FormatStyle::LK_Proto)
           Left->Type = TT_DictLiteral;
         if (!consumeToken())
           return false;
@@ -367,7 +406,7 @@
       if (!parseParens())
         return false;
       if (Line.MustBeDeclaration && Contexts.size() == 1 &&
-          !Contexts.back().IsExpression)
+          !Contexts.back().IsExpression && Line.First->Type != TT_ObjCProperty)
         Line.MightBeFunctionDecl = true;
       break;
     case tok::l_square:
@@ -429,6 +468,8 @@
         Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
       if (Contexts.back().InCtorInitializer)
         Tok->Type = TT_CtorInitializerComma;
+      if (Contexts.back().IsForEachMacro)
+        Contexts.back().IsExpression = true;
       break;
     default:
       break;
@@ -467,6 +508,18 @@
     }
   }
 
+  void parsePragma() {
+    next(); // Consume "pragma".
+    if (CurrentToken && CurrentToken->TokenText == "mark") {
+      next(); // Consume "mark".
+      next(); // Consume first token (so we fix leading whitespace).
+      while (CurrentToken != NULL) {
+        CurrentToken->Type = TT_ImplicitStringLiteral;
+        next();
+      }
+    }
+  }
+
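Illustration only: with parsePragma() above, the tokens after "#pragma mark" are typed
as TT_ImplicitStringLiteral once the first of them has been consumed (so only its
leading whitespace is fixed), and the rest of the free-form text is left alone. A tiny
input that exercises this path:

    // The free-form text after "mark" is kept as written; only the leading
    // whitespace of the first token after "mark" is normalized.
    #pragma mark - Networking helpers

    void connect();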
   void parsePreprocessorDirective() {
     next();
     if (CurrentToken == NULL)
@@ -488,8 +541,12 @@
     case tok::pp_warning:
       parseWarningOrError();
       break;
+    case tok::pp_pragma:
+      parsePragma();
+      break;
     case tok::pp_if:
     case tok::pp_elif:
+      Contexts.back().IsExpression = true;
       parseLine();
       break;
     default:
@@ -505,6 +562,15 @@
       parsePreprocessorDirective();
       return LT_PreprocessorDirective;
     }
+
+    // Directly allow 'import <string-literal>' to support protocol buffer
+    // definitions (code.google.com/p/protobuf) or a missing "#" (either way
+    // we should not break the line).
+    IdentifierInfo *Info = CurrentToken->Tok.getIdentifierInfo();
+    if (Info && Info->getPPKeywordID() == tok::pp_import &&
+        CurrentToken->Next && CurrentToken->Next->is(tok::string_literal))
+      parseIncludeDirective();
+
     while (CurrentToken != NULL) {
       if (CurrentToken->is(tok::kw_virtual))
         KeywordVirtualFound = true;
@@ -525,26 +591,33 @@
   }
 
 private:
+  void resetTokenMetadata(FormatToken *Token) {
+    if (Token == nullptr) return;
+
+    // Reset token type in case we have already looked at it and then
+    // recovered from an error (e.g. failure to find the matching >).
+    if (CurrentToken->Type != TT_LambdaLSquare &&
+        CurrentToken->Type != TT_FunctionLBrace &&
+        CurrentToken->Type != TT_ImplicitStringLiteral &&
+        CurrentToken->Type != TT_TrailingReturnArrow)
+      CurrentToken->Type = TT_Unknown;
+    if (CurrentToken->Role)
+      CurrentToken->Role.reset(NULL);
+    CurrentToken->FakeLParens.clear();
+    CurrentToken->FakeRParens = 0;
+  }
+
   void next() {
     if (CurrentToken != NULL) {
       determineTokenType(*CurrentToken);
       CurrentToken->BindingStrength = Contexts.back().BindingStrength;
+      CurrentToken->NestingLevel = Contexts.size() - 1;
     }
 
     if (CurrentToken != NULL)
       CurrentToken = CurrentToken->Next;
 
-    if (CurrentToken != NULL) {
-      // Reset token type in case we have already looked at it and then
-      // recovered from an error (e.g. failure to find the matching >).
-      if (CurrentToken->Type != TT_LambdaLSquare &&
-          CurrentToken->Type != TT_ImplicitStringLiteral)
-        CurrentToken->Type = TT_Unknown;
-      if (CurrentToken->Role)
-        CurrentToken->Role.reset(NULL);
-      CurrentToken->FakeLParens.clear();
-      CurrentToken->FakeRParens = 0;
-    }
+    resetTokenMetadata(CurrentToken);
   }
 
   /// \brief A struct to hold information valid in a specific context, e.g.
@@ -553,15 +626,17 @@
     Context(tok::TokenKind ContextKind, unsigned BindingStrength,
             bool IsExpression)
         : ContextKind(ContextKind), BindingStrength(BindingStrength),
-          LongestObjCSelectorName(0), ColonIsForRangeExpr(false),
-          ColonIsDictLiteral(false), ColonIsObjCMethodExpr(false),
-          FirstObjCSelectorName(NULL), FirstStartOfName(NULL),
-          IsExpression(IsExpression), CanBeExpression(true),
-          InCtorInitializer(false) {}
+          LongestObjCSelectorName(0), NumBlockParameters(0),
+          ColonIsForRangeExpr(false), ColonIsDictLiteral(false),
+          ColonIsObjCMethodExpr(false), FirstObjCSelectorName(NULL),
+          FirstStartOfName(NULL), IsExpression(IsExpression),
+          CanBeExpression(true), InTemplateArgument(false),
+          InCtorInitializer(false), CaretFound(false), IsForEachMacro(false) {}
 
     tok::TokenKind ContextKind;
     unsigned BindingStrength;
     unsigned LongestObjCSelectorName;
+    unsigned NumBlockParameters;
     bool ColonIsForRangeExpr;
     bool ColonIsDictLiteral;
     bool ColonIsObjCMethodExpr;
@@ -569,7 +644,10 @@
     FormatToken *FirstStartOfName;
     bool IsExpression;
     bool CanBeExpression;
+    bool InTemplateArgument;
     bool InCtorInitializer;
+    bool CaretFound;
+    bool IsForEachMacro;
   };
 
   /// \brief Puts a new \c Context onto the stack \c Contexts for the lifetime
@@ -603,12 +681,17 @@
           Previous->Type = TT_PointerOrReference;
         }
       }
-    } else if (Current.isOneOf(tok::kw_return, tok::kw_throw) ||
-               (Current.is(tok::l_paren) && !Line.MustBeDeclaration &&
-                !Line.InPPDirective &&
-                (!Current.Previous ||
-                 !Current.Previous->isOneOf(tok::kw_for, tok::kw_catch)))) {
+    } else if (Current.isOneOf(tok::kw_return, tok::kw_throw)) {
       Contexts.back().IsExpression = true;
+    } else if (Current.is(tok::l_paren) && !Line.MustBeDeclaration &&
+               !Line.InPPDirective) {
+      bool ParametersOfFunctionType =
+          Current.Previous && Current.Previous->is(tok::r_paren) &&
+          Current.Previous->MatchingParen &&
+          Current.Previous->MatchingParen->Type == TT_FunctionTypeLParen;
+      bool IsForOrCatch = Current.Previous &&
+                          Current.Previous->isOneOf(tok::kw_for, tok::kw_catch);
+      Contexts.back().IsExpression = !ParametersOfFunctionType && !IsForOrCatch;
     } else if (Current.isOneOf(tok::r_paren, tok::greater, tok::comma)) {
       for (FormatToken *Previous = Current.Previous;
            Previous && Previous->isOneOf(tok::star, tok::amp);
@@ -640,9 +723,15 @@
       } else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) {
         Current.Type =
             determineStarAmpUsage(Current, Contexts.back().CanBeExpression &&
-                                               Contexts.back().IsExpression);
+                                               Contexts.back().IsExpression,
+                                  Contexts.back().InTemplateArgument);
       } else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
         Current.Type = determinePlusMinusCaretUsage(Current);
+        if (Current.Type == TT_UnaryOperator) {
+          ++Contexts.back().NumBlockParameters;
+          if (Current.is(tok::caret))
+            Contexts.back().CaretFound = true;
+        }
       } else if (Current.isOneOf(tok::minusminus, tok::plusplus)) {
         Current.Type = determineIncrementUsage(Current);
       } else if (Current.is(tok::exclaim)) {
@@ -665,7 +754,7 @@
         bool ParensAreType = !Current.Previous ||
                              Current.Previous->Type == TT_PointerOrReference ||
                              Current.Previous->Type == TT_TemplateCloser ||
-                             isSimpleTypeSpecifier(*Current.Previous);
+                             Current.Previous->isSimpleTypeSpecifier();
         bool ParensCouldEndDecl =
             Current.Next &&
             Current.Next->isOneOf(tok::equal, tok::semi, tok::l_brace);
@@ -673,7 +762,8 @@
             LeftOfParens &&
             LeftOfParens->isOneOf(tok::kw_sizeof, tok::kw_alignof);
         if (ParensAreType && !ParensCouldEndDecl && !IsSizeOfOrAlignOf &&
-            (Contexts.back().IsExpression ||
+            ((Contexts.size() > 1 &&
+              Contexts[Contexts.size() - 2].IsExpression) ||
              (Current.Next && Current.Next->isBinaryOperator())))
           IsCast = true;
         if (Current.Next && Current.Next->isNot(tok::string_literal) &&
@@ -685,6 +775,7 @@
         if (LeftOfParens && (LeftOfParens->Tok.getIdentifierInfo() == NULL ||
                              LeftOfParens->is(tok::kw_return)) &&
             LeftOfParens->Type != TT_OverloadedOperator &&
+            LeftOfParens->isNot(tok::at) &&
             LeftOfParens->Type != TT_TemplateCloser && Current.Next &&
             Current.Next->is(tok::identifier))
           IsCast = true;
@@ -708,6 +799,11 @@
         if (PreviousNoComment &&
             PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
           Current.Type = TT_DesignatedInitializerPeriod;
+      } else if (Current.isOneOf(tok::identifier, tok::kw_const) &&
+                 Line.MightBeFunctionDecl && Contexts.size() == 1) {
+        // Line.MightBeFunctionDecl can only be true after the parentheses of a
+        // function declaration have been found.
+        Current.Type = TT_TrailingAnnotation;
       }
     }
   }
@@ -740,11 +836,12 @@
 
     return (!IsPPKeyword && PreviousNotConst->is(tok::identifier)) ||
            PreviousNotConst->Type == TT_PointerOrReference ||
-           isSimpleTypeSpecifier(*PreviousNotConst);
+           PreviousNotConst->isSimpleTypeSpecifier();
   }
 
   /// \brief Return the type of the given token assuming it is * or &.
-  TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression) {
+  TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
+                                  bool InTemplateArgument) {
     const FormatToken *PrevToken = Tok.getPreviousNonComment();
     if (PrevToken == NULL)
       return TT_UnaryOperator;
@@ -773,8 +870,15 @@
       return TT_PointerOrReference;
 
     if (PrevToken->Tok.isLiteral() ||
-        PrevToken->isOneOf(tok::r_paren, tok::r_square) ||
-        NextToken->Tok.isLiteral() || NextToken->isUnaryOperator())
+        PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::kw_true,
+                           tok::kw_false) ||
+        NextToken->Tok.isLiteral() ||
+        NextToken->isOneOf(tok::kw_true, tok::kw_false) ||
+        NextToken->isUnaryOperator() ||
+        // If we know we're in a template argument, there are no named
+        // declarations. Thus, having an identifier on the right-hand side
+        // indicates a binary operator.
+        (InTemplateArgument && NextToken->Tok.isAnyIdentifier()))
       return TT_BinaryOperator;
 
     // It is very unlikely that we are going to find a pointer or reference type
@@ -815,36 +919,6 @@
     return TT_UnaryOperator;
   }
 
-  // FIXME: This is copy&pasted from Sema. Put it in a common place and remove
-  // duplication.
-  /// \brief Determine whether the token kind starts a simple-type-specifier.
-  bool isSimpleTypeSpecifier(const FormatToken &Tok) const {
-    switch (Tok.Tok.getKind()) {
-    case tok::kw_short:
-    case tok::kw_long:
-    case tok::kw___int64:
-    case tok::kw___int128:
-    case tok::kw_signed:
-    case tok::kw_unsigned:
-    case tok::kw_void:
-    case tok::kw_char:
-    case tok::kw_int:
-    case tok::kw_half:
-    case tok::kw_float:
-    case tok::kw_double:
-    case tok::kw_wchar_t:
-    case tok::kw_bool:
-    case tok::kw___underlying_type:
-    case tok::annot_typename:
-    case tok::kw_char16_t:
-    case tok::kw_char32_t:
-    case tok::kw_typeof:
-    case tok::kw_decltype:
-      return true;
-    default:
-      return false;
-    }
-  }
 
   SmallVector<Context, 8> Contexts;
 
@@ -904,8 +978,11 @@
       int CurrentPrecedence = getCurrentPrecedence();
 
       if (Current && Current->Type == TT_ObjCSelectorName &&
-          Precedence == CurrentPrecedence)
+          Precedence == CurrentPrecedence) {
+        if (LatestOperator)
+          addFakeParenthesis(Start, prec::Level(Precedence));
         Start = Current;
+      }
 
       // At the end of the line or when an operator with higher precedence is
       // found, insert fake parenthesis and return.
@@ -950,6 +1027,8 @@
       else if (Current->is(tok::semi) || Current->Type == TT_InlineASMColon ||
                Current->Type == TT_ObjCSelectorName)
         return 0;
+      else if (Current->Type == TT_RangeBasedForLoopColon)
+        return prec::Comma;
       else if (Current->Type == TT_BinaryOperator || Current->is(tok::comma))
         return Current->getPrecedence();
       else if (Current->isOneOf(tok::period, tok::arrow))
@@ -1060,11 +1139,37 @@
   FormatToken *Current = Line.First->Next;
   bool InFunctionDecl = Line.MightBeFunctionDecl;
   while (Current != NULL) {
-    if (Current->Type == TT_LineComment)
-      Current->SpacesRequiredBefore = Style.SpacesBeforeTrailingComments;
-    else if (Current->SpacesRequiredBefore == 0 &&
-             spaceRequiredBefore(Line, *Current))
+    if (Current->Type == TT_LineComment) {
+      if (Current->Previous->BlockKind == BK_BracedInit &&
+          Current->Previous->opensScope())
+        Current->SpacesRequiredBefore = Style.Cpp11BracedListStyle ? 0 : 1;
+      else
+        Current->SpacesRequiredBefore = Style.SpacesBeforeTrailingComments;
+
+      // If we find a trailing comment, iterate backwards to determine whether
+      // it seems to relate to a specific parameter. If so, break before that
+      // parameter to avoid changing the comment's meaning. E.g. don't move 'b'
+      // to the previous line in:
+      //   SomeFunction(a,
+      //                b, // comment
+      //                c);
+      if (!Current->HasUnescapedNewline) {
+        for (FormatToken *Parameter = Current->Previous; Parameter;
+             Parameter = Parameter->Previous) {
+          if (Parameter->isOneOf(tok::comment, tok::r_brace))
+            break;
+          if (Parameter->Previous && Parameter->Previous->is(tok::comma)) {
+            if (Parameter->Previous->Type != TT_CtorInitializerComma &&
+                Parameter->HasUnescapedNewline)
+              Parameter->MustBreakBefore = true;
+            break;
+          }
+        }
+      }
+    } else if (Current->SpacesRequiredBefore == 0 &&
+             spaceRequiredBefore(Line, *Current)) {
       Current->SpacesRequiredBefore = 1;
+    }
 
     Current->MustBreakBefore =
         Current->MustBreakBefore || mustBreakBefore(Line, *Current);
@@ -1132,16 +1237,18 @@
     return 0;
   if (Left.is(tok::comma))
     return 1;
-  if (Right.is(tok::l_square))
-    return 150;
-
+  if (Right.is(tok::l_square)) {
+    if (Style.Language == FormatStyle::LK_Proto)
+      return 1;
+    if (Right.Type != TT_ObjCMethodExpr)
+      return 250;
+  }
   if (Right.Type == TT_StartOfName || Right.is(tok::kw_operator)) {
     if (Line.First->is(tok::kw_for) && Right.PartOfMultiVariableDeclStmt)
       return 3;
     if (Left.Type == TT_StartOfName)
       return 20;
-    if (InFunctionDecl && Right.BindingStrength == 1)
-      // FIXME: Clean up hack of using BindingStrength to find top-level names.
+    if (InFunctionDecl && Right.NestingLevel == 0)
       return Style.PenaltyReturnTypeOnItsOwnLine;
     return 200;
   }
@@ -1149,7 +1256,8 @@
     return 150;
   if (Left.Type == TT_CastRParen)
     return 100;
-  if (Left.is(tok::coloncolon))
+  if (Left.is(tok::coloncolon) ||
+      (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
     return 500;
   if (Left.isOneOf(tok::kw_class, tok::kw_struct))
     return 5000;
@@ -1159,17 +1267,25 @@
     return 2;
 
   if (Right.isMemberAccess()) {
-    if (Left.isOneOf(tok::r_paren, tok::r_square) && Left.MatchingParen &&
+    if (Left.is(tok::r_paren) && Left.MatchingParen &&
         Left.MatchingParen->ParameterCount > 0)
       return 20; // Should be smaller than breaking at a nested comma.
     return 150;
   }
 
-  // Breaking before a trailing 'const' or not-function-like annotation is bad.
-  if (Left.is(tok::r_paren) && Line.Type != LT_ObjCProperty &&
-      (Right.is(tok::kw_const) || (Right.is(tok::identifier) && Right.Next &&
-                                   Right.Next->isNot(tok::l_paren))))
-    return 100;
+  if (Right.Type == TT_TrailingAnnotation && Right.Next &&
+      Right.Next->isNot(tok::l_paren)) {
+    // Generally, breaking before a trailing annotation is bad unless it is
+    // function-like. It seems to be especially preferable to keep standard
+    // annotations (i.e. "const", "final" and "override") on the same line.
+    // Use a slightly higher penalty after ")" so that annotations like
+    // "const override" are kept together.
+    bool is_standard_annotation = Right.is(tok::kw_const) ||
+                                  Right.TokenText == "override" ||
+                                  Right.TokenText == "final";
+    return (Left.is(tok::r_paren) ? 100 : 120) +
+           (is_standard_annotation ? 50 : 0);
+  }
 
   // In for-loops, prefer breaking at ',' and ';'.
   if (Line.First->is(tok::kw_for) && Left.is(tok::equal))
@@ -1180,10 +1296,12 @@
   if (Right.Type == TT_ObjCSelectorName)
     return 0;
   if (Left.is(tok::colon) && Left.Type == TT_ObjCMethodExpr)
-    return 50;
+    return Line.MightBeFunctionDecl ? 50 : 500;
 
   if (Left.is(tok::l_paren) && InFunctionDecl)
     return 100;
+  if (Left.is(tok::equal) && InFunctionDecl)
+    return 110;
   if (Left.opensScope())
     return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
                                    : 19;
@@ -1215,6 +1333,14 @@
 bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
                                           const FormatToken &Left,
                                           const FormatToken &Right) {
+  if (Style.Language == FormatStyle::LK_Proto) {
+    if (Right.is(tok::l_paren) &&
+        (Left.TokenText == "returns" || Left.TokenText == "option"))
+      return true;
+  }
+  if (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
+      Left.Tok.getObjCKeywordID() == tok::objc_property)
+    return true;
   if (Right.is(tok::hashhash))
     return Left.is(tok::hash);
   if (Left.isOneOf(tok::hashhash, tok::hash))
@@ -1246,7 +1372,7 @@
     return false;
   if (Left.is(tok::coloncolon))
     return false;
-  if (Right.is(tok::coloncolon))
+  if (Right.is(tok::coloncolon) && Left.isNot(tok::l_brace))
     return (Left.is(tok::less) && Style.Standard == FormatStyle::LS_Cpp03) ||
            !Left.isOneOf(tok::identifier, tok::greater, tok::l_paren,
                          tok::r_paren, tok::less);
@@ -1273,35 +1399,39 @@
     return false;
   if (Left.is(tok::l_square))
     return Left.Type == TT_ArrayInitializerLSquare &&
-           Right.isNot(tok::r_square);
+           Style.SpacesInContainerLiterals && Right.isNot(tok::r_square);
   if (Right.is(tok::r_square))
-    return Right.MatchingParen &&
+    return Right.MatchingParen && Style.SpacesInContainerLiterals &&
            Right.MatchingParen->Type == TT_ArrayInitializerLSquare;
   if (Right.is(tok::l_square) && Right.Type != TT_ObjCMethodExpr &&
       Right.Type != TT_LambdaLSquare && Left.isNot(tok::numeric_constant))
     return false;
   if (Left.is(tok::colon))
     return Left.Type != TT_ObjCMethodExpr;
-  if (Right.is(tok::colon))
-    return Right.Type != TT_ObjCMethodExpr && !Left.is(tok::question);
   if (Right.is(tok::l_paren)) {
-    if (Left.is(tok::r_paren) && Left.MatchingParen &&
-        Left.MatchingParen->Previous &&
-        Left.MatchingParen->Previous->is(tok::kw___attribute))
+    if (Left.is(tok::r_paren) && Left.Type == TT_AttributeParen)
       return true;
     return Line.Type == LT_ObjCDecl ||
            Left.isOneOf(tok::kw_return, tok::kw_new, tok::kw_delete,
                         tok::semi) ||
-           (Style.SpaceAfterControlStatementKeyword &&
-            Left.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while, tok::kw_switch,
-                         tok::kw_catch));
+           (Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
+            (Left.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while,
+                          tok::kw_switch, tok::kw_catch) ||
+             Left.IsForEachMacro)) ||
+           (Style.SpaceBeforeParens == FormatStyle::SBPO_Always &&
+            Left.isOneOf(tok::identifier, tok::kw___attribute) &&
+            Line.Type != LT_PreprocessorDirective);
   }
   if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword)
     return false;
   if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
     return !Left.Children.empty(); // No spaces in "{}".
-  if (Left.is(tok::l_brace) || Right.is(tok::r_brace))
+  if ((Left.is(tok::l_brace) && Left.BlockKind != BK_Block) ||
+      (Right.is(tok::r_brace) && Right.MatchingParen &&
+       Right.MatchingParen->BlockKind != BK_Block))
     return !Style.Cpp11BracedListStyle;
+  if (Left.Type == TT_BlockComment && Left.TokenText.endswith("=*/"))
+    return false;
   if (Right.Type == TT_UnaryOperator)
     return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) &&
            (Left.isNot(tok::colon) || Left.Type != TT_ObjCMethodExpr);
@@ -1311,8 +1441,6 @@
     return false;
   if (Left.is(tok::period) || Right.is(tok::period))
     return false;
-  if (Left.Type == TT_BlockComment && Left.TokenText.endswith("=*/"))
-    return false;
   if (Right.is(tok::hash) && Left.is(tok::identifier) && Left.TokenText == "L")
     return false;
   return true;
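Illustration only: the SpaceBeforeParens handling above keeps the space after control
statement keywords (and foreach macros) unless the style says SBPO_Never, and under
SBPO_Always it also inserts one after plain identifiers outside preprocessor
directives. Formatted output then looks roughly like this:

    void callee(int x);

    void caller(int x) {
      if (x)          // space after 'if' kept unless SpaceBeforeParens is SBPO_Never
        callee (x);   // space before the call's '(' only under SBPO_Always
    }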
@@ -1351,10 +1479,11 @@
   if (Tok.is(tok::colon))
     return !Line.First->isOneOf(tok::kw_case, tok::kw_default) &&
            Tok.getNextNonComment() != NULL && Tok.Type != TT_ObjCMethodExpr &&
-           !Tok.Previous->is(tok::question);
+           !Tok.Previous->is(tok::question) &&
+           (Tok.Type != TT_DictLiteral || Style.SpacesInContainerLiterals);
   if (Tok.Previous->Type == TT_UnaryOperator ||
       Tok.Previous->Type == TT_CastRParen)
-    return false;
+    return Tok.Type == TT_BinaryOperator;
   if (Tok.Previous->is(tok::greater) && Tok.is(tok::greater)) {
     return Tok.Type == TT_TemplateCloser &&
            Tok.Previous->Type == TT_TemplateCloser &&
@@ -1381,11 +1510,13 @@
 
 bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
                                      const FormatToken &Right) {
+  const FormatToken &Left = *Right.Previous;
   if (Right.is(tok::comment)) {
-    return Right.NewlinesBefore > 0;
+    return Right.Previous->BlockKind != BK_BracedInit &&
+           Right.Previous->Type != TT_CtorInitializerColon &&
+           Right.NewlinesBefore > 0;
   } else if (Right.Previous->isTrailingComment() ||
-             (Right.is(tok::string_literal) &&
-              Right.Previous->is(tok::string_literal))) {
+             (Right.isStringLiteral() && Right.Previous->isStringLiteral())) {
     return true;
   } else if (Right.Previous->IsUnterminatedLiteral) {
     return true;
@@ -1395,19 +1526,34 @@
     return true;
   } else if (Right.Previous->ClosesTemplateDeclaration &&
              Right.Previous->MatchingParen &&
-             Right.Previous->MatchingParen->BindingStrength == 1 &&
+             Right.Previous->MatchingParen->NestingLevel == 0 &&
              Style.AlwaysBreakTemplateDeclarations) {
-    // FIXME: Fix horrible hack of using BindingStrength to find top-level <>.
     return true;
-  } else if (Right.Type == TT_CtorInitializerComma &&
+  } else if ((Right.Type == TT_CtorInitializerComma ||
+              Right.Type == TT_CtorInitializerColon) &&
              Style.BreakConstructorInitializersBeforeComma &&
              !Style.ConstructorInitializerAllOnOneLineOrOnePerLine) {
     return true;
-  } else if (Right.Previous->BlockKind == BK_Block &&
-             Right.Previous->isNot(tok::r_brace) && Right.isNot(tok::r_brace)) {
-    return true;
   } else if (Right.is(tok::l_brace) && (Right.BlockKind == BK_Block)) {
-    return Style.BreakBeforeBraces == FormatStyle::BS_Allman;
+    return Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
+           Style.BreakBeforeBraces == FormatStyle::BS_GNU;
+  } else if (Right.is(tok::string_literal) &&
+             Right.TokenText.startswith("R\"")) {
+    // Raw string literals are special wrt. line breaks. The author has made a
+    // deliberate choice and might have aligned the contents of the string
+    // literal accordingly. Thus, we try to keep existing line breaks.
+    return Right.NewlinesBefore > 0;
+  } else if (Right.Previous->is(tok::l_brace) && Right.NestingLevel == 1 &&
+             Style.Language == FormatStyle::LK_Proto) {
+    // Don't put enums onto single lines in protocol buffers.
+    return true;
+  } else if ((Left.is(tok::l_brace) && Left.MatchingParen &&
+              Left.MatchingParen->Previous &&
+              Left.MatchingParen->Previous->is(tok::comma)) ||
+             (Right.is(tok::r_brace) && Left.is(tok::comma))) {
+    // If the last token before a '}' is a comma, the intention is to insert a
+    // line break after it in order to make shuffling around entries easier.
+    return true;
   }
   return false;
 }
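Illustration only: the last rule above treats a trailing comma before '}' as a signal
to keep the braced list multi-line; a break is forced right after the opening brace
and before the closing brace, so a list written like the one below is not collapsed
onto a single line:

    // The comma after the last entry keeps the list open, which makes later
    // insertions and reorderings small one-line diffs.
    static const char *Names[] = {
        "alpha",
        "beta",
        "gamma",
    };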
@@ -1415,12 +1561,17 @@
 bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
                                     const FormatToken &Right) {
   const FormatToken &Left = *Right.Previous;
+  if (Left.is(tok::at))
+    return false;
   if (Right.Type == TT_StartOfName || Right.is(tok::kw_operator))
     return true;
   if (Right.isTrailingComment())
     // We rely on MustBreakBefore being set correctly here as we should not
     // change the "binding" behavior of a comment.
-    return false;
+    // The first comment in a braced list is always interpreted as belonging to
+    // the first list element. Otherwise, it should be placed outside of the
+    // list.
+    return Left.BlockKind == BK_BracedInit;
   if (Left.is(tok::question) && Right.is(tok::colon))
     return false;
   if (Right.Type == TT_ConditionalExpr || Right.is(tok::question))
@@ -1430,6 +1581,8 @@
   if (Right.is(tok::colon) &&
       (Right.Type == TT_DictLiteral || Right.Type == TT_ObjCMethodExpr))
     return false;
+  if (Right.Type == TT_InheritanceColon)
+    return true;
   if (Left.is(tok::colon) &&
       (Left.Type == TT_DictLiteral || Left.Type == TT_ObjCMethodExpr))
     return true;
@@ -1452,14 +1605,12 @@
     return false;
   if (Left.is(tok::equal) && Line.Type == LT_VirtualFunctionDecl)
     return false;
-  if (Left.Previous) {
-    if (Left.is(tok::l_paren) && Right.is(tok::l_paren) &&
-        Left.Previous->is(tok::kw___attribute))
-      return false;
-    if (Left.is(tok::l_paren) && (Left.Previous->Type == TT_BinaryOperator ||
-                                  Left.Previous->Type == TT_CastRParen))
-      return false;
-  }
+  if (Left.is(tok::l_paren) && Left.Type == TT_AttributeParen)
+    return false;
+  if (Left.is(tok::l_paren) && Left.Previous &&
+      (Left.Previous->Type == TT_BinaryOperator ||
+       Left.Previous->Type == TT_CastRParen))
+    return false;
   if (Right.Type == TT_ImplicitStringLiteral)
     return false;
 
@@ -1489,7 +1640,7 @@
   if (Right.Type == TT_CtorInitializerComma &&
       Style.BreakConstructorInitializersBeforeComma)
     return true;
-  if (Right.isBinaryOperator() && Style.BreakBeforeBinaryOperators)
+  if (Right.Type == TT_BinaryOperator && Style.BreakBeforeBinaryOperators)
     return true;
   if (Left.is(tok::greater) && Right.is(tok::greater) &&
       Left.Type != TT_TemplateCloser)
@@ -1503,7 +1654,7 @@
          Right.isOneOf(tok::lessless, tok::arrow, tok::period, tok::colon,
                        tok::l_square, tok::at) ||
          (Left.is(tok::r_paren) &&
-          Right.isOneOf(tok::identifier, tok::kw_const, tok::kw___attribute)) ||
+          Right.isOneOf(tok::identifier, tok::kw_const)) ||
          (Left.is(tok::l_paren) && !Right.is(tok::r_paren));
 }
 
diff --git a/lib/Format/TokenAnnotator.h b/lib/Format/TokenAnnotator.h
index aa49b2a..bd91980 100644
--- a/lib/Format/TokenAnnotator.h
+++ b/lib/Format/TokenAnnotator.h
@@ -41,7 +41,8 @@
       : First(Line.Tokens.front().Tok), Level(Line.Level),
         InPPDirective(Line.InPPDirective),
         MustBeDeclaration(Line.MustBeDeclaration), MightBeFunctionDecl(false),
-        StartsDefinition(false) {
+        StartsDefinition(false), Affected(false),
+        LeadingEmptyLinesAffected(false), ChildrenAffected(false) {
     assert(!Line.Tokens.empty());
 
     // Calculate Next and Previous for all tokens. Note that we must overwrite
@@ -87,6 +88,17 @@
   bool MightBeFunctionDecl;
   bool StartsDefinition;
 
+  /// \c True if this line should be formatted, i.e. intersects directly or
+  /// indirectly with one of the input ranges.
+  bool Affected;
+
+  /// \c True if the leading empty lines of this line intersect with one of the
+  /// input ranges.
+  bool LeadingEmptyLinesAffected;
+
+  /// \c True if one of this line's children intersects with an input range.
+  bool ChildrenAffected;
+
 private:
   // Disallow copying.
   AnnotatedLine(const AnnotatedLine &) LLVM_DELETED_FUNCTION;
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index e0b090f..96d8155 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -74,7 +74,7 @@
     StructuralError = PreviousStructuralError;
   }
 
-  virtual FormatToken *getNextToken() {
+  FormatToken *getNextToken() override {
     // The \c UnwrappedLineParser guards against this by never calling
     // \c getNextToken() after it has encountered the first eof token.
     assert(!eof());
@@ -84,9 +84,9 @@
     return Token;
   }
 
-  virtual unsigned getPosition() { return PreviousTokenSource->getPosition(); }
+  unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
 
-  virtual FormatToken *setPosition(unsigned Position) {
+  FormatToken *setPosition(unsigned Position) override {
     Token = PreviousTokenSource->setPosition(Position);
     return Token;
   }
@@ -128,7 +128,7 @@
       Parser.CurrentLines = &Parser.PreprocessorDirectives;
     else if (!Parser.Line->Tokens.empty())
       Parser.CurrentLines = &Parser.Line->Tokens.back().Children;
-    PreBlockLine = Parser.Line.take();
+    PreBlockLine = Parser.Line.release();
     Parser.Line.reset(new UnwrappedLine());
     Parser.Line->Level = PreBlockLine->Level;
     Parser.Line->InPPDirective = PreBlockLine->InPPDirective;
@@ -152,6 +152,27 @@
   SmallVectorImpl<UnwrappedLine> *OriginalLines;
 };
 
+class CompoundStatementIndenter {
+public:
+  CompoundStatementIndenter(UnwrappedLineParser *Parser,
+                            const FormatStyle &Style, unsigned &LineLevel)
+      : LineLevel(LineLevel), OldLineLevel(LineLevel) {
+    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman) {
+      Parser->addUnwrappedLine();
+    } else if (Style.BreakBeforeBraces == FormatStyle::BS_GNU) {
+      Parser->addUnwrappedLine();
+      ++LineLevel;
+    }
+  }
+  ~CompoundStatementIndenter() {
+    LineLevel = OldLineLevel;
+  }
+
+private:
+  unsigned &LineLevel;
+  unsigned OldLineLevel;
+};
+
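Illustration only: CompoundStatementIndenter above is what distinguishes BS_GNU from
BS_Allman for compound statements; both break before the brace, but BS_GNU also
indents the brace of an if/while/for body one level deeper than its controlling
statement (other style options left aside), giving layout along these lines:

    void doWork();

    void example(bool Cond)
    {
      if (Cond)
        {
          doWork();
        }
    }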
 namespace {
 
 class IndexedTokenSource : public FormatTokenSource {
@@ -159,17 +180,17 @@
   IndexedTokenSource(ArrayRef<FormatToken *> Tokens)
       : Tokens(Tokens), Position(-1) {}
 
-  virtual FormatToken *getNextToken() {
+  FormatToken *getNextToken() override {
     ++Position;
     return Tokens[Position];
   }
 
-  virtual unsigned getPosition() {
+  unsigned getPosition() override {
     assert(Position >= 0);
     return Position;
   }
 
-  virtual FormatToken *setPosition(unsigned P) {
+  FormatToken *setPosition(unsigned P) override {
     Position = P;
     return Tokens[Position];
   }
@@ -322,7 +343,7 @@
           // (for example while parsing lambdas).
           //
           // We exclude + and - as they can be ObjC visibility modifiers.
-          if (NextTok->isOneOf(tok::comma, tok::semi, tok::r_paren,
+          if (NextTok->isOneOf(tok::comma, tok::semi, tok::r_paren, tok::period,
                                tok::r_square, tok::l_brace, tok::colon) ||
               (NextTok->isBinaryOperator() &&
                !NextTok->isOneOf(tok::plus, tok::minus))) {
@@ -336,6 +357,7 @@
         LBraceStack.pop_back();
       }
       break;
+    case tok::at:
     case tok::semi:
     case tok::kw_if:
     case tok::kw_while:
@@ -488,7 +510,9 @@
       PPLevelBranchCount[PPBranchLevel] = PPChainBranchIndex.top() + 1;
     }
   }
-  --PPBranchLevel;
+  // Guard against #endif's without #if.
+  if (PPBranchLevel > 0)
+    --PPBranchLevel;
   if (!PPChainBranchIndex.empty())
     PPChainBranchIndex.pop();
   if (!PPStack.empty())
@@ -620,9 +644,6 @@
   case tok::kw_case:
     parseCaseLabel();
     return;
-  case tok::kw_return:
-    parseReturn();
-    return;
   case tok::kw_extern:
     nextToken();
     if (FormatTok->Tok.is(tok::string_literal)) {
@@ -633,6 +654,12 @@
         return;
       }
     }
+    break;
+  case tok::identifier:
+    if (FormatTok->IsForEachMacro) {
+      parseForOrWhileLoop();
+      return;
+    }
     // In all other cases, parse the declaration.
     break;
   default:
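The new tok::identifier case routes identifiers flagged as IsForEachMacro into parseForOrWhileLoop, so macro-based loops get the same block handling as for/while. Assuming the flag is populated from clang-format's ForEachMacros style list (an assumption; that option is not shown in this patch), an input like the following is unwrapped as a loop rather than as a function call:

#include <cstdio>
#include <vector>

// A home-grown foreach macro; if "MY_FOREACH" were listed in the (assumed)
// ForEachMacros option, clang-format would treat the call like a for loop.
#define MY_FOREACH(var, container) for (auto &var : container)

int main() {
  std::vector<int> Values = {1, 2, 3};
  MY_FOREACH (v, Values) {
    std::printf("%d\n", v);
  }
  return 0;
}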
@@ -648,6 +675,12 @@
     case tok::kw_enum:
       parseEnum();
       break;
+    case tok::kw_typedef:
+      nextToken();
+      // FIXME: Use the IdentifierTable instead.
+      if (FormatTok->TokenText == "NS_ENUM")
+        parseEnum();
+      break;
     case tok::kw_struct:
     case tok::kw_union:
     case tok::kw_class:
@@ -667,9 +700,13 @@
       break;
     case tok::caret:
       nextToken();
-      if (FormatTok->is(tok::l_brace)) {
+      if (FormatTok->Tok.isAnyIdentifier() ||
+          FormatTok->isSimpleTypeSpecifier())
+        nextToken();
+      if (FormatTok->is(tok::l_paren))
+        parseParens();
+      if (FormatTok->is(tok::l_brace))
         parseChildBlock();
-      }
       break;
     case tok::l_brace:
       if (!tryToParseBracedList()) {
@@ -677,10 +714,9 @@
         // structural element.
         // FIXME: Figure out cases where this is not true, and add projections
         // for them (the one we know is missing is lambdas).
-        if (Style.BreakBeforeBraces == FormatStyle::BS_Linux ||
-            Style.BreakBeforeBraces == FormatStyle::BS_Stroustrup ||
-            Style.BreakBeforeBraces == FormatStyle::BS_Allman)
+        if (Style.BreakBeforeBraces != FormatStyle::BS_Attach)
           addUnwrappedLine();
+        FormatTok->Type = TT_FunctionLBrace;
         parseBlock(/*MustBeDeclaration=*/false);
         addUnwrappedLine();
         return;
@@ -699,8 +735,8 @@
         // Recognize function-like macro usages without trailing semicolon.
         if (FormatTok->Tok.is(tok::l_paren)) {
           parseParens();
-          if (FormatTok->HasUnescapedNewline &&
-              tokenCanStartNewLine(FormatTok->Tok)) {
+          if (FormatTok->NewlinesBefore > 0 &&
+              tokenCanStartNewLine(FormatTok->Tok) && Text == Text.upper()) {
             addUnwrappedLine();
             return;
           }
@@ -720,7 +756,7 @@
       }
       break;
     case tok::l_square:
-      tryToParseLambda();
+      parseSquare();
       break;
     default:
       nextToken();
@@ -729,36 +765,50 @@
   } while (!eof());
 }
 
-void UnwrappedLineParser::tryToParseLambda() {
+bool UnwrappedLineParser::tryToParseLambda() {
   // FIXME: This is a dirty way to access the previous token. Find a better
   // solution.
   if (!Line->Tokens.empty() &&
-      Line->Tokens.back().Tok->isOneOf(tok::identifier, tok::kw_operator)) {
+      (Line->Tokens.back().Tok->isOneOf(tok::identifier, tok::kw_operator) ||
+       Line->Tokens.back().Tok->closesScope() ||
+       Line->Tokens.back().Tok->isSimpleTypeSpecifier())) {
     nextToken();
-    return;
+    return false;
   }
   assert(FormatTok->is(tok::l_square));
   FormatToken &LSquare = *FormatTok;
   if (!tryToParseLambdaIntroducer())
-    return;
+    return false;
 
   while (FormatTok->isNot(tok::l_brace)) {
+    if (FormatTok->isSimpleTypeSpecifier()) {
+      nextToken();
+      continue;
+    }
     switch (FormatTok->Tok.getKind()) {
     case tok::l_brace:
       break;
     case tok::l_paren:
       parseParens();
       break;
+    case tok::less:
+    case tok::greater:
     case tok::identifier:
+    case tok::coloncolon:
     case tok::kw_mutable:
       nextToken();
       break;
+    case tok::arrow:
+      FormatTok->Type = TT_TrailingReturnArrow;
+      nextToken();
+      break;
     default:
-      return;
+      return true;
     }
   }
   LSquare.Type = TT_LambdaLSquare;
   parseChildBlock();
+  return true;
 }
 
 bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
@@ -861,40 +911,6 @@
   return false;
 }
 
-void UnwrappedLineParser::parseReturn() {
-  nextToken();
-
-  do {
-    switch (FormatTok->Tok.getKind()) {
-    case tok::l_brace:
-      parseBracedList();
-      if (FormatTok->Tok.isNot(tok::semi)) {
-        // Assume missing ';'.
-        addUnwrappedLine();
-        return;
-      }
-      break;
-    case tok::l_paren:
-      parseParens();
-      break;
-    case tok::r_brace:
-      // Assume missing ';'.
-      addUnwrappedLine();
-      return;
-    case tok::semi:
-      nextToken();
-      addUnwrappedLine();
-      return;
-    case tok::l_square:
-      tryToParseLambda();
-      break;
-    default:
-      nextToken();
-      break;
-    }
-  } while (!eof());
-}
-
 void UnwrappedLineParser::parseParens() {
   assert(FormatTok->Tok.is(tok::l_paren) && "'(' expected.");
   nextToken();
@@ -930,6 +946,42 @@
   } while (!eof());
 }
 
+void UnwrappedLineParser::parseSquare() {
+  assert(FormatTok->Tok.is(tok::l_square) && "'[' expected.");
+  if (tryToParseLambda())
+    return;
+  do {
+    switch (FormatTok->Tok.getKind()) {
+    case tok::l_paren:
+      parseParens();
+      break;
+    case tok::r_square:
+      nextToken();
+      return;
+    case tok::r_brace:
+      // A "}" inside parenthesis is an error if there wasn't a matching "{".
+      return;
+    case tok::l_square:
+      parseSquare();
+      break;
+    case tok::l_brace: {
+      if (!tryToParseBracedList()) {
+        parseChildBlock();
+      }
+      break;
+    }
+    case tok::at:
+      nextToken();
+      if (FormatTok->Tok.is(tok::l_brace))
+        parseBracedList();
+      break;
+    default:
+      nextToken();
+      break;
+    }
+  } while (!eof());
+}
+
 void UnwrappedLineParser::parseIfThenElse() {
   assert(FormatTok->Tok.is(tok::kw_if) && "'if' expected");
   nextToken();
@@ -937,13 +989,14 @@
     parseParens();
   bool NeedsUnwrappedLine = false;
   if (FormatTok->Tok.is(tok::l_brace)) {
-    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman)
-      addUnwrappedLine();
+    CompoundStatementIndenter Indenter(this, Style, Line->Level);
     parseBlock(/*MustBeDeclaration=*/false);
-    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman)
+    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
+        Style.BreakBeforeBraces == FormatStyle::BS_GNU) {
       addUnwrappedLine();
-    else
+    } else {
       NeedsUnwrappedLine = true;
+    }
   } else {
     addUnwrappedLine();
     ++Line->Level;
@@ -953,8 +1006,7 @@
   if (FormatTok->Tok.is(tok::kw_else)) {
     nextToken();
     if (FormatTok->Tok.is(tok::l_brace)) {
-      if (Style.BreakBeforeBraces == FormatStyle::BS_Allman)
-        addUnwrappedLine();
+      CompoundStatementIndenter Indenter(this, Style, Line->Level);
       parseBlock(/*MustBeDeclaration=*/false);
       addUnwrappedLine();
     } else if (FormatTok->Tok.is(tok::kw_if)) {
@@ -977,7 +1029,8 @@
     nextToken();
   if (FormatTok->Tok.is(tok::l_brace)) {
     if (Style.BreakBeforeBraces == FormatStyle::BS_Linux ||
-        Style.BreakBeforeBraces == FormatStyle::BS_Allman)
+        Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
+        Style.BreakBeforeBraces == FormatStyle::BS_GNU)
       addUnwrappedLine();
 
     bool AddLevel = Style.NamespaceIndentation == FormatStyle::NI_All ||
@@ -994,14 +1047,14 @@
 }
 
 void UnwrappedLineParser::parseForOrWhileLoop() {
-  assert((FormatTok->Tok.is(tok::kw_for) || FormatTok->Tok.is(tok::kw_while)) &&
-         "'for' or 'while' expected");
+  assert((FormatTok->Tok.is(tok::kw_for) || FormatTok->Tok.is(tok::kw_while) ||
+          FormatTok->IsForEachMacro) &&
+         "'for', 'while' or foreach macro expected");
   nextToken();
   if (FormatTok->Tok.is(tok::l_paren))
     parseParens();
   if (FormatTok->Tok.is(tok::l_brace)) {
-    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman)
-      addUnwrappedLine();
+    CompoundStatementIndenter Indenter(this, Style, Line->Level);
     parseBlock(/*MustBeDeclaration=*/false);
     addUnwrappedLine();
   } else {
@@ -1016,9 +1069,10 @@
   assert(FormatTok->Tok.is(tok::kw_do) && "'do' expected");
   nextToken();
   if (FormatTok->Tok.is(tok::l_brace)) {
-    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman)
-      addUnwrappedLine();
+    CompoundStatementIndenter Indenter(this, Style, Line->Level);
     parseBlock(/*MustBeDeclaration=*/false);
+    if (Style.BreakBeforeBraces == FormatStyle::BS_GNU)
+      addUnwrappedLine();
   } else {
     addUnwrappedLine();
     ++Line->Level;
@@ -1042,17 +1096,20 @@
   if (Line->Level > 1 || (!Line->InPPDirective && Line->Level > 0))
     --Line->Level;
   if (CommentsBeforeNextToken.empty() && FormatTok->Tok.is(tok::l_brace)) {
-    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman)
-      addUnwrappedLine();
+    CompoundStatementIndenter Indenter(this, Style, Line->Level);
     parseBlock(/*MustBeDeclaration=*/false);
     if (FormatTok->Tok.is(tok::kw_break)) {
-      // "break;" after "}" on its own line only for BS_Allman
-      if (Style.BreakBeforeBraces == FormatStyle::BS_Allman)
+      // "break;" after "}" on its own line only for BS_Allman and BS_GNU
+      if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
+          Style.BreakBeforeBraces == FormatStyle::BS_GNU) {
         addUnwrappedLine();
+      }
       parseStructuralElement();
     }
+    addUnwrappedLine();
+  } else {
+    addUnwrappedLine();
   }
-  addUnwrappedLine();
   Line->Level = OldLineLevel;
 }
 
@@ -1071,8 +1128,7 @@
   if (FormatTok->Tok.is(tok::l_paren))
     parseParens();
   if (FormatTok->Tok.is(tok::l_brace)) {
-    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman)
-      addUnwrappedLine();
+    CompoundStatementIndenter Indenter(this, Style, Line->Level);
     parseBlock(/*MustBeDeclaration=*/false);
     addUnwrappedLine();
   } else {
@@ -1085,6 +1141,10 @@
 
 void UnwrappedLineParser::parseAccessSpecifier() {
   nextToken();
+  // Understand Qt's slots.
+  if (FormatTok->is(tok::identifier) &&
+      (FormatTok->TokenText == "slots" || FormatTok->TokenText == "Q_SLOTS"))
+    nextToken();
   // Otherwise, we don't know what it is, and we'd better keep the next token.
   if (FormatTok->Tok.is(tok::colon))
     nextToken();
@@ -1092,7 +1152,10 @@
 }
 
 void UnwrappedLineParser::parseEnum() {
-  nextToken();
+  if (FormatTok->Tok.is(tok::kw_enum)) {
+    // Won't be 'enum' for NS_ENUMs.
+    nextToken();
+  }
   // Eat up enum class ...
   if (FormatTok->Tok.is(tok::kw_class) ||
       FormatTok->Tok.is(tok::kw_struct))
@@ -1159,10 +1222,11 @@
   }
   if (FormatTok->Tok.is(tok::l_brace)) {
     if (Style.BreakBeforeBraces == FormatStyle::BS_Linux ||
-        Style.BreakBeforeBraces == FormatStyle::BS_Allman)
+        Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
+        Style.BreakBeforeBraces == FormatStyle::BS_GNU)
       addUnwrappedLine();
 
-    parseBlock(/*MustBeDeclaration=*/true, /*Addlevel=*/true,
+    parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
                /*MunchSemi=*/false);
   }
   // We fall through to parsing a structural element afterwards, so
@@ -1189,6 +1253,10 @@
       parseBlock(/*MustBeDeclaration=*/false);
       // In ObjC interfaces, nothing should be following the "}".
       addUnwrappedLine();
+    } else if (FormatTok->is(tok::r_brace)) {
+      // Ignore stray "}". parseStructuralElement doesn't consume them.
+      nextToken();
+      addUnwrappedLine();
     } else {
       parseStructuralElement();
     }
@@ -1312,6 +1380,7 @@
   bool CommentsInCurrentLine = true;
   do {
     FormatTok = Tokens->getNextToken();
+    assert(FormatTok);
     while (!Line->InPPDirective && FormatTok->Tok.is(tok::hash) &&
            (FormatTok->HasUnescapedNewline || FormatTok->IsFirst)) {
       // If there is an unfinished unwrapped line, we flush the preprocessor
diff --git a/lib/Format/UnwrappedLineParser.h b/lib/Format/UnwrappedLineParser.h
index f1f4e57..62e0b58 100644
--- a/lib/Format/UnwrappedLineParser.h
+++ b/lib/Format/UnwrappedLineParser.h
@@ -16,9 +16,9 @@
 #ifndef LLVM_CLANG_FORMAT_UNWRAPPED_LINE_PARSER_H
 #define LLVM_CLANG_FORMAT_UNWRAPPED_LINE_PARSER_H
 
+#include "FormatToken.h"
 #include "clang/Basic/IdentifierTable.h"
 #include "clang/Format/Format.h"
-#include "FormatToken.h"
 #include <list>
 
 namespace clang {
@@ -82,8 +82,8 @@
   void parseStructuralElement();
   bool tryToParseBracedList();
   bool parseBracedList(bool ContinueOnSemicolons = false);
-  void parseReturn();
   void parseParens();
+  void parseSquare();
   void parseIfThenElse();
   void parseForOrWhileLoop();
   void parseDoWhile();
@@ -98,7 +98,7 @@
   void parseObjCUntilAtEnd();
   void parseObjCInterfaceOrImplementation();
   void parseObjCProtocol();
-  void tryToParseLambda();
+  bool tryToParseLambda();
   bool tryToParseLambdaIntroducer();
   void addUnwrappedLine();
   bool eof() const;
@@ -112,7 +112,7 @@
   // FIXME: We are constantly running into bugs where Line.Level is incorrectly
   // subtracted from beyond 0. Introduce a method to subtract from Line.Level
   // and use that everywhere in the Parser.
-  OwningPtr<UnwrappedLine> Line;
+  std::unique_ptr<UnwrappedLine> Line;
 
   // Comments are sorted into unwrapped lines by whether they are in the same
   // line as the previous token, or not. If not, they belong to the next token.
@@ -185,6 +185,7 @@
   std::stack<int> PPChainBranchIndex;
 
   friend class ScopedLineState;
+  friend class CompoundStatementIndenter;
 };
 
 struct UnwrappedLineNode {
diff --git a/lib/Format/WhitespaceManager.cpp b/lib/Format/WhitespaceManager.cpp
index 26a8d41..4c393ed 100644
--- a/lib/Format/WhitespaceManager.cpp
+++ b/lib/Format/WhitespaceManager.cpp
@@ -193,6 +193,10 @@
     if (Changes[i].IsTrailingComment) {
       assert(Column >= Changes[i].StartOfTokenColumn);
       Changes[i].Spaces += Column - Changes[i].StartOfTokenColumn;
+      if (i + 1 != End) {
+        Changes[i + 1].PreviousEndOfTokenColumn +=
+            Column - Changes[i].StartOfTokenColumn;
+      }
       Changes[i].StartOfTokenColumn = Column;
     }
   }
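When a trailing comment is pushed right to its alignment column, the added hunk also shifts the next change's PreviousEndOfTokenColumn by the same delta, so the recorded geometry stays consistent. A small worked sketch of that bookkeeping (field names mirror the patch, but the struct is a simplified stand-in, not the WhitespaceManager::Change type):

#include <cstdio>

// Only the fields the alignment pass touches.
struct ChangeStub {
  int Spaces;                   // spaces inserted before the token
  int StartOfTokenColumn;       // column where the token currently starts
  int PreviousEndOfTokenColumn; // end column of the token before this change
  bool IsTrailingComment;
};

int main() {
  ChangeStub Changes[2] = {
      {1, 20, 18, /*IsTrailingComment=*/true},  // "// note" starting at column 20
      {0, 0, 27, /*IsTrailingComment=*/false},  // next token remembers the comment ending at 27
  };
  const int Column = 30; // alignment column chosen for this comment block

  int Delta = Column - Changes[0].StartOfTokenColumn; // 10 extra columns
  Changes[0].Spaces += Delta;
  Changes[1].PreviousEndOfTokenColumn += Delta; // keep the next record in sync
  Changes[0].StartOfTokenColumn = Column;

  std::printf("comment now starts at %d, next token records end %d\n",
              Changes[0].StartOfTokenColumn,
              Changes[1].PreviousEndOfTokenColumn); // 30 and 37
  return 0;
}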
diff --git a/lib/Format/WhitespaceManager.h b/lib/Format/WhitespaceManager.h
index ae62023..f94464f 100644
--- a/lib/Format/WhitespaceManager.h
+++ b/lib/Format/WhitespaceManager.h
@@ -51,7 +51,7 @@
                          unsigned StartOfTokenColumn,
                          bool InPPDirective = false);
 
-  /// \brief Adds information about an unchangable token's whitespace.
+  /// \brief Adds information about an unchangeable token's whitespace.
   ///
   /// Needs to be called for every token for which \c replaceWhitespace
   /// was not called.
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
index 1ef4c18..f90c0a9 100644
--- a/lib/Frontend/ASTConsumers.cpp
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -41,7 +41,7 @@
         : Out(Out ? *Out : llvm::outs()), Dump(Dump),
           FilterString(FilterString), DumpLookups(DumpLookups) {}
 
-    virtual void HandleTranslationUnit(ASTContext &Context) {
+    void HandleTranslationUnit(ASTContext &Context) override {
       TranslationUnitDecl *D = Context.getTranslationUnitDecl();
 
       if (FilterString.empty())
@@ -101,13 +101,13 @@
     ASTDeclNodeLister(raw_ostream *Out = NULL)
         : Out(Out ? *Out : llvm::outs()) {}
 
-    virtual void HandleTranslationUnit(ASTContext &Context) {
+    void HandleTranslationUnit(ASTContext &Context) override {
       TraverseDecl(Context.getTranslationUnitDecl());
     }
 
     bool shouldWalkTypesOfTypeLocs() const { return false; }
 
-    virtual bool VisitNamedDecl(NamedDecl *D) {
+    bool VisitNamedDecl(NamedDecl *D) {
       D->printQualifiedName(Out);
       Out << '\n';
       return true;
@@ -138,11 +138,11 @@
   class ASTViewer : public ASTConsumer {
     ASTContext *Context;
   public:
-    void Initialize(ASTContext &Context) {
+    void Initialize(ASTContext &Context) override {
       this->Context = &Context;
     }
 
-    virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+    bool HandleTopLevelDecl(DeclGroupRef D) override {
       for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
         HandleTopLevelSingleDecl(*I);
       return true;
@@ -177,7 +177,7 @@
 public:
   DeclContextPrinter() : Out(llvm::errs()) {}
 
-  void HandleTranslationUnit(ASTContext &C) {
+  void HandleTranslationUnit(ASTContext &C) override {
     PrintDeclContext(C.getTranslationUnitDecl(), 4);
   }
 
@@ -259,13 +259,12 @@
     // Print the parameters.
     Out << "(";
     bool PrintComma = false;
-    for (FunctionDecl::param_const_iterator I = FD->param_begin(),
-           E = FD->param_end(); I != E; ++I) {
+    for (auto I : FD->params()) {
       if (PrintComma)
         Out << ", ";
       else
         PrintComma = true;
-      Out << **I;
+      Out << *I;
     }
     Out << ")";
     break;
@@ -369,8 +368,7 @@
   Out << "\n";
 
   // Print decls in the DeclContext.
-  for (DeclContext::decl_iterator I = DC->decls_begin(), E = DC->decls_end();
-       I != E; ++I) {
+  for (auto *I : DC->decls()) {
     for (unsigned i = 0; i < Indentation; ++i)
       Out << "  ";
 
@@ -394,58 +392,58 @@
     case Decl::CXXDestructor:
     case Decl::CXXConversion:
     {
-      DeclContext* DC = cast<DeclContext>(*I);
+      DeclContext* DC = cast<DeclContext>(I);
       PrintDeclContext(DC, Indentation+2);
       break;
     }
     case Decl::IndirectField: {
-      IndirectFieldDecl* IFD = cast<IndirectFieldDecl>(*I);
+      IndirectFieldDecl* IFD = cast<IndirectFieldDecl>(I);
       Out << "<IndirectField> " << *IFD << '\n';
       break;
     }
     case Decl::Label: {
-      LabelDecl *LD = cast<LabelDecl>(*I);
+      LabelDecl *LD = cast<LabelDecl>(I);
       Out << "<Label> " << *LD << '\n';
       break;
     }
     case Decl::Field: {
-      FieldDecl *FD = cast<FieldDecl>(*I);
+      FieldDecl *FD = cast<FieldDecl>(I);
       Out << "<field> " << *FD << '\n';
       break;
     }
     case Decl::Typedef:
     case Decl::TypeAlias: {
-      TypedefNameDecl* TD = cast<TypedefNameDecl>(*I);
+      TypedefNameDecl* TD = cast<TypedefNameDecl>(I);
       Out << "<typedef> " << *TD << '\n';
       break;
     }
     case Decl::EnumConstant: {
-      EnumConstantDecl* ECD = cast<EnumConstantDecl>(*I);
+      EnumConstantDecl* ECD = cast<EnumConstantDecl>(I);
       Out << "<enum constant> " << *ECD << '\n';
       break;
     }
     case Decl::Var: {
-      VarDecl* VD = cast<VarDecl>(*I);
+      VarDecl* VD = cast<VarDecl>(I);
       Out << "<var> " << *VD << '\n';
       break;
     }
     case Decl::ImplicitParam: {
-      ImplicitParamDecl* IPD = cast<ImplicitParamDecl>(*I);
+      ImplicitParamDecl* IPD = cast<ImplicitParamDecl>(I);
       Out << "<implicit parameter> " << *IPD << '\n';
       break;
     }
     case Decl::ParmVar: {
-      ParmVarDecl* PVD = cast<ParmVarDecl>(*I);
+      ParmVarDecl* PVD = cast<ParmVarDecl>(I);
       Out << "<parameter> " << *PVD << '\n';
       break;
     }
     case Decl::ObjCProperty: {
-      ObjCPropertyDecl* OPD = cast<ObjCPropertyDecl>(*I);
+      ObjCPropertyDecl* OPD = cast<ObjCPropertyDecl>(I);
       Out << "<objc property> " << *OPD << '\n';
       break;
     }
     case Decl::FunctionTemplate: {
-      FunctionTemplateDecl* FTD = cast<FunctionTemplateDecl>(*I);
+      FunctionTemplateDecl* FTD = cast<FunctionTemplateDecl>(I);
       Out << "<function template> " << *FTD << '\n';
       break;
     }
@@ -458,21 +456,21 @@
       break;
     }
     case Decl::NamespaceAlias: {
-      NamespaceAliasDecl* NAD = cast<NamespaceAliasDecl>(*I);
+      NamespaceAliasDecl* NAD = cast<NamespaceAliasDecl>(I);
       Out << "<namespace alias> " << *NAD << '\n';
       break;
     }
     case Decl::ClassTemplate: {
-      ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(*I);
+      ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(I);
       Out << "<class template> " << *CTD << '\n';
       break;
     }
     case Decl::OMPThreadPrivate: {
-      Out << "<omp threadprivate> " << '"' << *I << "\"\n";
+      Out << "<omp threadprivate> " << '"' << I << "\"\n";
       break;
     }
     default:
-      Out << "DeclKind: " << DK << '"' << *I << "\"\n";
+      Out << "DeclKind: " << DK << '"' << I << "\"\n";
       llvm_unreachable("decl unhandled");
     }
   }
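The iterator loops over FD->params() and DC->decls() are rewritten with C++11 range-based for; the element is still a Decl pointer, so the per-kind casts are unchanged. A minimal sketch of the idiom on a generic container (plain inheritance and dynamic_cast stand in for Clang's Decl hierarchy and llvm::dyn_cast; the loop shape is the point):

#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Decl { virtual ~Decl() = default; };
struct NamedDecl : Decl {
  explicit NamedDecl(std::string N) : Name(std::move(N)) {}
  std::string Name;
};

int main() {
  std::vector<std::unique_ptr<Decl>> Decls;
  Decls.push_back(std::unique_ptr<Decl>(new NamedDecl("foo")));
  Decls.push_back(std::unique_ptr<Decl>(new Decl()));

  // Range-based for with auto*, then a checked down-cast per element.
  for (const auto &Owner : Decls) {
    const Decl *D = Owner.get();
    if (const auto *ND = dynamic_cast<const NamedDecl *>(D))
      std::cout << ND->Name << '\n';
  }
  return 0;
}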
diff --git a/lib/Frontend/ASTMerge.cpp b/lib/Frontend/ASTMerge.cpp
index b6c644e..ff6434c 100644
--- a/lib/Frontend/ASTMerge.cpp
+++ b/lib/Frontend/ASTMerge.cpp
@@ -57,16 +57,14 @@
                          /*MinimalImport=*/false);
 
     TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
-    for (DeclContext::decl_iterator D = TU->decls_begin(), 
-                                 DEnd = TU->decls_end();
-         D != DEnd; ++D) {
+    for (auto *D : TU->decls()) {
       // Don't re-import __va_list_tag, __builtin_va_list.
-      if (NamedDecl *ND = dyn_cast<NamedDecl>(*D))
+      if (const auto *ND = dyn_cast<NamedDecl>(D))
         if (IdentifierInfo *II = ND->getIdentifier())
           if (II->isStr("__va_list_tag") || II->isStr("__builtin_va_list"))
             continue;
       
-      Importer.Import(*D);
+      Importer.Import(D);
     }
 
     delete Unit;
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index a8c5876..c4f7596 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -20,6 +20,7 @@
 #include "clang/Basic/Diagnostic.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/VirtualFileSystem.h"
 #include "clang/Frontend/CompilerInstance.h"
 #include "clang/Frontend/FrontendActions.h"
 #include "clang/Frontend/FrontendDiagnostic.h"
@@ -35,9 +36,7 @@
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringSet.h"
-#include "llvm/Support/Atomic.h"
 #include "llvm/Support/CrashRecoveryContext.h"
-#include "llvm/Support/FileSystem.h"
 #include "llvm/Support/Host.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/Mutex.h"
@@ -45,6 +44,7 @@
 #include "llvm/Support/Path.h"
 #include "llvm/Support/Timer.h"
 #include "llvm/Support/raw_ostream.h"
+#include <atomic>
 #include <cstdio>
 #include <cstdlib>
 #include <sys/stat.h>
@@ -191,10 +191,7 @@
 };
 
 void ASTUnit::clearFileLevelDecls() {
-  for (FileDeclsTy::iterator
-         I = FileDecls.begin(), E = FileDecls.end(); I != E; ++I)
-    delete I->second;
-  FileDecls.clear();
+  llvm::DeleteContainerSeconds(FileDecls);
 }
 
 void ASTUnit::CleanTemporaryFiles() {
@@ -214,7 +211,7 @@
 /// \brief Tracks the number of ASTUnit objects that are currently active.
 ///
 /// Used for debugging purposes only.
-static llvm::sys::cas_flag ActiveASTUnitObjects;
+static std::atomic<unsigned> ActiveASTUnitObjects;
 
 ASTUnit::ASTUnit(bool _MainFileIsAST)
   : Reader(0), HadModuleLoaderFatalFailure(false),
@@ -231,10 +228,8 @@
     PreambleTopLevelHashValue(0),
     CurrentTopLevelHashValue(0),
     UnsafeToFree(false) { 
-  if (getenv("LIBCLANG_OBJTRACKING")) {
-    llvm::sys::AtomicIncrement(&ActiveASTUnitObjects);
-    fprintf(stderr, "+++ %d translation units\n", ActiveASTUnitObjects);
-  }    
+  if (getenv("LIBCLANG_OBJTRACKING"))
+    fprintf(stderr, "+++ %u translation units\n", ++ActiveASTUnitObjects);
 }
 
 ASTUnit::~ASTUnit() {
@@ -267,10 +262,8 @@
 
   ClearCachedCompletionResults();  
   
-  if (getenv("LIBCLANG_OBJTRACKING")) {
-    llvm::sys::AtomicDecrement(&ActiveASTUnitObjects);
-    fprintf(stderr, "--- %d translation units\n", ActiveASTUnitObjects);
-  }    
+  if (getenv("LIBCLANG_OBJTRACKING"))
+    fprintf(stderr, "--- %u translation units\n", --ActiveASTUnitObjects);
 }
 
 void ASTUnit::setPreprocessor(Preprocessor *pp) { PP = pp; }
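The llvm::sys::cas_flag counter plus AtomicIncrement/AtomicDecrement pair becomes a std::atomic<unsigned>, whose pre-increment and pre-decrement are atomic and return the updated value; that is why the fprintf calls can fold the update into the argument. A small sketch of the same pattern (names are illustrative):

#include <atomic>
#include <cstdio>

static std::atomic<unsigned> ActiveObjects(0);

struct Tracked {
  Tracked() { std::printf("+++ %u objects\n", ++ActiveObjects); }  // atomic ++, yields the new count
  ~Tracked() { std::printf("--- %u objects\n", --ActiveObjects); } // atomic --, yields the new count
};

int main() {
  Tracked A;          // prints 1
  { Tracked B; }      // prints 2, then 1 when B is destroyed
  return 0;           // A's destructor prints 0
}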
@@ -526,8 +519,8 @@
       Counter(Counter),
       InitializedLanguage(false) {}
 
-  virtual bool ReadLanguageOptions(const LangOptions &LangOpts,
-                                   bool Complain) {
+  bool ReadLanguageOptions(const LangOptions &LangOpts,
+                           bool Complain) override {
     if (InitializedLanguage)
       return false;
     
@@ -538,8 +531,8 @@
     return false;
   }
 
-  virtual bool ReadTargetOptions(const TargetOptions &TargetOpts,
-                                 bool Complain) {
+  bool ReadTargetOptions(const TargetOptions &TargetOpts,
+                         bool Complain) override {
     // If we've already initialized the target, don't do it again.
     if (Target)
       return false;
@@ -552,7 +545,8 @@
     return false;
   }
 
-  virtual void ReadCounter(const serialization::ModuleFile &M, unsigned Value) {
+  void ReadCounter(const serialization::ModuleFile &M,
+                   unsigned Value) override {
     Counter = Value;
   }
 
@@ -590,14 +584,14 @@
                           SmallVectorImpl<StoredDiagnostic> &StoredDiags)
     : StoredDiags(StoredDiags), SourceMgr(0) { }
 
-  virtual void BeginSourceFile(const LangOptions &LangOpts,
-                               const Preprocessor *PP = 0) {
+  void BeginSourceFile(const LangOptions &LangOpts,
+                       const Preprocessor *PP = 0) override {
     if (PP)
       SourceMgr = &PP->getSourceManager();
   }
 
-  virtual void HandleDiagnostic(DiagnosticsEngine::Level Level,
-                                const Diagnostic &Info);
+  void HandleDiagnostic(DiagnosticsEngine::Level Level,
+                        const Diagnostic &Info) override;
 };
 
 /// \brief RAII object that optionally captures diagnostics, if
@@ -680,12 +674,11 @@
                               IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
                                   const FileSystemOptions &FileSystemOpts,
                                   bool OnlyLocalDecls,
-                                  RemappedFile *RemappedFiles,
-                                  unsigned NumRemappedFiles,
+                                  ArrayRef<RemappedFile> RemappedFiles,
                                   bool CaptureDiagnostics,
                                   bool AllowPCHWithCompilerErrors,
                                   bool UserFilesAreVolatile) {
-  OwningPtr<ASTUnit> AST(new ASTUnit(true));
+  std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
 
   // Recover resources if we crash before exiting this method.
   llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -711,62 +704,18 @@
                                          AST->getDiagnostics(),
                                          AST->ASTFileLangOpts,
                                          /*Target=*/0));
-  
-  for (unsigned I = 0; I != NumRemappedFiles; ++I) {
-    FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
-    if (const llvm::MemoryBuffer *
-          memBuf = fileOrBuf.dyn_cast<const llvm::MemoryBuffer *>()) {
-      // Create the file entry for the file that we're mapping from.
-      const FileEntry *FromFile
-        = AST->getFileManager().getVirtualFile(RemappedFiles[I].first,
-                                               memBuf->getBufferSize(),
-                                               0);
-      if (!FromFile) {
-        AST->getDiagnostics().Report(diag::err_fe_remap_missing_from_file)
-          << RemappedFiles[I].first;
-        delete memBuf;
-        continue;
-      }
-      
-      // Override the contents of the "from" file with the contents of
-      // the "to" file.
-      AST->getSourceManager().overrideFileContents(FromFile, memBuf);
 
-    } else {
-      const char *fname = fileOrBuf.get<const char *>();
-      const FileEntry *ToFile = AST->FileMgr->getFile(fname);
-      if (!ToFile) {
-        AST->getDiagnostics().Report(diag::err_fe_remap_missing_to_file)
-        << RemappedFiles[I].first << fname;
-        continue;
-      }
+  PreprocessorOptions *PPOpts = new PreprocessorOptions();
 
-      // Create the file entry for the file that we're mapping from.
-      const FileEntry *FromFile
-        = AST->getFileManager().getVirtualFile(RemappedFiles[I].first,
-                                               ToFile->getSize(),
-                                               0);
-      if (!FromFile) {
-        AST->getDiagnostics().Report(diag::err_fe_remap_missing_from_file)
-          << RemappedFiles[I].first;
-        delete memBuf;
-        continue;
-      }
-      
-      // Override the contents of the "from" file with the contents of
-      // the "to" file.
-      AST->getSourceManager().overrideFileContents(FromFile, ToFile);
-    }
-  }
-  
+  for (unsigned I = 0, N = RemappedFiles.size(); I != N; ++I)
+    PPOpts->addRemappedFile(RemappedFiles[I].first, RemappedFiles[I].second);
+
   // Gather Info for preprocessor construction later on.
 
   HeaderSearch &HeaderInfo = *AST->HeaderInfo.get();
   unsigned Counter;
 
-  OwningPtr<ASTReader> Reader;
-
-  AST->PP = new Preprocessor(new PreprocessorOptions(),
+  AST->PP = new Preprocessor(PPOpts,
                              AST->getDiagnostics(), AST->ASTFileLangOpts,
                              /*Target=*/0, AST->getSourceManager(), HeaderInfo, 
                              *AST, 
@@ -788,21 +737,17 @@
   bool disableValid = false;
   if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
     disableValid = true;
-  Reader.reset(new ASTReader(PP, Context,
+  AST->Reader = new ASTReader(PP, Context,
                              /*isysroot=*/"",
                              /*DisableValidation=*/disableValid,
-                             AllowPCHWithCompilerErrors));
-  
-  // Recover resources if we crash before exiting this method.
-  llvm::CrashRecoveryContextCleanupRegistrar<ASTReader>
-    ReaderCleanup(Reader.get());
+                             AllowPCHWithCompilerErrors);
 
-  Reader->setListener(new ASTInfoCollector(*AST->PP, Context,
+  AST->Reader->setListener(new ASTInfoCollector(*AST->PP, Context,
                                            AST->ASTFileLangOpts,
                                            AST->TargetOpts, AST->Target, 
                                            Counter));
 
-  switch (Reader->ReadAST(Filename, serialization::MK_MainFile,
+  switch (AST->Reader->ReadAST(Filename, serialization::MK_MainFile,
                           SourceLocation(), ASTReader::ARR_None)) {
   case ASTReader::Success:
     break;
@@ -817,21 +762,14 @@
     return NULL;
   }
 
-  AST->OriginalSourceFile = Reader->getOriginalSourceFile();
+  AST->OriginalSourceFile = AST->Reader->getOriginalSourceFile();
 
   PP.setCounterValue(Counter);
 
   // Attach the AST reader to the AST context as an external AST
   // source, so that declarations will be deserialized from the
   // AST file as needed.
-  ASTReader *ReaderPtr = Reader.get();
-  OwningPtr<ExternalASTSource> Source(Reader.take());
-
-  // Unregister the cleanup for ASTReader.  It will get cleaned up
-  // by the ASTUnit cleanup.
-  ReaderCleanup.unregister();
-
-  Context.setExternalSource(Source);
+  Context.setExternalSource(AST->Reader);
 
   // Create an AST consumer, even though it isn't used.
   AST->Consumer.reset(new ASTConsumer);
@@ -839,13 +777,12 @@
   // Create a semantic analysis object and tell the AST reader about it.
   AST->TheSema.reset(new Sema(PP, Context, *AST->Consumer));
   AST->TheSema->Initialize();
-  ReaderPtr->InitializeSema(*AST->TheSema);
-  AST->Reader = ReaderPtr;
+  AST->Reader->InitializeSema(*AST->TheSema);
 
   // Tell the diagnostic client that we have started a source file.
   AST->getDiagnostics().getClient()->BeginSourceFile(Context.getLangOpts(),&PP);
 
-  return AST.take();
+  return AST.release();
 }
 
 namespace {
@@ -857,9 +794,9 @@
   
 public:
   explicit MacroDefinitionTrackerPPCallbacks(unsigned &Hash) : Hash(Hash) { }
-  
-  virtual void MacroDefined(const Token &MacroNameTok,
-                            const MacroDirective *MD) {
+
+  void MacroDefined(const Token &MacroNameTok,
+                    const MacroDirective *MD) override {
     Hash = llvm::HashString(MacroNameTok.getIdentifierInfo()->getName(), Hash);
   }
 };
@@ -881,10 +818,9 @@
       // For an unscoped enum include the enumerators in the hash since they
       // enter the top-level namespace.
       if (!EnumD->isScoped()) {
-        for (EnumDecl::enumerator_iterator EI = EnumD->enumerator_begin(),
-               EE = EnumD->enumerator_end(); EI != EE; ++EI) {
-          if ((*EI)->getIdentifier())
-            Hash = llvm::HashString((*EI)->getIdentifier()->getName(), Hash);
+        for (const auto *EI : EnumD->enumerators()) {
+          if (EI->getIdentifier())
+            Hash = llvm::HashString(EI->getIdentifier()->getName(), Hash);
         }
       }
     }
@@ -937,31 +873,30 @@
   void handleFileLevelDecl(Decl *D) {
     Unit.addFileLevelDecl(D);
     if (NamespaceDecl *NSD = dyn_cast<NamespaceDecl>(D)) {
-      for (NamespaceDecl::decl_iterator
-             I = NSD->decls_begin(), E = NSD->decls_end(); I != E; ++I)
-        handleFileLevelDecl(*I);
+      for (auto *I : NSD->decls())
+        handleFileLevelDecl(I);
     }
   }
 
-  bool HandleTopLevelDecl(DeclGroupRef D) {
+  bool HandleTopLevelDecl(DeclGroupRef D) override {
     for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it)
       handleTopLevelDecl(*it);
     return true;
   }
 
   // We're not interested in "interesting" decls.
-  void HandleInterestingDecl(DeclGroupRef) {}
+  void HandleInterestingDecl(DeclGroupRef) override {}
 
-  void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
+  void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
     for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it)
       handleTopLevelDecl(*it);
   }
 
-  virtual ASTMutationListener *GetASTMutationListener() {
+  ASTMutationListener *GetASTMutationListener() override {
     return Unit.getASTMutationListener();
   }
 
-  virtual ASTDeserializationListener *GetASTDeserializationListener() {
+  ASTDeserializationListener *GetASTDeserializationListener() override {
     return Unit.getDeserializationListener();
   }
 };
@@ -970,8 +905,8 @@
 public:
   ASTUnit &Unit;
 
-  virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
-                                         StringRef InFile) {
+  ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+                                 StringRef InFile) override {
     CI.getPreprocessor().addPPCallbacks(
      new MacroDefinitionTrackerPPCallbacks(Unit.getCurrentTopLevelHashValue()));
     return new TopLevelDeclTrackerConsumer(Unit, 
@@ -981,8 +916,8 @@
 public:
   TopLevelDeclTrackerAction(ASTUnit &_Unit) : Unit(_Unit) {}
 
-  virtual bool hasCodeCompletionSupport() const { return false; }
-  virtual TranslationUnitKind getTranslationUnitKind()  { 
+  bool hasCodeCompletionSupport() const override { return false; }
+  TranslationUnitKind getTranslationUnitKind() override {
     return Unit.getTranslationUnitKind(); 
   }
 };
@@ -995,15 +930,15 @@
   explicit PrecompilePreambleAction(ASTUnit &Unit)
       : Unit(Unit), HasEmittedPreamblePCH(false) {}
 
-  virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
-                                         StringRef InFile);
+  ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+                                 StringRef InFile) override;
   bool hasEmittedPreamblePCH() const { return HasEmittedPreamblePCH; }
   void setHasEmittedPreamblePCH() { HasEmittedPreamblePCH = true; }
-  virtual bool shouldEraseOutputFiles() { return !hasEmittedPreamblePCH(); }
+  bool shouldEraseOutputFiles() override { return !hasEmittedPreamblePCH(); }
 
-  virtual bool hasCodeCompletionSupport() const { return false; }
-  virtual bool hasASTFileSupport() const { return false; }
-  virtual TranslationUnitKind getTranslationUnitKind() { return TU_Prefix; }
+  bool hasCodeCompletionSupport() const override { return false; }
+  bool hasASTFileSupport() const override { return false; }
+  TranslationUnitKind getTranslationUnitKind() override { return TU_Prefix; }
 };
 
 class PrecompilePreambleConsumer : public PCHGenerator {
@@ -1021,7 +956,7 @@
     Hash = 0;
   }
 
-  virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+  bool HandleTopLevelDecl(DeclGroupRef D) override {
     for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) {
       Decl *D = *it;
       // FIXME: Currently ObjC method declarations are incorrectly being
@@ -1036,7 +971,7 @@
     return true;
   }
 
-  virtual void HandleTranslationUnit(ASTContext &Ctx) {
+  void HandleTranslationUnit(ASTContext &Ctx) override {
     PCHGenerator::HandleTranslationUnit(Ctx);
     if (hasEmittedPCH()) {
       // Translate the top-level declarations we captured during
@@ -1120,7 +1055,7 @@
   }
   
   // Create the compiler instance to use for building the AST.
-  OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+  std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
 
   // Recover resources if we crash before exiting this method.
   llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
@@ -1206,10 +1141,10 @@
     // Keep track of the override buffer;
     SavedMainFileBuffer = OverrideMainBuffer;
   }
-  
-  OwningPtr<TopLevelDeclTrackerAction> Act(
-    new TopLevelDeclTrackerAction(*this));
-    
+
+  std::unique_ptr<TopLevelDeclTrackerAction> Act(
+      new TopLevelDeclTrackerAction(*this));
+
   // Recover resources if we crash before exiting this method.
   llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
     ActCleanup(Act.get());
@@ -1219,9 +1154,8 @@
 
   if (OverrideMainBuffer) {
     std::string ModName = getPreambleFile(this);
-    TranslateStoredDiagnostics(Clang->getModuleManager(), ModName,
-                               getSourceManager(), PreambleDiagnostics,
-                               StoredDiagnostics);
+    TranslateStoredDiagnostics(getFileManager(), getSourceManager(),
+                               PreambleDiagnostics, StoredDiagnostics);
   }
 
   if (!Act->Execute())
@@ -1345,20 +1279,83 @@
                                                        MaxLines));
 }
 
-static llvm::MemoryBuffer *CreatePaddedMainFileBuffer(llvm::MemoryBuffer *Old,
-                                                      unsigned NewSize,
-                                                      StringRef NewName) {
-  llvm::MemoryBuffer *Result
-    = llvm::MemoryBuffer::getNewUninitMemBuffer(NewSize, NewName);
-  memcpy(const_cast<char*>(Result->getBufferStart()), 
-         Old->getBufferStart(), Old->getBufferSize());
-  memset(const_cast<char*>(Result->getBufferStart()) + Old->getBufferSize(), 
-         ' ', NewSize - Old->getBufferSize() - 1);
-  const_cast<char*>(Result->getBufferEnd())[-1] = '\n';  
-  
+ASTUnit::PreambleFileHash
+ASTUnit::PreambleFileHash::createForFile(off_t Size, time_t ModTime) {
+  PreambleFileHash Result;
+  Result.Size = Size;
+  Result.ModTime = ModTime;
+  memset(Result.MD5, 0, sizeof(Result.MD5));
   return Result;
 }
 
+ASTUnit::PreambleFileHash ASTUnit::PreambleFileHash::createForMemoryBuffer(
+    const llvm::MemoryBuffer *Buffer) {
+  PreambleFileHash Result;
+  Result.Size = Buffer->getBufferSize();
+  Result.ModTime = 0;
+
+  llvm::MD5 MD5Ctx;
+  MD5Ctx.update(Buffer->getBuffer().data());
+  MD5Ctx.final(Result.MD5);
+
+  return Result;
+}
+
+namespace clang {
+bool operator==(const ASTUnit::PreambleFileHash &LHS,
+                const ASTUnit::PreambleFileHash &RHS) {
+  return LHS.Size == RHS.Size && LHS.ModTime == RHS.ModTime &&
+         memcmp(LHS.MD5, RHS.MD5, sizeof(LHS.MD5)) == 0;
+}
+} // namespace clang
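Preamble files are now keyed either by (size, mtime) for on-disk files or by an MD5 of the contents for in-memory buffers, and two hashes compare equal only when the same kind of evidence matches. A hedged sketch of hashing a buffer with llvm::MD5, assuming the 3.5-era Support/MD5.h interface (update(StringRef), final(MD5Result&), stringifyResult); it passes the whole StringRef so the digest covers every byte, including any embedded NULs:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::StringRef Contents("int x;\n"); // stand-in for MemoryBuffer::getBuffer()

  llvm::MD5 Hash;
  Hash.update(Contents);                // length comes from the StringRef
  llvm::MD5::MD5Result Digest;
  Hash.final(Digest);

  llvm::SmallString<32> Hex;
  llvm::MD5::stringifyResult(Digest, Hex);
  llvm::outs() << "md5: " << Hex.str() << "\n";
  return 0;
}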
+
+static std::pair<unsigned, unsigned>
+makeStandaloneRange(CharSourceRange Range, const SourceManager &SM,
+                    const LangOptions &LangOpts) {
+  CharSourceRange FileRange = Lexer::makeFileCharRange(Range, SM, LangOpts);
+  unsigned Offset = SM.getFileOffset(FileRange.getBegin());
+  unsigned EndOffset = SM.getFileOffset(FileRange.getEnd());
+  return std::make_pair(Offset, EndOffset);
+}
+
+static void makeStandaloneFixIt(const SourceManager &SM,
+                                const LangOptions &LangOpts,
+                                const FixItHint &InFix,
+                                ASTUnit::StandaloneFixIt &OutFix) {
+  OutFix.RemoveRange = makeStandaloneRange(InFix.RemoveRange, SM, LangOpts);
+  OutFix.InsertFromRange = makeStandaloneRange(InFix.InsertFromRange, SM,
+                                               LangOpts);
+  OutFix.CodeToInsert = InFix.CodeToInsert;
+  OutFix.BeforePreviousInsertions = InFix.BeforePreviousInsertions;
+}
+
+static void makeStandaloneDiagnostic(const LangOptions &LangOpts,
+                                     const StoredDiagnostic &InDiag,
+                                     ASTUnit::StandaloneDiagnostic &OutDiag) {
+  OutDiag.ID = InDiag.getID();
+  OutDiag.Level = InDiag.getLevel();
+  OutDiag.Message = InDiag.getMessage();
+  OutDiag.LocOffset = 0;
+  if (InDiag.getLocation().isInvalid())
+    return;
+  const SourceManager &SM = InDiag.getLocation().getManager();
+  SourceLocation FileLoc = SM.getFileLoc(InDiag.getLocation());
+  OutDiag.Filename = SM.getFilename(FileLoc);
+  if (OutDiag.Filename.empty())
+    return;
+  OutDiag.LocOffset = SM.getFileOffset(FileLoc);
+  for (StoredDiagnostic::range_iterator
+         I = InDiag.range_begin(), E = InDiag.range_end(); I != E; ++I) {
+    OutDiag.Ranges.push_back(makeStandaloneRange(*I, SM, LangOpts));
+  }
+  for (StoredDiagnostic::fixit_iterator
+         I = InDiag.fixit_begin(), E = InDiag.fixit_end(); I != E; ++I) {
+    ASTUnit::StandaloneFixIt Fix;
+    makeStandaloneFixIt(SM, LangOpts, *I, Fix);
+    OutDiag.FixIts.push_back(Fix);
+  }
+}
+
 /// \brief Attempt to build or re-use a precompiled preamble when (re-)parsing
 /// the source file.
 ///
@@ -1395,7 +1392,7 @@
     = ComputePreamble(*PreambleInvocation, MaxLines, CreatedPreambleBuffer);
 
   // If ComputePreamble() created the preamble buffer, take ownership of it.
-  OwningPtr<llvm::MemoryBuffer> OwnedPreambleBuffer;
+  std::unique_ptr<llvm::MemoryBuffer> OwnedPreambleBuffer;
   if (CreatedPreambleBuffer)
     OwnedPreambleBuffer.reset(NewPreamble.first);
 
@@ -1417,7 +1414,6 @@
     // new main file.
     if (Preamble.size() == NewPreamble.second.first &&
         PreambleEndsAtStartOfLine == NewPreamble.second.second &&
-        NewPreamble.first->getBufferSize() < PreambleReservedSize-2 &&
         memcmp(Preamble.getBufferStart(), NewPreamble.first->getBufferStart(),
                NewPreamble.second.first) == 0) {
       // The preamble has not changed. We may be able to re-use the precompiled
@@ -1428,13 +1424,13 @@
           
       // First, make a record of those files that have been overridden via
       // remapping or unsaved_files.
-      llvm::StringMap<std::pair<off_t, time_t> > OverriddenFiles;
+      llvm::StringMap<PreambleFileHash> OverriddenFiles;
       for (PreprocessorOptions::remapped_file_iterator
                 R = PreprocessorOpts.remapped_file_begin(),
              REnd = PreprocessorOpts.remapped_file_end();
            !AnyFileChanged && R != REnd;
            ++R) {
-        llvm::sys::fs::file_status Status;
+        vfs::Status Status;
         if (FileMgr->getNoncachedStatValue(R->second, Status)) {
           // If we can't stat the file we're remapping to, assume that something
           // horrible happened.
@@ -1442,7 +1438,7 @@
           break;
         }
 
-        OverriddenFiles[R->first] = std::make_pair(
+        OverriddenFiles[R->first] = PreambleFileHash::createForFile(
             Status.getSize(), Status.getLastModificationTime().toEpochTime());
       }
       for (PreprocessorOptions::remapped_file_buffer_iterator
@@ -1450,18 +1446,16 @@
              REnd = PreprocessorOpts.remapped_file_buffer_end();
            !AnyFileChanged && R != REnd;
            ++R) {
-        // FIXME: Should we actually compare the contents of file->buffer
-        // remappings?
-        OverriddenFiles[R->first] = std::make_pair(R->second->getBufferSize(), 
-                                                   0);
+        OverriddenFiles[R->first] =
+            PreambleFileHash::createForMemoryBuffer(R->second);
       }
        
       // Check whether anything has changed.
-      for (llvm::StringMap<std::pair<off_t, time_t> >::iterator 
+      for (llvm::StringMap<PreambleFileHash>::iterator 
              F = FilesInPreamble.begin(), FEnd = FilesInPreamble.end();
            !AnyFileChanged && F != FEnd; 
            ++F) {
-        llvm::StringMap<std::pair<off_t, time_t> >::iterator Overridden
+        llvm::StringMap<PreambleFileHash>::iterator Overridden
           = OverriddenFiles.find(F->first());
         if (Overridden != OverriddenFiles.end()) {
           // This file was remapped; check whether the newly-mapped file 
@@ -1472,13 +1466,13 @@
         }
         
         // The file was not remapped; check whether it has changed on disk.
-        llvm::sys::fs::file_status Status;
+        vfs::Status Status;
         if (FileMgr->getNoncachedStatValue(F->first(), Status)) {
           // If we can't stat the file, assume that something horrible happened.
           AnyFileChanged = true;
-        } else if (Status.getSize() != uint64_t(F->second.first) ||
+        } else if (Status.getSize() != uint64_t(F->second.Size) ||
                    Status.getLastModificationTime().toEpochTime() !=
-                       uint64_t(F->second.second))
+                       uint64_t(F->second.ModTime))
           AnyFileChanged = true;
       }
           
@@ -1492,11 +1486,8 @@
                               PreambleInvocation->getDiagnosticOpts());
         getDiagnostics().setNumWarnings(NumWarningsInPreamble);
 
-        // Create a version of the main file buffer that is padded to
-        // buffer size we reserved when creating the preamble.
-        return CreatePaddedMainFileBuffer(NewPreamble.first, 
-                                          PreambleReservedSize,
-                                          FrontendOpts.Inputs[0].getFile());
+        return llvm::MemoryBuffer::getMemBufferCopy(
+            NewPreamble.first->getBuffer(), FrontendOpts.Inputs[0].getFile());
       }
     }
 
@@ -1536,20 +1527,10 @@
   // We did not previously compute a preamble, or it can't be reused anyway.
   SimpleTimer PreambleTimer(WantTiming);
   PreambleTimer.setOutput("Precompiling preamble");
-  
-  // Create a new buffer that stores the preamble. The buffer also contains
-  // extra space for the original contents of the file (which will be present
-  // when we actually parse the file) along with more room in case the file
-  // grows.  
-  PreambleReservedSize = NewPreamble.first->getBufferSize();
-  if (PreambleReservedSize < 4096)
-    PreambleReservedSize = 8191;
-  else
-    PreambleReservedSize *= 2;
 
   // Save the preamble text for later; we'll need to compare against it for
   // subsequent reparses.
-  StringRef MainFilename = PreambleInvocation->getFrontendOpts().Inputs[0].getFile();
+  StringRef MainFilename = FrontendOpts.Inputs[0].getFile();
   Preamble.assign(FileMgr->getFile(MainFilename),
                   NewPreamble.first->getBufferStart(), 
                   NewPreamble.first->getBufferStart() 
@@ -1558,13 +1539,8 @@
 
   delete PreambleBuffer;
   PreambleBuffer
-    = llvm::MemoryBuffer::getNewUninitMemBuffer(PreambleReservedSize,
-                                                FrontendOpts.Inputs[0].getFile());
-  memcpy(const_cast<char*>(PreambleBuffer->getBufferStart()), 
-         NewPreamble.first->getBufferStart(), Preamble.size());
-  memset(const_cast<char*>(PreambleBuffer->getBufferStart()) + Preamble.size(), 
-         ' ', PreambleReservedSize - Preamble.size() - 1);
-  const_cast<char*>(PreambleBuffer->getBufferEnd())[-1] = '\n';  
+    = llvm::MemoryBuffer::getMemBufferCopy(
+        NewPreamble.first->getBuffer().slice(0, Preamble.size()), MainFilename);
 
   // Remap the main source file to the preamble buffer.
   StringRef MainFilePath = FrontendOpts.Inputs[0].getFile();
@@ -1578,7 +1554,7 @@
   PreprocessorOpts.PrecompiledPreambleBytes.second = false;
   
   // Create the compiler instance to use for building the precompiled preamble.
-  OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+  std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
 
   // Recover resources if we crash before exiting this method.
   llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
@@ -1621,6 +1597,7 @@
   checkAndRemoveNonDriverDiags(StoredDiagnostics);
   TopLevelDecls.clear();
   TopLevelDeclsInPreamble.clear();
+  PreambleDiagnostics.clear();
   
   // Create a file manager object to provide access to and cache the filesystem.
   Clang->setFileManager(new FileManager(Clang->getFileSystemOpts()));
@@ -1628,8 +1605,8 @@
   // Create the source manager.
   Clang->setSourceManager(new SourceManager(getDiagnostics(),
                                             Clang->getFileManager()));
-  
-  OwningPtr<PrecompilePreambleAction> Act;
+
+  std::unique_ptr<PrecompilePreambleAction> Act;
   Act.reset(new PrecompilePreambleAction(*this));
   if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
     llvm::sys::fs::remove(FrontendOpts.OutputFile);
@@ -1641,8 +1618,21 @@
   }
   
   Act->Execute();
+
+  // Transfer any diagnostics generated when parsing the preamble into the set
+  // of preamble diagnostics.
+  for (stored_diag_iterator
+         I = stored_diag_afterDriver_begin(),
+         E = stored_diag_end(); I != E; ++I) {
+    StandaloneDiagnostic Diag;
+    makeStandaloneDiagnostic(Clang->getLangOpts(), *I, Diag);
+    PreambleDiagnostics.push_back(Diag);
+  }
+
   Act->EndSourceFile();
 
+  checkAndRemoveNonDriverDiags(StoredDiagnostics);
+
   if (!Act->hasEmittedPreamblePCH()) {
     // The preamble PCH failed (e.g. there was a module loading fatal error),
     // so no precompiled header was generated. Forget that we even tried.
@@ -1656,13 +1646,6 @@
     return 0;
   }
   
-  // Transfer any diagnostics generated when parsing the preamble into the set
-  // of preamble diagnostics.
-  PreambleDiagnostics.clear();
-  PreambleDiagnostics.insert(PreambleDiagnostics.end(), 
-                            stored_diag_afterDriver_begin(), stored_diag_end());
-  checkAndRemoveNonDriverDiags(StoredDiagnostics);
-  
   // Keep track of the preamble we precompiled.
   setPreambleFile(this, FrontendOpts.OutputFile);
   NumWarningsInPreamble = getDiagnostics().getNumWarnings();
@@ -1678,11 +1661,20 @@
        F != FEnd;
        ++F) {
     const FileEntry *File = F->second->OrigEntry;
-    if (!File || F->second->getRawBuffer() == MainFileBuffer)
+    if (!File)
       continue;
-    
-    FilesInPreamble[File->getName()]
-      = std::make_pair(F->second->getSize(), File->getModificationTime());
+    const llvm::MemoryBuffer *Buffer = F->second->getRawBuffer();
+    if (Buffer == MainFileBuffer)
+      continue;
+
+    if (time_t ModTime = File->getModificationTime()) {
+      FilesInPreamble[File->getName()] = PreambleFileHash::createForFile(
+          F->second->getSize(), ModTime);
+    } else {
+      assert(F->second->getSize() == Buffer->getBufferSize());
+      FilesInPreamble[File->getName()] =
+          PreambleFileHash::createForMemoryBuffer(Buffer);
+    }
   }
   
   PreambleRebuildCounter = 1;
@@ -1697,9 +1689,8 @@
     PreambleTopLevelHashValue = CurrentTopLevelHashValue;
   }
   
-  return CreatePaddedMainFileBuffer(NewPreamble.first, 
-                                    PreambleReservedSize,
-                                    FrontendOpts.Inputs[0].getFile());
+  return llvm::MemoryBuffer::getMemBufferCopy(NewPreamble.first->getBuffer(),
+                                              MainFilename);
 }
 
 void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
@@ -1761,7 +1752,7 @@
                          IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
                          bool CaptureDiagnostics,
                          bool UserFilesAreVolatile) {
-  OwningPtr<ASTUnit> AST;
+  std::unique_ptr<ASTUnit> AST;
   AST.reset(new ASTUnit(false));
   ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
   AST->Diagnostics = Diags;
@@ -1772,25 +1763,19 @@
   AST->SourceMgr = new SourceManager(AST->getDiagnostics(), *AST->FileMgr,
                                      UserFilesAreVolatile);
 
-  return AST.take();
+  return AST.release();
 }
 
-ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
-                              IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
-                                             ASTFrontendAction *Action,
-                                             ASTUnit *Unit,
-                                             bool Persistent,
-                                             StringRef ResourceFilesPath,
-                                             bool OnlyLocalDecls,
-                                             bool CaptureDiagnostics,
-                                             bool PrecompilePreamble,
-                                             bool CacheCodeCompletionResults,
-                                    bool IncludeBriefCommentsInCodeCompletion,
-                                             bool UserFilesAreVolatile,
-                                             OwningPtr<ASTUnit> *ErrAST) {
+ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
+    CompilerInvocation *CI, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+    ASTFrontendAction *Action, ASTUnit *Unit, bool Persistent,
+    StringRef ResourceFilesPath, bool OnlyLocalDecls, bool CaptureDiagnostics,
+    bool PrecompilePreamble, bool CacheCodeCompletionResults,
+    bool IncludeBriefCommentsInCodeCompletion, bool UserFilesAreVolatile,
+    std::unique_ptr<ASTUnit> *ErrAST) {
   assert(CI && "A CompilerInvocation is required");
 
-  OwningPtr<ASTUnit> OwnAST;
+  std::unique_ptr<ASTUnit> OwnAST;
   ASTUnit *AST = Unit;
   if (!AST) {
     // Create the AST unit.
@@ -1824,7 +1809,7 @@
   ProcessWarningOptions(AST->getDiagnostics(), CI->getDiagnosticOpts());
 
   // Create the compiler instance to use for building the AST.
-  OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+  std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
 
   // Recover resources if we crash before exiting this method.
   llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
@@ -1870,7 +1855,7 @@
 
   ASTFrontendAction *Act = Action;
 
-  OwningPtr<TopLevelDeclTrackerAction> TrackerAct;
+  std::unique_ptr<TopLevelDeclTrackerAction> TrackerAct;
   if (!Act) {
     TrackerAct.reset(new TopLevelDeclTrackerAction(*AST));
     Act = TrackerAct.get();
@@ -1912,7 +1897,7 @@
   Act->EndSourceFile();
 
   if (OwnAST)
-    return OwnAST.take();
+    return OwnAST.release();
   else
     return AST;
 }
@@ -1953,7 +1938,7 @@
                                     bool IncludeBriefCommentsInCodeCompletion,
                                              bool UserFilesAreVolatile) {
   // Create the AST unit.
-  OwningPtr<ASTUnit> AST;
+  std::unique_ptr<ASTUnit> AST;
   AST.reset(new ASTUnit(false));
   ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
   AST->Diagnostics = Diags;
@@ -1975,27 +1960,20 @@
     llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
     DiagCleanup(Diags.getPtr());
 
-  return AST->LoadFromCompilerInvocation(PrecompilePreamble)? 0 : AST.take();
+  return AST->LoadFromCompilerInvocation(PrecompilePreamble) ? 0
+                                                             : AST.release();
 }
 
-ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
-                                      const char **ArgEnd,
-                                    IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
-                                      StringRef ResourceFilesPath,
-                                      bool OnlyLocalDecls,
-                                      bool CaptureDiagnostics,
-                                      RemappedFile *RemappedFiles,
-                                      unsigned NumRemappedFiles,
-                                      bool RemappedFilesKeepOriginalName,
-                                      bool PrecompilePreamble,
-                                      TranslationUnitKind TUKind,
-                                      bool CacheCodeCompletionResults,
-                                      bool IncludeBriefCommentsInCodeCompletion,
-                                      bool AllowPCHWithCompilerErrors,
-                                      bool SkipFunctionBodies,
-                                      bool UserFilesAreVolatile,
-                                      bool ForSerialization,
-                                      OwningPtr<ASTUnit> *ErrAST) {
+ASTUnit *ASTUnit::LoadFromCommandLine(
+    const char **ArgBegin, const char **ArgEnd,
+    IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
+    bool OnlyLocalDecls, bool CaptureDiagnostics,
+    ArrayRef<RemappedFile> RemappedFiles, bool RemappedFilesKeepOriginalName,
+    bool PrecompilePreamble, TranslationUnitKind TUKind,
+    bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
+    bool AllowPCHWithCompilerErrors, bool SkipFunctionBodies,
+    bool UserFilesAreVolatile, bool ForSerialization,
+    std::unique_ptr<ASTUnit> *ErrAST) {
   if (!Diags.getPtr()) {
     // No diagnostics engine was provided, so create our own diagnostics object
     // with the default options.
@@ -2019,15 +1997,9 @@
   }
 
   // Override any files that need remapping
-  for (unsigned I = 0; I != NumRemappedFiles; ++I) {
-    FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
-    if (const llvm::MemoryBuffer *
-            memBuf = fileOrBuf.dyn_cast<const llvm::MemoryBuffer *>()) {
-      CI->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first, memBuf);
-    } else {
-      const char *fname = fileOrBuf.get<const char *>();
-      CI->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first, fname);
-    }
+  for (unsigned I = 0, N = RemappedFiles.size(); I != N; ++I) {
+    CI->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first,
+                                              RemappedFiles[I].second);
   }
   PreprocessorOptions &PPOpts = CI->getPreprocessorOpts();
   PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName;
@@ -2039,7 +2011,7 @@
   CI->getFrontendOpts().SkipFunctionBodies = SkipFunctionBodies;
 
   // Create the AST unit.
-  OwningPtr<ASTUnit> AST;
+  std::unique_ptr<ASTUnit> AST;
   AST.reset(new ASTUnit(false));
   ConfigureDiags(Diags, ArgBegin, ArgEnd, *AST, CaptureDiagnostics);
   AST->Diagnostics = Diags;
@@ -2074,10 +2046,10 @@
     return 0;
   }
 
-  return AST.take();
+  return AST.release();
 }
 
-bool ASTUnit::Reparse(RemappedFile *RemappedFiles, unsigned NumRemappedFiles) {
+bool ASTUnit::Reparse(ArrayRef<RemappedFile> RemappedFiles) {
   if (!Invocation)
     return true;
 
@@ -2096,19 +2068,11 @@
     delete R->second;
   }
   Invocation->getPreprocessorOpts().clearRemappedFiles();
-  for (unsigned I = 0; I != NumRemappedFiles; ++I) {
-    FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
-    if (const llvm::MemoryBuffer *
-            memBuf = fileOrBuf.dyn_cast<const llvm::MemoryBuffer *>()) {
-      Invocation->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first,
-                                                        memBuf);
-    } else {
-      const char *fname = fileOrBuf.get<const char *>();
-      Invocation->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first,
-                                                        fname);
-    }
+  for (unsigned I = 0, N = RemappedFiles.size(); I != N; ++I) {
+    Invocation->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first,
+                                                      RemappedFiles[I].second);
   }
-  
+
   // If we have a preamble file lying around, or if we might try to
   // build a precompiled preamble, do so now.
   llvm::MemoryBuffer *OverrideMainBuffer = 0;
@@ -2178,23 +2142,22 @@
                        |  (1LL << CodeCompletionContext::CCC_UnionTag)
                        |  (1LL << CodeCompletionContext::CCC_ClassOrStructTag);
     }
-    
-    virtual void ProcessCodeCompleteResults(Sema &S, 
-                                            CodeCompletionContext Context,
-                                            CodeCompletionResult *Results,
-                                            unsigned NumResults);
-    
-    virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
-                                           OverloadCandidate *Candidates,
-                                           unsigned NumCandidates) { 
+
+    void ProcessCodeCompleteResults(Sema &S, CodeCompletionContext Context,
+                                    CodeCompletionResult *Results,
+                                    unsigned NumResults) override;
+
+    void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
+                                   OverloadCandidate *Candidates,
+                                   unsigned NumCandidates) override {
       Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates);
     }
-    
-    virtual CodeCompletionAllocator &getAllocator() {
+
+    CodeCompletionAllocator &getAllocator() override {
       return Next.getAllocator();
     }
 
-    virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() {
+    CodeCompletionTUInfo &getCodeCompletionTUInfo() override {
       return Next.getCodeCompletionTUInfo();
     }
   };
@@ -2378,8 +2341,7 @@
 
 
 void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
-                           RemappedFile *RemappedFiles, 
-                           unsigned NumRemappedFiles,
+                           ArrayRef<RemappedFile> RemappedFiles,
                            bool IncludeMacros, 
                            bool IncludeCodePatterns,
                            bool IncludeBriefComments,
@@ -2417,7 +2379,7 @@
   // Set the language options appropriately.
   LangOpts = *CCInvocation->getLangOpts();
 
-  OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+  std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
 
   // Recover resources if we crash before exiting this method.
   llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
@@ -2462,18 +2424,12 @@
   // Remap files.
   PreprocessorOpts.clearRemappedFiles();
   PreprocessorOpts.RetainRemappedFileBuffers = true;
-  for (unsigned I = 0; I != NumRemappedFiles; ++I) {
-    FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
-    if (const llvm::MemoryBuffer *
-            memBuf = fileOrBuf.dyn_cast<const llvm::MemoryBuffer *>()) {
-      PreprocessorOpts.addRemappedFile(RemappedFiles[I].first, memBuf);
-      OwnedBuffers.push_back(memBuf);
-    } else {
-      const char *fname = fileOrBuf.get<const char *>();
-      PreprocessorOpts.addRemappedFile(RemappedFiles[I].first, fname);
-    }
+  for (unsigned I = 0, N = RemappedFiles.size(); I != N; ++I) {
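+    // Remap the file and record its buffer in OwnedBuffers;
+    // RetainRemappedFileBuffers is set above, so the preprocessor options do
+    // not own the buffer.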
+    PreprocessorOpts.addRemappedFile(RemappedFiles[I].first,
+                                     RemappedFiles[I].second);
+    OwnedBuffers.push_back(RemappedFiles[I].second);
   }
-  
+
   // Use the code completion consumer we were given, but adding any cached
   // code-completion results.
   AugmentedCodeCompleteConsumer *AugmentedConsumer
@@ -2520,8 +2476,8 @@
   // Disable the preprocessing record if modules are not enabled.
   if (!Clang->getLangOpts().Modules)
     PreprocessorOpts.DetailedRecord = false;
-  
-  OwningPtr<SyntaxOnlyAction> Act;
+
+  std::unique_ptr<SyntaxOnlyAction> Act;
   Act.reset(new SyntaxOnlyAction);
   if (Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
     Act->Execute();
@@ -2554,8 +2510,7 @@
   }
 
   if (llvm::sys::fs::rename(TempPath.str(), File)) {
-    bool exists;
-    llvm::sys::fs::remove(TempPath.str(), exists);
+    llvm::sys::fs::remove(TempPath.str());
     return true;
   }
 
@@ -2591,68 +2546,57 @@
 
 typedef ContinuousRangeMap<unsigned, int, 2> SLocRemap;
 
-static void TranslateSLoc(SourceLocation &L, SLocRemap &Remap) {
-  unsigned Raw = L.getRawEncoding();
-  const unsigned MacroBit = 1U << 31;
-  L = SourceLocation::getFromRawEncoding((Raw & MacroBit) |
-      ((Raw & ~MacroBit) + Remap.find(Raw & ~MacroBit)->second));
-}
-
 void ASTUnit::TranslateStoredDiagnostics(
-                          ASTReader *MMan,
-                          StringRef ModName,
+                          FileManager &FileMgr,
                           SourceManager &SrcMgr,
-                          const SmallVectorImpl<StoredDiagnostic> &Diags,
+                          const SmallVectorImpl<StandaloneDiagnostic> &Diags,
                           SmallVectorImpl<StoredDiagnostic> &Out) {
-  // The stored diagnostic has the old source manager in it; update
-  // the locations to refer into the new source manager. We also need to remap
-  // all the locations to the new view. This includes the diag location, any
-  // associated source ranges, and the source ranges of associated fix-its.
+  // Map the standalone diagnostic into the new source manager. We also need to
+  // remap all the locations to the new view. This includes the diag location,
+  // any associated source ranges, and the source ranges of associated fix-its.
   // FIXME: There should be a cleaner way to do this.
 
   SmallVector<StoredDiagnostic, 4> Result;
   Result.reserve(Diags.size());
-  assert(MMan && "Don't have a module manager");
-  serialization::ModuleFile *Mod = MMan->ModuleMgr.lookup(ModName);
-  assert(Mod && "Don't have preamble module");
-  SLocRemap &Remap = Mod->SLocRemap;
   for (unsigned I = 0, N = Diags.size(); I != N; ++I) {
     // Rebuild the StoredDiagnostic.
-    const StoredDiagnostic &SD = Diags[I];
-    SourceLocation L = SD.getLocation();
-    TranslateSLoc(L, Remap);
+    const StandaloneDiagnostic &SD = Diags[I];
+    if (SD.Filename.empty())
+      continue;
+    const FileEntry *FE = FileMgr.getFile(SD.Filename);
+    if (!FE)
+      continue;
+    FileID FID = SrcMgr.translateFile(FE);
+    SourceLocation FileLoc = SrcMgr.getLocForStartOfFile(FID);
+    if (FileLoc.isInvalid())
+      continue;
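+    // Rebuild the location by offsetting from the start of the file in the
+    // new source manager.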
+    SourceLocation L = FileLoc.getLocWithOffset(SD.LocOffset);
     FullSourceLoc Loc(L, SrcMgr);
 
     SmallVector<CharSourceRange, 4> Ranges;
-    Ranges.reserve(SD.range_size());
-    for (StoredDiagnostic::range_iterator I = SD.range_begin(),
-                                          E = SD.range_end();
-         I != E; ++I) {
-      SourceLocation BL = I->getBegin();
-      TranslateSLoc(BL, Remap);
-      SourceLocation EL = I->getEnd();
-      TranslateSLoc(EL, Remap);
-      Ranges.push_back(CharSourceRange(SourceRange(BL, EL), I->isTokenRange()));
+    Ranges.reserve(SD.Ranges.size());
+    for (std::vector<std::pair<unsigned, unsigned> >::const_iterator
+           I = SD.Ranges.begin(), E = SD.Ranges.end(); I != E; ++I) {
+      SourceLocation BL = FileLoc.getLocWithOffset((*I).first);
+      SourceLocation EL = FileLoc.getLocWithOffset((*I).second);
+      Ranges.push_back(CharSourceRange::getCharRange(BL, EL));
     }
 
     SmallVector<FixItHint, 2> FixIts;
-    FixIts.reserve(SD.fixit_size());
-    for (StoredDiagnostic::fixit_iterator I = SD.fixit_begin(),
-                                          E = SD.fixit_end();
+    FixIts.reserve(SD.FixIts.size());
+    for (std::vector<StandaloneFixIt>::const_iterator
+           I = SD.FixIts.begin(), E = SD.FixIts.end();
          I != E; ++I) {
       FixIts.push_back(FixItHint());
       FixItHint &FH = FixIts.back();
       FH.CodeToInsert = I->CodeToInsert;
-      SourceLocation BL = I->RemoveRange.getBegin();
-      TranslateSLoc(BL, Remap);
-      SourceLocation EL = I->RemoveRange.getEnd();
-      TranslateSLoc(EL, Remap);
-      FH.RemoveRange = CharSourceRange(SourceRange(BL, EL),
-                                       I->RemoveRange.isTokenRange());
+      SourceLocation BL = FileLoc.getLocWithOffset(I->RemoveRange.first);
+      SourceLocation EL = FileLoc.getLocWithOffset(I->RemoveRange.second);
+      FH.RemoveRange = CharSourceRange::getCharRange(BL, EL);
     }
 
-    Result.push_back(StoredDiagnostic(SD.getLevel(), SD.getID(), 
-                                      SD.getMessage(), Loc, Ranges, FixIts));
+    Result.push_back(StoredDiagnostic(SD.Level, SD.ID, 
+                                      SD.Message, Loc, Ranges, FixIts));
   }
   Result.swap(Out);
 }
@@ -2677,7 +2621,7 @@
   assert(SM.isLocalSourceLocation(FileLoc));
   FileID FID;
   unsigned Offset;
-  llvm::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
+  std::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
   if (FID.isInvalid())
     return;
 
@@ -2860,7 +2804,7 @@
     serialization::ModuleFile &
       Mod = Reader->getModuleManager().getPrimaryModule();
     ASTReader::ModuleDeclIterator MDI, MDE;
-    llvm::tie(MDI, MDE) = Reader->getModuleFileLevelDecls(Mod);
+    std::tie(MDI, MDE) = Reader->getModuleFileLevelDecls(Mod);
     for (; MDI != MDE; ++MDI) {
       if (!Fn(context, *MDI))
         return false;
@@ -2953,7 +2897,7 @@
 
 #else // NDEBUG
 
-ASTUnit::ConcurrencyState::ConcurrencyState() {}
+ASTUnit::ConcurrencyState::ConcurrencyState() { Mutex = 0; }
 ASTUnit::ConcurrencyState::~ConcurrencyState() {}
 void ASTUnit::ConcurrencyState::start() {}
 void ASTUnit::ConcurrencyState::finish() {}
diff --git a/lib/Frontend/Android.mk b/lib/Frontend/Android.mk
index 0839dec..c3c42c3 100644
--- a/lib/Frontend/Android.mk
+++ b/lib/Frontend/Android.mk
@@ -9,6 +9,7 @@
   AttrList.inc \
   Attrs.inc \
   AttrParsedAttrList.inc \
+  AttrVisitor.inc \
   CC1Options.inc \
   CommentCommandList.inc \
   CommentNodes.inc \
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index 0566d54..f1c7897 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -1,3 +1,8 @@
+set(LLVM_LINK_COMPONENTS
+  Option
+  Support
+  )
+
 add_clang_library(clangFrontend
   ASTConsumers.cpp
   ASTMerge.cpp
@@ -28,25 +33,11 @@
   TextDiagnosticPrinter.cpp
   VerifyDiagnosticConsumer.cpp
   Warnings.cpp
-  )
 
-add_dependencies(clangFrontend
-  ClangAttrClasses
-  ClangAttrList
-  ClangAttrParsedAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticAST
-  ClangDiagnosticCommon
-  ClangDiagnosticDriver
-  ClangDiagnosticFrontend
-  ClangDiagnosticLex
-  ClangDiagnosticSema
+  DEPENDS
   ClangDriverOptions
-  ClangStmtNodes
-  )
 
-target_link_libraries(clangFrontend
+  LINK_LIBS
   clangAST
   clangBasic
   clangDriver
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
index 0c30b04..2cb6582 100644
--- a/lib/Frontend/CacheTokens.cpp
+++ b/lib/Frontend/CacheTokens.cpp
@@ -23,6 +23,7 @@
 #include "clang/Lex/Preprocessor.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringMap.h"
+#include "llvm/Support/EndianStream.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/Path.h"
@@ -78,21 +79,23 @@
   unsigned getKind() const { return (unsigned) Kind; }
 
   void EmitData(raw_ostream& Out) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
     switch (Kind) {
     case IsFE: {
       // Emit stat information.
       llvm::sys::fs::UniqueID UID = FE->getUniqueID();
-      ::Emit64(Out, UID.getFile());
-      ::Emit64(Out, UID.getDevice());
-      ::Emit64(Out, FE->getModificationTime());
-      ::Emit64(Out, FE->getSize());
+      LE.write<uint64_t>(UID.getFile());
+      LE.write<uint64_t>(UID.getDevice());
+      LE.write<uint64_t>(FE->getModificationTime());
+      LE.write<uint64_t>(FE->getSize());
     } break;
     case IsDE:
       // Emit stat information.
-      ::Emit64(Out, Data->UniqueID.getFile());
-      ::Emit64(Out, Data->UniqueID.getDevice());
-      ::Emit64(Out, Data->ModTime);
-      ::Emit64(Out, Data->Size);
+      LE.write<uint64_t>(Data->UniqueID.getFile());
+      LE.write<uint64_t>(Data->UniqueID.getDevice());
+      LE.write<uint64_t>(Data->ModTime);
+      LE.write<uint64_t>(Data->Size);
       delete Data;
       break;
     default:
@@ -120,32 +123,36 @@
   static std::pair<unsigned,unsigned>
   EmitKeyDataLength(raw_ostream& Out, PTHEntryKeyVariant V,
                     const PTHEntry& E) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
 
     unsigned n = V.getString().size() + 1 + 1;
-    ::Emit16(Out, n);
+    LE.write<uint16_t>(n);
 
     unsigned m = V.getRepresentationLength() + (V.isFile() ? 4 + 4 : 0);
-    ::Emit8(Out, m);
+    LE.write<uint8_t>(m);
 
     return std::make_pair(n, m);
   }
 
   static void EmitKey(raw_ostream& Out, PTHEntryKeyVariant V, unsigned n){
+    using namespace llvm::support;
     // Emit the entry kind.
-    ::Emit8(Out, (unsigned) V.getKind());
+    endian::Writer<little>(Out).write<uint8_t>((unsigned)V.getKind());
     // Emit the string.
     Out.write(V.getString().data(), n - 1);
   }
 
   static void EmitData(raw_ostream& Out, PTHEntryKeyVariant V,
                        const PTHEntry& E, unsigned) {
-
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
 
     // For file entries emit the offsets into the PTH file for token data
     // and the preprocessor blocks table.
     if (V.isFile()) {
-      ::Emit32(Out, E.getTokenOffset());
-      ::Emit32(Out, E.getPPCondTableOffset());
+      LE.write<uint32_t>(E.getTokenOffset());
+      LE.write<uint32_t>(E.getPPCondTableOffset());
     }
 
     // Emit any other data associated with the key (i.e., stat information).
@@ -186,18 +193,28 @@
   /// Emit a token to the PTH file.
   void EmitToken(const Token& T);
 
-  void Emit8(uint32_t V) { ::Emit8(Out, V); }
+  void Emit8(uint32_t V) {
+    using namespace llvm::support;
+    endian::Writer<little>(Out).write<uint8_t>(V);
+  }
 
-  void Emit16(uint32_t V) { ::Emit16(Out, V); }
+  void Emit16(uint32_t V) {
+    using namespace llvm::support;
+    endian::Writer<little>(Out).write<uint16_t>(V);
+  }
 
-  void Emit32(uint32_t V) { ::Emit32(Out, V); }
+  void Emit32(uint32_t V) {
+    using namespace llvm::support;
+    endian::Writer<little>(Out).write<uint32_t>(V);
+  }
 
   void EmitBuf(const char *Ptr, unsigned NumBytes) {
     Out.write(Ptr, NumBytes);
   }
 
   void EmitString(StringRef V) {
-    ::Emit16(Out, V.size());
+    using namespace llvm::support;
+    endian::Writer<little>(Out).write<uint16_t>(V.size());
     EmitBuf(V.data(), V.size());
   }
 
@@ -516,8 +533,8 @@
   ~StatListener() {}
 
   LookupResult getStat(const char *Path, FileData &Data, bool isFile,
-                       int *FileDescriptor) {
-    LookupResult Result = statChained(Path, Data, isFile, FileDescriptor);
+                       vfs::File **F, vfs::FileSystem &FS) override {
+    LookupResult Result = statChained(Path, Data, isFile, F, FS);
 
     if (Result == CacheMissing) // Failed 'stat'.
       PM.insert(PTHEntryKeyVariant(Path), PTHEntry());
@@ -584,8 +601,9 @@
 
   static std::pair<unsigned,unsigned>
   EmitKeyDataLength(raw_ostream& Out, const PTHIdKey* key, uint32_t) {
+    using namespace llvm::support;
     unsigned n = key->II->getLength() + 1;
-    ::Emit16(Out, n);
+    endian::Writer<little>(Out).write<uint16_t>(n);
     return std::make_pair(n, sizeof(uint32_t));
   }
 
@@ -598,7 +616,8 @@
 
   static void EmitData(raw_ostream& Out, PTHIdKey*, uint32_t pID,
                        unsigned) {
-    ::Emit32(Out, pID);
+    using namespace llvm::support;
+    endian::Writer<little>(Out).write<uint32_t>(pID);
   }
 };
 } // end anonymous namespace
diff --git a/lib/Frontend/ChainedIncludesSource.cpp b/lib/Frontend/ChainedIncludesSource.cpp
index 442177e..6741160 100644
--- a/lib/Frontend/ChainedIncludesSource.cpp
+++ b/lib/Frontend/ChainedIncludesSource.cpp
@@ -31,7 +31,7 @@
                              SmallVectorImpl<std::string> &bufNames,
                              ASTDeserializationListener *deserialListener = 0) {
   Preprocessor &PP = CI.getPreprocessor();
-  OwningPtr<ASTReader> Reader;
+  std::unique_ptr<ASTReader> Reader;
   Reader.reset(new ASTReader(PP, CI.getASTContext(), /*isysroot=*/"",
                              /*DisableValidation=*/true));
   for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
@@ -44,7 +44,7 @@
   case ASTReader::Success:
     // Set the predefines buffer as suggested by the PCH reader.
     PP.setPredefines(Reader->getSuggestedPredefines());
-    return Reader.take();
+    return Reader.release();
 
   case ASTReader::Failure:
   case ASTReader::Missing:
@@ -62,12 +62,13 @@
     delete CIs[i];
 }
 
-ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
+IntrusiveRefCntPtr<ChainedIncludesSource>
+ChainedIncludesSource::create(CompilerInstance &CI) {
 
   std::vector<std::string> &includes = CI.getPreprocessorOpts().ChainedIncludes;
   assert(!includes.empty() && "No '-chain-include' in options!");
 
-  OwningPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
+  IntrusiveRefCntPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
   InputKind IK = CI.getFrontendOpts().Inputs[0].getKind();
 
   SmallVector<llvm::MemoryBuffer *, 4> serialBufs;
@@ -75,7 +76,7 @@
 
   for (unsigned i = 0, e = includes.size(); i != e; ++i) {
     bool firstInclude = (i == 0);
-    OwningPtr<CompilerInvocation> CInvok;
+    std::unique_ptr<CompilerInvocation> CInvok;
     CInvok.reset(new CompilerInvocation(CI.getInvocation()));
     
     CInvok->getPreprocessorOpts().ChainedIncludes.clear();
@@ -96,26 +97,26 @@
     IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
         new DiagnosticsEngine(DiagID, &CI.getDiagnosticOpts(), DiagClient));
 
-    OwningPtr<CompilerInstance> Clang(new CompilerInstance());
-    Clang->setInvocation(CInvok.take());
+    std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
+    Clang->setInvocation(CInvok.release());
     Clang->setDiagnostics(Diags.getPtr());
     Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
                                                   &Clang->getTargetOpts()));
     Clang->createFileManager();
     Clang->createSourceManager(Clang->getFileManager());
-    Clang->createPreprocessor();
+    Clang->createPreprocessor(TU_Prefix);
     Clang->getDiagnosticClient().BeginSourceFile(Clang->getLangOpts(),
                                                  &Clang->getPreprocessor());
     Clang->createASTContext();
 
     SmallVector<char, 256> serialAST;
     llvm::raw_svector_ostream OS(serialAST);
-    OwningPtr<ASTConsumer> consumer;
+    std::unique_ptr<ASTConsumer> consumer;
     consumer.reset(new PCHGenerator(Clang->getPreprocessor(), "-", 0,
                                     /*isysroot=*/"", &OS));
     Clang->getASTContext().setASTMutationListener(
                                             consumer->GetASTMutationListener());
-    Clang->setASTConsumer(consumer.take());
+    Clang->setASTConsumer(consumer.release());
     Clang->createSema(TU_Prefix, 0);
 
     if (firstInclude) {
@@ -137,13 +138,12 @@
       
       serialBufNames.push_back(pchName);
 
-      OwningPtr<ExternalASTSource> Reader;
-
-      Reader.reset(createASTReader(*Clang, pchName, bufs, serialBufNames, 
-        Clang->getASTConsumer().GetASTDeserializationListener()));
+      IntrusiveRefCntPtr<ASTReader> Reader;
+      Reader = createASTReader(*Clang, pchName, bufs, serialBufNames, 
+        Clang->getASTConsumer().GetASTDeserializationListener());
       if (!Reader)
         return 0;
-      Clang->setModuleManager(static_cast<ASTReader*>(Reader.get()));
+      Clang->setModuleManager(Reader);
       Clang->getASTContext().setExternalSource(Reader);
     }
     
@@ -156,19 +156,19 @@
     serialBufs.push_back(
       llvm::MemoryBuffer::getMemBufferCopy(StringRef(serialAST.data(),
                                                            serialAST.size())));
-    source->CIs.push_back(Clang.take());
+    source->CIs.push_back(Clang.release());
   }
 
   assert(!serialBufs.empty());
   std::string pchName = includes.back() + ".pch-final";
   serialBufNames.push_back(pchName);
-  OwningPtr<ASTReader> Reader;
-  Reader.reset(createASTReader(CI, pchName, serialBufs, serialBufNames));
+  IntrusiveRefCntPtr<ASTReader> Reader;
+  Reader = createASTReader(CI, pchName, serialBufs, serialBufNames);
   if (!Reader)
     return 0;
 
-  source->FinalReader.reset(Reader.take());
-  return source.take();
+  source->FinalReader = Reader;
+  return source;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index eccb94c..ce47674 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -79,6 +79,10 @@
 
 void CompilerInstance::setFileManager(FileManager *Value) {
   FileMgr = Value;
+  if (Value)
+    VirtualFileSystem = Value->getVirtualFileSystem();
+  else
+    VirtualFileSystem.reset();
 }
 
 void CompilerInstance::setSourceManager(SourceManager *Value) {
@@ -100,6 +104,13 @@
 void CompilerInstance::setCodeCompletionConsumer(CodeCompleteConsumer *Value) {
   CompletionConsumer.reset(Value);
 }
+
+IntrusiveRefCntPtr<ASTReader> CompilerInstance::getModuleManager() const {
+  return ModuleManager;
+}
+void CompilerInstance::setModuleManager(IntrusiveRefCntPtr<ASTReader> Reader) {
+  ModuleManager = Reader;
+}
 
 // Diagnostics
 static void SetUpDiagnosticLog(DiagnosticOptions *DiagOpts,
@@ -110,9 +121,9 @@
   raw_ostream *OS = &llvm::errs();
   if (DiagOpts->DiagnosticLogFile != "-") {
     // Create the output stream.
-    llvm::raw_fd_ostream *FileOS(
-        new llvm::raw_fd_ostream(DiagOpts->DiagnosticLogFile.c_str(), ErrorInfo,
-                                 llvm::sys::fs::F_Append));
+    llvm::raw_fd_ostream *FileOS(new llvm::raw_fd_ostream(
+        DiagOpts->DiagnosticLogFile.c_str(), ErrorInfo,
+        llvm::sys::fs::F_Append | llvm::sys::fs::F_Text));
     if (!ErrorInfo.empty()) {
       Diags.Report(diag::warn_fe_cc_log_diagnostics_failure)
         << DiagOpts->DiagnosticLogFile << ErrorInfo;
@@ -136,20 +147,19 @@
                                        DiagnosticsEngine &Diags,
                                        StringRef OutputFile) {
   std::string ErrorInfo;
-  OwningPtr<llvm::raw_fd_ostream> OS;
+  std::unique_ptr<llvm::raw_fd_ostream> OS;
   OS.reset(new llvm::raw_fd_ostream(OutputFile.str().c_str(), ErrorInfo,
-                                    llvm::sys::fs::F_Binary));
+                                    llvm::sys::fs::F_None));
 
   if (!ErrorInfo.empty()) {
     Diags.Report(diag::warn_fe_serialized_diag_failure)
       << OutputFile << ErrorInfo;
     return;
   }
-  
-  DiagnosticConsumer *SerializedConsumer =
-    clang::serialized_diags::create(OS.take(), DiagOpts);
 
-  
+  DiagnosticConsumer *SerializedConsumer =
+      clang::serialized_diags::create(OS.release(), DiagOpts);
+
   Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(),
                                                 SerializedConsumer));
 }
@@ -197,7 +207,11 @@
 // File Manager
 
 void CompilerInstance::createFileManager() {
-  FileMgr = new FileManager(getFileSystemOpts());
+  if (!hasVirtualFileSystem()) {
+    // TODO: choose the virtual file system based on the CompilerInvocation.
+    setVirtualFileSystem(vfs::getRealFileSystem());
+  }
+  FileMgr = new FileManager(getFileSystemOpts(), VirtualFileSystem);
 }
 
 // Source Manager
@@ -208,7 +222,7 @@
 
 // Preprocessor
 
-void CompilerInstance::createPreprocessor() {
+void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
   const PreprocessorOptions &PPOpts = getPreprocessorOpts();
 
   // Create a PTH manager if we are using some form of a token cache.
@@ -225,7 +239,10 @@
   PP = new Preprocessor(&getPreprocessorOpts(),
                         getDiagnostics(), getLangOpts(), &getTarget(),
                         getSourceManager(), *HeaderInfo, *this, PTHMgr,
-                        /*OwnsHeaderSearch=*/true);
+                        /*OwnsHeaderSearch=*/true,
+                        /*DelayInitialization=*/false,
+                        /*IncrProcessing=*/false,
+                        TUKind);
 
   // Note that this is different than passing PTHMgr to Preprocessor's ctor.
   // That argument is used as the IdentifierInfoLookup argument to
@@ -254,7 +271,8 @@
   // Handle generating dependencies, if requested.
   const DependencyOutputOptions &DepOpts = getDependencyOutputOpts();
   if (!DepOpts.OutputFile.empty())
-    AttachDependencyFileGen(*PP, DepOpts);
+    TheDependencyFileGenerator.reset(
+        DependencyFileGenerator::CreateAndAttachToPreprocessor(*PP, DepOpts));
   if (!DepOpts.DOTOutputFile.empty())
     AttachDependencyGraphGen(*PP, DepOpts.DOTOutputFile,
                              getHeaderSearchOpts().Sysroot);
@@ -293,16 +311,16 @@
                                                   bool DisablePCHValidation,
                                                 bool AllowPCHWithCompilerErrors,
                                                  void *DeserializationListener){
-  OwningPtr<ExternalASTSource> Source;
+  IntrusiveRefCntPtr<ExternalASTSource> Source;
   bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
-  Source.reset(createPCHExternalASTSource(Path, getHeaderSearchOpts().Sysroot,
+  Source = createPCHExternalASTSource(Path, getHeaderSearchOpts().Sysroot,
                                           DisablePCHValidation,
                                           AllowPCHWithCompilerErrors,
                                           getPreprocessor(), getASTContext(),
                                           DeserializationListener,
                                           Preamble,
-                                       getFrontendOpts().UseGlobalModuleIndex));
-  ModuleManager = static_cast<ASTReader*>(Source.get());
+                                       getFrontendOpts().UseGlobalModuleIndex);
+  ModuleManager = static_cast<ASTReader*>(Source.getPtr());
   getASTContext().setExternalSource(Source);
 }
 
@@ -316,11 +334,15 @@
                                              void *DeserializationListener,
                                              bool Preamble,
                                              bool UseGlobalModuleIndex) {
-  OwningPtr<ASTReader> Reader;
+  HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+
+  std::unique_ptr<ASTReader> Reader;
   Reader.reset(new ASTReader(PP, Context,
                              Sysroot.empty() ? "" : Sysroot.c_str(),
                              DisablePCHValidation,
                              AllowPCHWithCompilerErrors,
+                             /*AllowConfigurationMismatch*/false,
+                             HSOpts.ModulesValidateSystemHeaders,
                              UseGlobalModuleIndex));
 
   Reader->setDeserializationListener(
@@ -334,7 +356,7 @@
     // Set the predefines buffer as suggested by the PCH reader. Typically, the
     // predefines buffer will be empty.
     PP.setPredefines(Reader->getSuggestedPredefines());
-    return Reader.take();
+    return Reader.release();
 
   case ASTReader::Failure:
     // Unrecoverable failure: don't even try to process the input file.
@@ -432,8 +454,7 @@
     delete it->OS;
     if (!it->TempFilename.empty()) {
       if (EraseFiles) {
-        bool existed;
-        llvm::sys::fs::remove(it->TempFilename, existed);
+        llvm::sys::fs::remove(it->TempFilename);
       } else {
         SmallString<128> NewOutFile(it->Filename);
 
@@ -445,8 +466,7 @@
           getDiagnostics().Report(diag::err_unable_to_rename_temp)
             << it->TempFilename << it->Filename << ec.message();
 
-          bool existed;
-          llvm::sys::fs::remove(it->TempFilename, existed);
+          llvm::sys::fs::remove(it->TempFilename);
         }
       }
     } else if (!it->Filename.empty() && EraseFiles)
@@ -521,7 +541,7 @@
     OutFile = "-";
   }
 
-  OwningPtr<llvm::raw_fd_ostream> OS;
+  std::unique_ptr<llvm::raw_fd_ostream> OS;
   std::string OSFile;
 
   if (UseTemporary) {
@@ -574,7 +594,7 @@
     OSFile = OutFile;
     OS.reset(new llvm::raw_fd_ostream(
         OSFile.c_str(), Error,
-        (Binary ? llvm::sys::fs::F_Binary : llvm::sys::fs::F_None)));
+        (Binary ? llvm::sys::fs::F_None : llvm::sys::fs::F_Text)));
     if (!Error.empty())
       return 0;
   }
@@ -588,7 +608,7 @@
   if (TempPathName)
     *TempPathName = TempFile;
 
-  return OS.take();
+  return OS.release();
 }
 
 // Initialization Utilities
@@ -644,7 +664,7 @@
 
     SourceMgr.createMainFileID(File, Kind);
   } else {
-    OwningPtr<llvm::MemoryBuffer> SB;
+    std::unique_ptr<llvm::MemoryBuffer> SB;
     if (llvm::error_code ec = llvm::MemoryBuffer::getSTDIN(SB)) {
       Diags.Report(diag::err_fe_error_reading_stdin) << ec.message();
       return false;
@@ -652,7 +672,7 @@
     const FileEntry *File = FileMgr.getVirtualFile(SB->getBufferIdentifier(),
                                                    SB->getBufferSize(), 0);
     SourceMgr.createMainFileID(File, Kind);
-    SourceMgr.overrideFileContents(File, SB.take());
+    SourceMgr.overrideFileContents(File, SB.release());
   }
 
   assert(!SourceMgr.getMainFileID().isInvalid() &&
@@ -748,38 +768,6 @@
   return LangOpts.CPlusPlus? IK_CXX : IK_C;
 }
 
-namespace {
-  struct CompileModuleMapData {
-    CompilerInstance &Instance;
-    GenerateModuleAction &CreateModuleAction;
-  };
-}
-
-/// \brief Helper function that executes the module-generating action under
-/// a crash recovery context.
-static void doCompileMapModule(void *UserData) {
-  CompileModuleMapData &Data
-    = *reinterpret_cast<CompileModuleMapData *>(UserData);
-  Data.Instance.ExecuteAction(Data.CreateModuleAction);
-}
-
-namespace {
-  /// \brief Function object that checks with the given macro definition should
-  /// be removed, because it is one of the ignored macros.
-  class RemoveIgnoredMacro {
-    const HeaderSearchOptions &HSOpts;
-
-  public:
-    explicit RemoveIgnoredMacro(const HeaderSearchOptions &HSOpts)
-      : HSOpts(HSOpts) { }
-
-    bool operator()(const std::pair<std::string, bool> &def) const {
-      StringRef MacroDef = def.first;
-      return HSOpts.ModulesIgnoreMacros.count(MacroDef.split('=').first) > 0;
-    }
-  };
-}
-
 /// \brief Compile a module file for the given module, using the options 
 /// provided by the importing compiler instance.
 static void compileModule(CompilerInstance &ImportingInstance,
@@ -824,10 +812,13 @@
   // Remove any macro definitions that are explicitly ignored by the module.
   // They aren't supposed to affect how the module is built anyway.
   const HeaderSearchOptions &HSOpts = Invocation->getHeaderSearchOpts();
-  PPOpts.Macros.erase(std::remove_if(PPOpts.Macros.begin(), PPOpts.Macros.end(),
-                                     RemoveIgnoredMacro(HSOpts)),
-                      PPOpts.Macros.end());
-
+  PPOpts.Macros.erase(
+      std::remove_if(PPOpts.Macros.begin(), PPOpts.Macros.end(),
+                     [&HSOpts](const std::pair<std::string, bool> &def) {
+        StringRef MacroDef = def.first;
+        return HSOpts.ModulesIgnoreMacros.count(MacroDef.split('=').first) > 0;
+      }),
+      PPOpts.Macros.end());
 
   // Note the name of the module we're building.
   Invocation->getLangOpts()->CurrentModule = Module->getTopLevelModuleName();
@@ -867,9 +858,11 @@
                                    ImportingInstance.getDiagnosticClient()),
                              /*ShouldOwnClient=*/true);
 
+  Instance.setVirtualFileSystem(&ImportingInstance.getVirtualFileSystem());
+
   // Note that this module is part of the module build stack, so that we
   // can detect cycles in the module graph.
-  Instance.createFileManager(); // FIXME: Adopt file manager from importer?
+  Instance.setFileManager(&ImportingInstance.getFileManager());
   Instance.createSourceManager(Instance.getFileManager());
   SourceManager &SourceMgr = Instance.getSourceManager();
   SourceMgr.setModuleBuildStack(
@@ -905,10 +898,9 @@
   // thread so that we get a stack large enough.
   const unsigned ThreadStackSize = 8 << 20;
   llvm::CrashRecoveryContext CRC;
-  CompileModuleMapData Data = { Instance, CreateModuleAction };
-  CRC.RunSafelyOnThread(&doCompileMapModule, &Data, ThreadStackSize);
+  CRC.RunSafelyOnThread([&]() { Instance.ExecuteAction(CreateModuleAction); },
+                        ThreadStackSize);
 
-  
   // Delete the temporary module map file.
   // FIXME: Even though we're executing under crash protection, it would still
   // be nice to do this with RemoveFileOnSignal when we can. However, that
@@ -1012,7 +1004,7 @@
 static void writeTimestampFile(StringRef TimestampFile) {
   std::string ErrorInfo;
   llvm::raw_fd_ostream Out(TimestampFile.str().c_str(), ErrorInfo,
-                           llvm::sys::fs::F_Binary);
+                           llvm::sys::fs::F_None);
 }
 
 /// \brief Prune the module cache of modules that haven't been accessed in
@@ -1057,44 +1049,39 @@
       continue;
 
     // Walk all of the files within this directory.
-    bool RemovedAllFiles = true;
     for (llvm::sys::fs::directory_iterator File(Dir->path(), EC), FileEnd;
          File != FileEnd && !EC; File.increment(EC)) {
       // We only care about module and global module index files.
-      if (llvm::sys::path::extension(File->path()) != ".pcm" &&
-          llvm::sys::path::filename(File->path()) != "modules.idx") {
-        RemovedAllFiles = false;
+      StringRef Extension = llvm::sys::path::extension(File->path());
+      if (Extension != ".pcm" && Extension != ".timestamp" &&
+          llvm::sys::path::filename(File->path()) != "modules.idx")
         continue;
-      }
 
       // Look at this file. If we can't stat it, there's nothing interesting
       // there.
-      if (::stat(File->path().c_str(), &StatBuf)) {
-        RemovedAllFiles = false;
+      if (::stat(File->path().c_str(), &StatBuf))
         continue;
-      }
 
       // If the file has been used recently enough, leave it there.
       time_t FileAccessTime = StatBuf.st_atime;
       if (CurrentTime - FileAccessTime <=
               time_t(HSOpts.ModuleCachePruneAfter)) {
-        RemovedAllFiles = false;
         continue;
       }
 
       // Remove the file.
-      bool Existed;
-      if (llvm::sys::fs::remove(File->path(), Existed) || !Existed) {
-        RemovedAllFiles = false;
-      }
+      llvm::sys::fs::remove(File->path());
+
+      // Remove the timestamp file.
+      std::string TimestampFilename = File->path() + ".timestamp";
+      llvm::sys::fs::remove(TimestampFilename);
     }
 
     // If we removed all of the files in the directory, remove the directory
     // itself.
-    if (RemovedAllFiles) {
-      bool Existed;
-      llvm::sys::fs::remove(Dir->path(), Existed);
-    }
+    if (llvm::sys::fs::directory_iterator(Dir->path(), EC) ==
+            llvm::sys::fs::directory_iterator() && !EC)
+      llvm::sys::fs::remove(Dir->path());
   }
 }
 
@@ -1133,11 +1120,15 @@
   } else {
     // Search for a module with the given name.
     Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
-    std::string ModuleFileName;
-    if (Module) {
-      ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(Module);
-    } else
-      ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(ModuleName);
+    if (!Module) {
+      getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
+          << ModuleName << SourceRange(ImportLoc, ModuleNameLoc);
+      ModuleBuildFailed = true;
+      return ModuleLoadResult();
+    }
+
+    std::string ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(Module);
 
     // If we don't already have an ASTReader, create one now.
     if (!ModuleManager) {
@@ -1152,12 +1143,15 @@
         pruneModuleCache(getHeaderSearchOpts());
       }
 
-      std::string Sysroot = getHeaderSearchOpts().Sysroot;
+      HeaderSearchOptions &HSOpts = getHeaderSearchOpts();
+      std::string Sysroot = HSOpts.Sysroot;
       const PreprocessorOptions &PPOpts = getPreprocessorOpts();
       ModuleManager = new ASTReader(getPreprocessor(), *Context,
                                     Sysroot.empty() ? "" : Sysroot.c_str(),
                                     PPOpts.DisablePCHValidation,
                                     /*AllowASTWithCompilerErrors=*/false,
+                                    /*AllowConfigurationMismatch=*/false,
+                                    HSOpts.ModulesValidateSystemHeaders,
                                     getFrontendOpts().UseGlobalModuleIndex);
       if (hasASTConsumer()) {
         ModuleManager->setDeserializationListener(
@@ -1165,15 +1159,16 @@
         getASTContext().setASTMutationListener(
           getASTConsumer().GetASTMutationListener());
       }
-      OwningPtr<ExternalASTSource> Source;
-      Source.reset(ModuleManager);
-      getASTContext().setExternalSource(Source);
+      getASTContext().setExternalSource(ModuleManager);
       if (hasSema())
         ModuleManager->InitializeSema(getSema());
       if (hasASTConsumer())
         ModuleManager->StartTranslationUnit(&getASTConsumer());
     }
 
+    if (TheDependencyFileGenerator)
+      TheDependencyFileGenerator->AttachToASTReader(*ModuleManager);
+
     // Try to load the module file.
     unsigned ARRFlags = ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing;
     switch (ModuleManager->ReadAST(ModuleFileName, serialization::MK_Module,
@@ -1184,17 +1179,7 @@
     case ASTReader::OutOfDate:
     case ASTReader::Missing: {
       // The module file is missing or out-of-date. Build it.
-
-      // If we don't have a module, we don't know how to build the module file.
-      // Complain and return.
-      if (!Module) {
-        getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
-          << ModuleName
-          << SourceRange(ImportLoc, ModuleNameLoc);
-        ModuleBuildFailed = true;
-        return ModuleLoadResult();
-      }
-
+      assert(Module && "missing module file");
       // Check whether there is a cycle in the module graph.
       ModuleBuildStack ModPath = getSourceManager().getModuleBuildStack();
       ModuleBuildStack::iterator Pos = ModPath.begin(), PosEnd = ModPath.end();
@@ -1271,13 +1256,6 @@
       ModuleBuildFailed = true;
       return ModuleLoadResult();
     }
-    
-    if (!Module) {
-      // If we loaded the module directly, without finding a module map first,
-      // we'll have loaded the module's information from the module itself.
-      Module = PP->getHeaderSearchInfo().getModuleMap()
-                 .findModule((Path[0].first->getName()));
-    }
 
     // Cache the result of this top-level module lookup for later.
     Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
@@ -1360,11 +1338,19 @@
 
     // Check whether this module is available.
     clang::Module::Requirement Requirement;
-    if (!Module->isAvailable(getLangOpts(), getTarget(), Requirement)) {
-      getDiagnostics().Report(ImportLoc, diag::err_module_unavailable)
-        << Module->getFullModuleName()
-        << Requirement.second << Requirement.first
-        << SourceRange(Path.front().second, Path.back().second);
+    clang::Module::HeaderDirective MissingHeader;
+    if (!Module->isAvailable(getLangOpts(), getTarget(), Requirement,
+                             MissingHeader)) {
+      if (MissingHeader.FileNameLoc.isValid()) {
+        getDiagnostics().Report(MissingHeader.FileNameLoc,
+                                diag::err_module_header_missing)
+          << MissingHeader.IsUmbrella << MissingHeader.FileName;
+      } else {
+        getDiagnostics().Report(ImportLoc, diag::err_module_unavailable)
+          << Module->getFullModuleName()
+          << Requirement.second << Requirement.first
+          << SourceRange(Path.front().second, Path.back().second);
+      }
       LastModuleImportLoc = ImportLoc;
       LastModuleImportResult = ModuleLoadResult();
       return ModuleLoadResult();
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 8db5cf1..b9706e8 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -19,7 +19,6 @@
 #include "clang/Lex/HeaderSearchOptions.h"
 #include "clang/Serialization/ASTReader.h"
 #include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringSwitch.h"
@@ -34,6 +33,8 @@
 #include "llvm/Support/Path.h"
 #include "llvm/Support/Process.h"
 #include "llvm/Support/system_error.h"
+#include <atomic>
+#include <memory>
 #include <sys/stat.h>
 using namespace clang;
 
@@ -262,7 +263,7 @@
     configList.split(configVals, ",");
     for (unsigned i = 0, e = configVals.size(); i != e; ++i) {
       StringRef key, val;
-      llvm::tie(key, val) = configVals[i].split("=");
+      std::tie(key, val) = configVals[i].split("=");
       if (val.empty()) {
         Diags.Report(SourceLocation(),
                      diag::err_analyzer_config_no_value) << configVals[i];
@@ -295,7 +296,8 @@
 }
 
 static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
-                             DiagnosticsEngine &Diags) {
+                             DiagnosticsEngine &Diags,
+                             const TargetOptions &TargetOpts) {
   using namespace options;
   bool Success = true;
 
@@ -322,10 +324,16 @@
     Opts.setDebugInfo(CodeGenOptions::DebugLineTablesOnly);
   } else if (Args.hasArg(OPT_g_Flag) || Args.hasArg(OPT_gdwarf_2) ||
              Args.hasArg(OPT_gdwarf_3) || Args.hasArg(OPT_gdwarf_4)) {
-    if (Args.hasFlag(OPT_flimit_debug_info, OPT_fno_limit_debug_info, true))
-      Opts.setDebugInfo(CodeGenOptions::LimitedDebugInfo);
-    else
+    bool Default = false;
+    // Until dtrace (via CTF) can deal with distributed debug info,
+    // Darwin defaults to standalone/full debug info.
+    if (llvm::Triple(TargetOpts.Triple).isOSDarwin())
+      Default = true;
+
+    if (Args.hasFlag(OPT_fstandalone_debug, OPT_fno_standalone_debug, Default))
       Opts.setDebugInfo(CodeGenOptions::FullDebugInfo);
+    else
+      Opts.setDebugInfo(CodeGenOptions::LimitedDebugInfo);
   }
   Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
   Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
@@ -358,8 +366,11 @@
                    (Opts.OptimizationLevel > 1 && !Opts.OptimizeSize));
   Opts.RerollLoops = Args.hasArg(OPT_freroll_loops);
 
+  Opts.DisableIntegratedAS = Args.hasArg(OPT_fno_integrated_as);
   Opts.Autolink = !Args.hasArg(OPT_fno_autolink);
   Opts.SampleProfileFile = Args.getLastArgValue(OPT_fprofile_sample_use_EQ);
+  Opts.ProfileInstrGenerate = Args.hasArg(OPT_fprofile_instr_generate);
+  Opts.InstrProfileInput = Args.getLastArgValue(OPT_fprofile_instr_use_EQ);
   Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
   Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
   Opts.CUDAIsDevice = Args.hasArg(OPT_fcuda_is_device);
@@ -371,7 +382,6 @@
   Opts.DisableFree = Args.hasArg(OPT_disable_free);
   Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
   Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
-  Opts.HiddenWeakVTables = Args.hasArg(OPT_fhidden_weak_vtables);
   Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable);
   Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
   Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
@@ -437,15 +447,14 @@
   Opts.InstrumentFunctions = Args.hasArg(OPT_finstrument_functions);
   Opts.InstrumentForProfiling = Args.hasArg(OPT_pg);
   Opts.EmitOpenCLArgMetadata = Args.hasArg(OPT_cl_kernel_arg_info);
+  Opts.CompressDebugSections = Args.hasArg(OPT_compress_debug_sections);
   Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
   Opts.LinkBitcodeFile = Args.getLastArgValue(OPT_mlink_bitcode_file);
   Opts.SanitizerBlacklistFile = Args.getLastArgValue(OPT_fsanitize_blacklist);
   Opts.SanitizeMemoryTrackOrigins =
-    Args.hasArg(OPT_fsanitize_memory_track_origins);
-  Opts.SanitizeAddressZeroBaseShadow =
-    Args.hasArg(OPT_fsanitize_address_zero_base_shadow);
+      getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
   Opts.SanitizeUndefinedTrapOnError =
-    Args.hasArg(OPT_fsanitize_undefined_trap_on_error);
+      Args.hasArg(OPT_fsanitize_undefined_trap_on_error);
   Opts.SSPBufferSize =
       getLastArgIntValue(Args, OPT_stack_protector_buffer_size, 8, Diags);
   Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
@@ -520,6 +529,7 @@
   Opts.OutputFile = Args.getLastArgValue(OPT_dependency_file);
   Opts.Targets = Args.getAllArgValues(OPT_MT);
   Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
+  Opts.IncludeModuleFiles = Args.hasArg(OPT_module_file_deps);
   Opts.UsePhonyTargets = Args.hasArg(OPT_MP);
   Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
   Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
@@ -692,6 +702,8 @@
       Opts.ProgramAction = frontend::ParseSyntaxOnly; break;
     case OPT_module_file_info:
       Opts.ProgramAction = frontend::ModuleFileInfo; break;
+    case OPT_verify_pch:
+      Opts.ProgramAction = frontend::VerifyPCH; break;
     case OPT_print_decl_contexts:
       Opts.ProgramAction = frontend::PrintDeclContext; break;
     case OPT_print_preamble:
@@ -820,10 +832,12 @@
     Opts.ObjCMTAction |= FrontendOptions::ObjCMT_AtomicProperty;
   if (Args.hasArg(OPT_objcmt_ns_nonatomic_iosonly))
     Opts.ObjCMTAction |= FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty;
+  if (Args.hasArg(OPT_objcmt_migrate_designated_init))
+    Opts.ObjCMTAction |= FrontendOptions::ObjCMT_DesignatedInitializer;
   if (Args.hasArg(OPT_objcmt_migrate_all))
     Opts.ObjCMTAction |= FrontendOptions::ObjCMT_MigrateDecls;
 
-  Opts.ObjCMTWhiteListPath = Args.getLastArgValue(OPT_objcmt_white_list_dir_path);
+  Opts.ObjCMTWhiteListPath = Args.getLastArgValue(OPT_objcmt_whitelist_dir_path);
 
   if (Opts.ARCMTAction != FrontendOptions::ARCMT_None &&
       Opts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
@@ -906,6 +920,7 @@
     Opts.UseLibcxx = (strcmp(A->getValue(), "libc++") == 0);
   Opts.ResourceDir = Args.getLastArgValue(OPT_resource_dir);
   Opts.ModuleCachePath = Args.getLastArgValue(OPT_fmodules_cache_path);
+  Opts.ModuleUserBuildPath = Args.getLastArgValue(OPT_fmodules_user_build_path);
   Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
   // -fmodules implies -fmodule-maps
   Opts.ModuleMaps = Args.hasArg(OPT_fmodule_maps) || Args.hasArg(OPT_fmodules);
@@ -913,6 +928,13 @@
       getLastArgIntValue(Args, OPT_fmodules_prune_interval, 7 * 24 * 60 * 60);
   Opts.ModuleCachePruneAfter =
       getLastArgIntValue(Args, OPT_fmodules_prune_after, 31 * 24 * 60 * 60);
+  Opts.ModulesValidateOncePerBuildSession =
+      Args.hasArg(OPT_fmodules_validate_once_per_build_session);
+  Opts.BuildSessionTimestamp =
+      getLastArgUInt64Value(Args, OPT_fbuild_session_timestamp, 0);
+  Opts.ModulesValidateSystemHeaders =
+      Args.hasArg(OPT_fmodules_validate_system_headers);
+
   for (arg_iterator it = Args.filtered_begin(OPT_fmodules_ignore_macro),
                     ie = Args.filtered_end();
        it != ie; ++it) {
@@ -998,12 +1020,16 @@
   }
 
   // Add the path prefixes which are implicitly treated as being system headers.
-  for (arg_iterator I = Args.filtered_begin(OPT_isystem_prefix,
-                                            OPT_ino_system_prefix),
+  for (arg_iterator I = Args.filtered_begin(OPT_system_header_prefix,
+                                            OPT_no_system_header_prefix),
                     E = Args.filtered_end();
        I != E; ++I)
-    Opts.AddSystemHeaderPrefix((*I)->getValue(),
-                               (*I)->getOption().matches(OPT_isystem_prefix));
+    Opts.AddSystemHeaderPrefix(
+        (*I)->getValue(), (*I)->getOption().matches(OPT_system_header_prefix));
+
+  for (arg_iterator I = Args.filtered_begin(OPT_ivfsoverlay),
+       E = Args.filtered_end(); I != E; ++I)
+    Opts.AddVFSOverlayFile((*I)->getValue());
 }
 
 void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
@@ -1091,6 +1117,9 @@
   // OpenCL and C++ both have bool, true, false keywords.
   Opts.Bool = Opts.OpenCL || Opts.CPlusPlus;
 
+  // OpenCL has half keyword
+  Opts.Half = Opts.OpenCL;
+
   // C++ has wchar_t keyword.
   Opts.WChar = Opts.CPlusPlus;
 
@@ -1139,7 +1168,8 @@
       Diags.Report(diag::err_drv_invalid_value)
         << A->getAsString(Args) << A->getValue();
     else {
-      // Valid standard, check to make sure language and standard are compatable.    
+      // Valid standard, check to make sure language and standard are
+      // compatible.
       const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
       switch (IK) {
       case IK_C:
@@ -1290,11 +1320,11 @@
                                    OPT_fno_dollars_in_identifiers,
                                    Opts.DollarIdents);
   Opts.PascalStrings = Args.hasArg(OPT_fpascal_strings);
-  Opts.MicrosoftExt
-    = Args.hasArg(OPT_fms_extensions) || Args.hasArg(OPT_fms_compatibility);
-  Opts.MicrosoftMode = Args.hasArg(OPT_fms_compatibility);
+  Opts.MSVCCompat = Args.hasArg(OPT_fms_compatibility);
+  Opts.MicrosoftExt = Opts.MSVCCompat || Args.hasArg(OPT_fms_extensions);
   Opts.AsmBlocks = Args.hasArg(OPT_fasm_blocks) || Opts.MicrosoftExt;
   Opts.MSCVersion = getLastArgIntValue(Args, OPT_fmsc_version, 0, Diags);
+  Opts.VtorDispMode = getLastArgIntValue(Args, OPT_vtordisp_mode_EQ, 1, Diags);
   Opts.Borland = Args.hasArg(OPT_fborland_extensions);
   Opts.WritableStrings = Args.hasArg(OPT_fwritable_strings);
   Opts.ConstStrings = Args.hasFlag(OPT_fconst_strings, OPT_fno_const_strings,
@@ -1316,7 +1346,7 @@
   Opts.ModulesDeclUse = Args.hasArg(OPT_fmodules_decluse);
   Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
   Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
-  Opts.ShortWChar = Args.hasArg(OPT_fshort_wchar);
+  Opts.ShortWChar = Args.hasFlag(OPT_fshort_wchar, OPT_fno_short_wchar, false);
   Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
   Opts.Freestanding = Args.hasArg(OPT_ffreestanding);
   Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
@@ -1391,8 +1421,30 @@
     }
   }
 
-  // Check if -fopenmp is specified.
-  Opts.OpenMP = Args.hasArg(OPT_fopenmp);
+  if (Arg *A = Args.getLastArg(OPT_fms_memptr_rep_EQ)) {
+    LangOptions::PragmaMSPointersToMembersKind InheritanceModel =
+        llvm::StringSwitch<LangOptions::PragmaMSPointersToMembersKind>(
+            A->getValue())
+            .Case("single",
+                  LangOptions::PPTMK_FullGeneralitySingleInheritance)
+            .Case("multiple",
+                  LangOptions::PPTMK_FullGeneralityMultipleInheritance)
+            .Case("virtual",
+                  LangOptions::PPTMK_FullGeneralityVirtualInheritance)
+            .Default(LangOptions::PPTMK_BestCase);
+    if (InheritanceModel == LangOptions::PPTMK_BestCase)
+      Diags.Report(diag::err_drv_invalid_value)
+          << "-fms-memptr-rep=" << A->getValue();
+
+    Opts.setMSPointerToMemberRepresentationMethod(InheritanceModel);
+  }
+
+  // Check if -fopenmp= is specified.
+  if (const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ)) {
+    Opts.OpenMP = llvm::StringSwitch<bool>(A->getValue())
+        .Case("libiomp5", true)
+        .Default(false);
+  }
 
   // Record whether the __DEPRECATED define was requested.
   Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
@@ -1424,7 +1476,8 @@
     break;
   case 0: Opts.setStackProtector(LangOptions::SSPOff); break;
   case 1: Opts.setStackProtector(LangOptions::SSPOn);  break;
-  case 2: Opts.setStackProtector(LangOptions::SSPReq); break;
+  case 2: Opts.setStackProtector(LangOptions::SSPStrong); break;
+  case 3: Opts.setStackProtector(LangOptions::SSPReq); break;
   }
 
   // Parse -fsanitize= arguments.
@@ -1575,6 +1628,7 @@
   case frontend::GeneratePTH:
   case frontend::ParseSyntaxOnly:
   case frontend::ModuleFileInfo:
+  case frontend::VerifyPCH:
   case frontend::PluginAction:
   case frontend::PrintDeclContext:
   case frontend::RewriteObjC:
@@ -1605,7 +1659,6 @@
 static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
   using namespace options;
   Opts.ABI = Args.getLastArgValue(OPT_target_abi);
-  Opts.CXXABI = Args.getLastArgValue(OPT_cxx_abi);
   Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
   Opts.FPMath = Args.getLastArgValue(OPT_mfpmath);
   Opts.FeaturesAsWritten = Args.getAllArgValues(OPT_target_feature);
@@ -1617,8 +1670,6 @@
     Opts.Triple = llvm::sys::getDefaultTargetTriple();
 }
 
-//
-
 bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
                                         const char *const *ArgBegin,
                                         const char *const *ArgEnd,
@@ -1626,12 +1677,12 @@
   bool Success = true;
 
   // Parse the arguments.
-  OwningPtr<OptTable> Opts(createDriverOptTable());
+  std::unique_ptr<OptTable> Opts(createDriverOptTable());
   const unsigned IncludedFlagsBitmask = options::CC1Option;
   unsigned MissingArgIndex, MissingArgCount;
-  OwningPtr<InputArgList> Args(
-    Opts->ParseArgs(ArgBegin, ArgEnd, MissingArgIndex, MissingArgCount,
-                    IncludedFlagsBitmask));
+  std::unique_ptr<InputArgList> Args(
+      Opts->ParseArgs(ArgBegin, ArgEnd, MissingArgIndex, MissingArgCount,
+                      IncludedFlagsBitmask));
 
   // Check for missing argument error.
   if (MissingArgCount) {
@@ -1656,8 +1707,9 @@
   ParseFileSystemArgs(Res.getFileSystemOpts(), *Args);
   // FIXME: We shouldn't have to pass the DashX option around here
   InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), *Args, Diags);
-  Success = ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, DashX, Diags)
-            && Success;
+  ParseTargetArgs(Res.getTargetOpts(), *Args);
+  Success = ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, DashX, Diags,
+                             Res.getTargetOpts()) && Success;
   ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), *Args);
   if (DashX != IK_AST && DashX != IK_LLVM_IR) {
     ParseLangArgs(*Res.getLangOpts(), *Args, DashX, Diags);
@@ -1672,8 +1724,6 @@
   ParsePreprocessorArgs(Res.getPreprocessorOpts(), *Args, FileMgr, Diags);
   ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), *Args,
                               Res.getFrontendOpts().ProgramAction);
-  ParseTargetArgs(Res.getTargetOpts(), *Args);
-
   return Success;
 }
 
@@ -1753,8 +1803,7 @@
   
   // Extend the signature with the target options.
   code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
-                      TargetOpts->ABI, TargetOpts->CXXABI,
-                      TargetOpts->LinkerVersion);
+                      TargetOpts->ABI, TargetOpts->LinkerVersion);
   for (unsigned i = 0, n = TargetOpts->FeaturesAsWritten.size(); i != n; ++i)
     code = hash_combine(code, TargetOpts->FeaturesAsWritten[i]);
 
@@ -1763,7 +1812,6 @@
   const HeaderSearchOptions &hsOpts = getHeaderSearchOpts();
   code = hash_combine(code, ppOpts.UsePredefines, ppOpts.DetailedRecord);
 
-  std::vector<StringRef> MacroDefs;
   for (std::vector<std::pair<std::string, bool/*isUndef*/> >::const_iterator 
             I = getPreprocessorOpts().Macros.begin(),
          IEnd = getPreprocessorOpts().Macros.end();
@@ -1786,12 +1834,15 @@
                       hsOpts.UseStandardCXXIncludes,
                       hsOpts.UseLibcxx);
 
+  // Extend the signature with the user build path.
+  code = hash_combine(code, hsOpts.ModuleUserBuildPath);
+
   // Darwin-specific hack: if we have a sysroot, use the contents and
   // modification time of
   //   $sysroot/System/Library/CoreServices/SystemVersion.plist
   // as part of the module hash.
   if (!hsOpts.Sysroot.empty()) {
-    llvm::OwningPtr<llvm::MemoryBuffer> buffer;
+    std::unique_ptr<llvm::MemoryBuffer> buffer;
     SmallString<128> systemVersionFile;
     systemVersionFile += hsOpts.Sysroot;
     llvm::sys::path::append(systemVersionFile, "System");
@@ -1812,10 +1863,11 @@
 
 namespace clang {
 
-// Declared in clang/Frontend/Utils.h.
-int getLastArgIntValue(const ArgList &Args, OptSpecifier Id, int Default,
-                       DiagnosticsEngine *Diags) {
-  int Res = Default;
+template<typename IntTy>
+static IntTy getLastArgIntValueImpl(const ArgList &Args, OptSpecifier Id,
+                                    IntTy Default,
+                                    DiagnosticsEngine *Diags) {
+  IntTy Res = Default;
   if (Arg *A = Args.getLastArg(Id)) {
     if (StringRef(A->getValue()).getAsInteger(10, Res)) {
       if (Diags)
@@ -1825,4 +1877,32 @@
   }
   return Res;
 }
+
+// Declared in clang/Frontend/Utils.h.
+int getLastArgIntValue(const ArgList &Args, OptSpecifier Id, int Default,
+                       DiagnosticsEngine *Diags) {
+  return getLastArgIntValueImpl<int>(Args, Id, Default, Diags);
+}
+
+uint64_t getLastArgUInt64Value(const ArgList &Args, OptSpecifier Id,
+                               uint64_t Default,
+                               DiagnosticsEngine *Diags) {
+  return getLastArgIntValueImpl<uint64_t>(Args, Id, Default, Diags);
+}
+
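+// Keep intentionally leaked pointers (e.g. under -disable-free) reachable so
+// that leak detectors do not report them.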
+void BuryPointer(const void *Ptr) {
+  // This function may be called only a small, fixed number of times per
+  // invocation; otherwise we really do have a leak that we want reported.
+  // If it is called more than kGraveYardMaxSize times, the extra pointers
+  // will not be buried, and a leak detector will report the leak, which is
+  // exactly what we want in that case.
+  static const size_t kGraveYardMaxSize = 16;
+  LLVM_ATTRIBUTE_UNUSED static const void *GraveYard[kGraveYardMaxSize];
+  static std::atomic<unsigned> GraveYardSize;
+  unsigned Idx = GraveYardSize++;
+  if (Idx >= kGraveYardMaxSize)
+    return;
+  GraveYard[Idx] = Ptr;
+}
 }
diff --git a/lib/Frontend/CreateInvocationFromCommandLine.cpp b/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 78f39d4..ededf9a 100644
--- a/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -52,7 +52,7 @@
   // Don't check that inputs exist, they may have been remapped.
   TheDriver.setCheckInputsExist(false);
 
-  OwningPtr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
+  std::unique_ptr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
 
   // Just print the cc1 options if -### was present.
   if (C->getArgs().hasArg(driver::options::OPT__HASH_HASH_HASH)) {
@@ -78,12 +78,12 @@
   }
 
   const ArgStringList &CCArgs = Cmd->getArguments();
-  OwningPtr<CompilerInvocation> CI(new CompilerInvocation());
+  std::unique_ptr<CompilerInvocation> CI(new CompilerInvocation());
   if (!CompilerInvocation::CreateFromArgs(*CI,
                                      const_cast<const char **>(CCArgs.data()),
                                      const_cast<const char **>(CCArgs.data()) +
                                      CCArgs.size(),
                                      *Diags))
     return 0;
-  return CI.take();
+  return CI.release();
 }
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index 4037af9..b472eaa 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -20,6 +20,7 @@
 #include "clang/Lex/LexDiagnostic.h"
 #include "clang/Lex/PPCallbacks.h"
 #include "clang/Lex/Preprocessor.h"
+#include "clang/Serialization/ASTReader.h"
 #include "llvm/ADT/StringSet.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/Path.h"
@@ -28,7 +29,8 @@
 using namespace clang;
 
 namespace {
-class DependencyFileCallback : public PPCallbacks {
+/// Private implementation for DependencyFileGenerator
+class DFGImpl : public PPCallbacks {
   std::vector<std::string> Files;
   llvm::StringSet<> FilesSet;
   const Preprocessor *PP;
@@ -38,58 +40,84 @@
   bool PhonyTarget;
   bool AddMissingHeaderDeps;
   bool SeenMissingHeader;
+  bool IncludeModuleFiles;
 private:
   bool FileMatchesDepCriteria(const char *Filename,
                               SrcMgr::CharacteristicKind FileType);
-  void AddFilename(StringRef Filename);
   void OutputDependencyFile();
 
 public:
-  DependencyFileCallback(const Preprocessor *_PP,
-                         const DependencyOutputOptions &Opts)
+  DFGImpl(const Preprocessor *_PP, const DependencyOutputOptions &Opts)
     : PP(_PP), OutputFile(Opts.OutputFile), Targets(Opts.Targets),
       IncludeSystemHeaders(Opts.IncludeSystemHeaders),
       PhonyTarget(Opts.UsePhonyTargets),
       AddMissingHeaderDeps(Opts.AddMissingHeaderDeps),
-      SeenMissingHeader(false) {}
+      SeenMissingHeader(false),
+      IncludeModuleFiles(Opts.IncludeModuleFiles) {}
 
-  virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
-                           SrcMgr::CharacteristicKind FileType,
-                           FileID PrevFID);
-  virtual void InclusionDirective(SourceLocation HashLoc,
-                                  const Token &IncludeTok,
-                                  StringRef FileName,
-                                  bool IsAngled,
-                                  CharSourceRange FilenameRange,
-                                  const FileEntry *File,
-                                  StringRef SearchPath,
-                                  StringRef RelativePath,
-                                  const Module *Imported);
+  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+                   SrcMgr::CharacteristicKind FileType,
+                   FileID PrevFID) override;
+  void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+                          StringRef FileName, bool IsAngled,
+                          CharSourceRange FilenameRange, const FileEntry *File,
+                          StringRef SearchPath, StringRef RelativePath,
+                          const Module *Imported) override;
 
-  virtual void EndOfMainFile() {
+  void EndOfMainFile() override {
     OutputDependencyFile();
   }
+
+  void AddFilename(StringRef Filename);
+  bool includeSystemHeaders() const { return IncludeSystemHeaders; }
+  bool includeModuleFiles() const { return IncludeModuleFiles; }
+};
+
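+/// AST reader listener that forwards the module files and input files seen by
+/// the ASTReader to the dependency file generator (DFGImpl).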
+class DFGASTReaderListener : public ASTReaderListener {
+  DFGImpl &Parent;
+public:
+  DFGASTReaderListener(DFGImpl &Parent)
+  : Parent(Parent) { }
+  bool needsInputFileVisitation() override { return true; }
+  bool needsSystemInputFileVisitation() override {
+    return Parent.includeSystemHeaders();
+  }
+  void visitModuleFile(StringRef Filename) override;
+  bool visitInputFile(StringRef Filename, bool isSystem,
+                      bool isOverridden) override;
 };
 }
 
-void clang::AttachDependencyFileGen(Preprocessor &PP,
-                                    const DependencyOutputOptions &Opts) {
+DependencyFileGenerator::DependencyFileGenerator(void *Impl)
+: Impl(Impl) { }
+
+DependencyFileGenerator *DependencyFileGenerator::CreateAndAttachToPreprocessor(
+    clang::Preprocessor &PP, const clang::DependencyOutputOptions &Opts) {
+
   if (Opts.Targets.empty()) {
     PP.getDiagnostics().Report(diag::err_fe_dependency_file_requires_MT);
-    return;
+    return NULL;
   }
 
   // Disable the "file not found" diagnostic if the -MG option was given.
   if (Opts.AddMissingHeaderDeps)
     PP.SetSuppressIncludeNotFoundError(true);
 
-  PP.addPPCallbacks(new DependencyFileCallback(&PP, Opts));
+  DFGImpl *Callback = new DFGImpl(&PP, Opts);
+  PP.addPPCallbacks(Callback); // PP owns the Callback
+  return new DependencyFileGenerator(Callback);
+}
+
+void DependencyFileGenerator::AttachToASTReader(ASTReader &R) {
+  DFGImpl *I = reinterpret_cast<DFGImpl *>(Impl);
+  assert(I && "missing implementation");
+  R.addListener(new DFGASTReaderListener(*I));
 }
 
 /// FileMatchesDepCriteria - Determine whether the given Filename should be
 /// considered as a dependency.
-bool DependencyFileCallback::FileMatchesDepCriteria(const char *Filename,
-                                          SrcMgr::CharacteristicKind FileType) {
+bool DFGImpl::FileMatchesDepCriteria(const char *Filename,
+                                     SrcMgr::CharacteristicKind FileType) {
   if (strcmp("<built-in>", Filename) == 0)
     return false;
 
@@ -99,10 +127,10 @@
   return FileType == SrcMgr::C_User;
 }
 
-void DependencyFileCallback::FileChanged(SourceLocation Loc,
-                                         FileChangeReason Reason,
-                                         SrcMgr::CharacteristicKind FileType,
-                                         FileID PrevFID) {
+void DFGImpl::FileChanged(SourceLocation Loc,
+                          FileChangeReason Reason,
+                          SrcMgr::CharacteristicKind FileType,
+                          FileID PrevFID) {
   if (Reason != PPCallbacks::EnterFile)
     return;
 
@@ -130,15 +158,15 @@
   AddFilename(Filename);
 }
 
-void DependencyFileCallback::InclusionDirective(SourceLocation HashLoc,
-                                                const Token &IncludeTok,
-                                                StringRef FileName,
-                                                bool IsAngled,
-                                                CharSourceRange FilenameRange,
-                                                const FileEntry *File,
-                                                StringRef SearchPath,
-                                                StringRef RelativePath,
-                                                const Module *Imported) {
+void DFGImpl::InclusionDirective(SourceLocation HashLoc,
+                                 const Token &IncludeTok,
+                                 StringRef FileName,
+                                 bool IsAngled,
+                                 CharSourceRange FilenameRange,
+                                 const FileEntry *File,
+                                 StringRef SearchPath,
+                                 StringRef RelativePath,
+                                 const Module *Imported) {
   if (!File) {
     if (AddMissingHeaderDeps)
       AddFilename(FileName);
@@ -147,7 +175,7 @@
   }
 }
 
-void DependencyFileCallback::AddFilename(StringRef Filename) {
+void DFGImpl::AddFilename(StringRef Filename) {
   if (FilesSet.insert(Filename))
     Files.push_back(Filename);
 }
@@ -164,15 +192,14 @@
   }
 }
 
-void DependencyFileCallback::OutputDependencyFile() {
+void DFGImpl::OutputDependencyFile() {
   if (SeenMissingHeader) {
-    bool existed;
-    llvm::sys::fs::remove(OutputFile, existed);
+    llvm::sys::fs::remove(OutputFile);
     return;
   }
 
   std::string Err;
-  llvm::raw_fd_ostream OS(OutputFile.c_str(), Err);
+  llvm::raw_fd_ostream OS(OutputFile.c_str(), Err, llvm::sys::fs::F_Text);
   if (!Err.empty()) {
     PP->getDiagnostics().Report(diag::err_fe_error_opening)
       << OutputFile << Err;
@@ -235,3 +262,17 @@
   }
 }
 
+bool DFGASTReaderListener::visitInputFile(llvm::StringRef Filename,
+                                          bool IsSystem, bool IsOverridden) {
+  assert(!IsSystem || needsSystemInputFileVisitation());
+  if (IsOverridden)
+    return true;
+
+  Parent.AddFilename(Filename);
+  return true;
+}
+
+void DFGASTReaderListener::visitModuleFile(llvm::StringRef Filename) {
+  if (Parent.includeModuleFiles())
+    Parent.AddFilename(Filename);
+}
diff --git a/lib/Frontend/DependencyGraph.cpp b/lib/Frontend/DependencyGraph.cpp
index e128d91..4bff3db 100644
--- a/lib/Frontend/DependencyGraph.cpp
+++ b/lib/Frontend/DependencyGraph.cpp
@@ -46,17 +46,13 @@
                           StringRef SysRoot)
     : PP(_PP), OutputFile(OutputFile.str()), SysRoot(SysRoot.str()) { }
 
-  virtual void InclusionDirective(SourceLocation HashLoc,
-                                  const Token &IncludeTok,
-                                  StringRef FileName,
-                                  bool IsAngled,
-                                  CharSourceRange FilenameRange,
-                                  const FileEntry *File,
-                                  StringRef SearchPath,
-                                  StringRef RelativePath,
-                                  const Module *Imported);
+  void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+                          StringRef FileName, bool IsAngled,
+                          CharSourceRange FilenameRange, const FileEntry *File,
+                          StringRef SearchPath, StringRef RelativePath,
+                          const Module *Imported) override;
 
-  virtual void EndOfMainFile() {
+  void EndOfMainFile() override {
     OutputGraphFile();
   }
   
@@ -101,7 +97,7 @@
 
 void DependencyGraphCallback::OutputGraphFile() {
   std::string Err;
-  llvm::raw_fd_ostream OS(OutputFile.c_str(), Err);
+  llvm::raw_fd_ostream OS(OutputFile.c_str(), Err, llvm::sys::fs::F_Text);
   if (!Err.empty()) {
     PP->getDiagnostics().Report(diag::err_fe_error_opening)
       << OutputFile << Err;
diff --git a/lib/Frontend/DiagnosticRenderer.cpp b/lib/Frontend/DiagnosticRenderer.cpp
index 4eee595..85e87a2 100644
--- a/lib/Frontend/DiagnosticRenderer.cpp
+++ b/lib/Frontend/DiagnosticRenderer.cpp
@@ -79,10 +79,10 @@
 public:
   FixitReceiver(SmallVectorImpl<FixItHint> &MergedFixits)
     : MergedFixits(MergedFixits) { }
-  virtual void insert(SourceLocation loc, StringRef text) {
+  void insert(SourceLocation loc, StringRef text) override {
     MergedFixits.push_back(FixItHint::CreateInsertion(loc, text));
   }
-  virtual void replace(CharSourceRange range, StringRef text) {
+  void replace(CharSourceRange range, StringRef text) override {
     MergedFixits.push_back(FixItHint::CreateReplacement(range, text));
   }
 };
@@ -499,8 +499,11 @@
   // Generate a note indicating the include location.
   SmallString<200> MessageStorage;
   llvm::raw_svector_ostream Message(MessageStorage);
-  Message << "while building module '" << ModuleName << "' imported from "
-          << PLoc.getFilename() << ':' << PLoc.getLine() << ":";
+  if (PLoc.getFilename())
+    Message << "while building module '" << ModuleName << "' imported from "
+            << PLoc.getFilename() << ':' << PLoc.getLine() << ":";
+  else
+    Message << "while building module '" << ModuleName << ":";
   emitNote(Loc, Message.str(), &SM);
 }
 
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 075fe93..d2ece7e 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -18,6 +18,7 @@
 #include "clang/Frontend/FrontendPluginRegistry.h"
 #include "clang/Frontend/LayoutOverrideSource.h"
 #include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Frontend/Utils.h"
 #include "clang/Lex/HeaderSearch.h"
 #include "clang/Lex/Preprocessor.h"
 #include "clang/Parse/ParseAST.h"
@@ -42,29 +43,29 @@
                                            ASTDeserializationListener *Previous)
     : Previous(Previous) { }
 
-  virtual void ReaderInitialized(ASTReader *Reader) {
+  void ReaderInitialized(ASTReader *Reader) override {
     if (Previous)
       Previous->ReaderInitialized(Reader);
   }
-  virtual void IdentifierRead(serialization::IdentID ID,
-                              IdentifierInfo *II) {
+  void IdentifierRead(serialization::IdentID ID,
+                      IdentifierInfo *II) override {
     if (Previous)
       Previous->IdentifierRead(ID, II);
   }
-  virtual void TypeRead(serialization::TypeIdx Idx, QualType T) {
+  void TypeRead(serialization::TypeIdx Idx, QualType T) override {
     if (Previous)
       Previous->TypeRead(Idx, T);
   }
-  virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+  void DeclRead(serialization::DeclID ID, const Decl *D) override {
     if (Previous)
       Previous->DeclRead(ID, D);
   }
-  virtual void SelectorRead(serialization::SelectorID ID, Selector Sel) {
+  void SelectorRead(serialization::SelectorID ID, Selector Sel) override {
     if (Previous)
       Previous->SelectorRead(ID, Sel);
   }
-  virtual void MacroDefinitionRead(serialization::PreprocessedEntityID PPID, 
-                                   MacroDefinition *MD) {
+  void MacroDefinitionRead(serialization::PreprocessedEntityID PPID,
+                           MacroDefinition *MD) override {
     if (Previous)
       Previous->MacroDefinitionRead(PPID, MD);
   }
@@ -76,7 +77,7 @@
   explicit DeserializedDeclsDumper(ASTDeserializationListener *Previous)
     : DelegatingDeserializationListener(Previous) { }
 
-  virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+  void DeclRead(serialization::DeclID ID, const Decl *D) override {
     llvm::outs() << "PCH DECL: " << D->getDeclKindName();
     if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
       llvm::outs() << " - " << *ND;
@@ -99,7 +100,7 @@
     : DelegatingDeserializationListener(Previous),
       Ctx(Ctx), NamesToCheck(NamesToCheck) { }
 
-  virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+  void DeclRead(serialization::DeclID ID, const Decl *D) override {
     if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
       if (NamesToCheck.find(ND->getNameAsString()) != NamesToCheck.end()) {
         unsigned DiagID
@@ -147,7 +148,7 @@
         ie = FrontendPluginRegistry::end();
         it != ie; ++it) {
       if (it->getName() == CI.getFrontendOpts().AddPluginActions[i]) {
-        OwningPtr<PluginASTAction> P(it->instantiate());
+        std::unique_ptr<PluginASTAction> P(it->instantiate());
         FrontendAction* c = P.get();
         if (P->ParseArgs(CI, CI.getFrontendOpts().AddPluginArgs[i]))
           Consumers.push_back(c->CreateASTConsumer(CI, InFile));
@@ -158,7 +159,6 @@
   return new MultiplexConsumer(Consumers);
 }
 
-
 bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
                                      const FrontendInputFile &Input) {
   assert(!Instance && "Already processing a source file!");
@@ -180,7 +180,7 @@
            "This action does not have AST file support!");
 
     IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
-    std::string Error;
+
     ASTUnit *AST = ASTUnit::LoadFromASTFile(InputFile, Diags,
                                             CI.getFileSystemOpts());
     if (!AST)
@@ -211,6 +211,32 @@
     return true;
   }
 
+  if (!CI.getHeaderSearchOpts().VFSOverlayFiles.empty()) {
+    IntrusiveRefCntPtr<vfs::OverlayFileSystem>
+        Overlay(new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
+    // Earlier VFS overlay files are pushed lower in the stack; later files
+    // are layered on top and take precedence.
+    const std::vector<std::string> &Files =
+        CI.getHeaderSearchOpts().VFSOverlayFiles;
+    for (std::vector<std::string>::const_iterator I = Files.begin(),
+                                                  E = Files.end();
+         I != E; ++I) {
+      std::unique_ptr<llvm::MemoryBuffer> Buffer;
+      if (llvm::errc::success != llvm::MemoryBuffer::getFile(*I, Buffer)) {
+        CI.getDiagnostics().Report(diag::err_missing_vfs_overlay_file) << *I;
+        goto failure;
+      }
+
+      IntrusiveRefCntPtr<vfs::FileSystem> FS =
+          vfs::getVFSFromYAML(Buffer.release(), /*DiagHandler*/ 0);
+      if (!FS.getPtr()) {
+        CI.getDiagnostics().Report(diag::err_invalid_vfs_overlay) << *I;
+        goto failure;
+      }
+      Overlay->pushOverlay(FS);
+    }
+    CI.setVirtualFileSystem(Overlay);
+  }
+
   // Set up the file and source managers, if needed.
   if (!CI.hasFileManager())
     CI.createFileManager();
@@ -265,7 +291,7 @@
   }
 
   // Set up the preprocessor.
-  CI.createPreprocessor();
+  CI.createPreprocessor(getTranslationUnitKind());
 
   // Inform the diagnostic client we are processing a source file.
   CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(),
@@ -281,8 +307,8 @@
   if (!usesPreprocessorOnly()) {
     CI.createASTContext();
 
-    OwningPtr<ASTConsumer> Consumer(
-                                   CreateWrappedASTConsumer(CI, InputFile));
+    std::unique_ptr<ASTConsumer> Consumer(
+        CreateWrappedASTConsumer(CI, InputFile));
     if (!Consumer)
       goto failure;
 
@@ -290,12 +316,11 @@
     
     if (!CI.getPreprocessorOpts().ChainedIncludes.empty()) {
       // Convert headers to PCH and chain them.
-      OwningPtr<ExternalASTSource> source;
-      source.reset(ChainedIncludesSource::create(CI));
+      IntrusiveRefCntPtr<ChainedIncludesSource> source;
+      source = ChainedIncludesSource::create(CI);
       if (!source)
         goto failure;
-      CI.setModuleManager(static_cast<ASTReader*>(
-         &static_cast<ChainedIncludesSource*>(source.get())->getFinalReader()));
+      CI.setModuleManager(static_cast<ASTReader*>(&source->getFinalReader()));
       CI.getASTContext().setExternalSource(source);
 
     } else if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
@@ -318,7 +343,7 @@
         goto failure;
     }
 
-    CI.setASTConsumer(Consumer.take());
+    CI.setASTConsumer(Consumer.release());
     if (!CI.hasASTConsumer())
       goto failure;
   }
@@ -335,7 +360,7 @@
   // provides the layouts from that file.
   if (!CI.getFrontendOpts().OverrideRecordLayoutsFile.empty() && 
       CI.hasASTContext() && !CI.getASTContext().getExternalSource()) {
-    OwningPtr<ExternalASTSource> 
+    IntrusiveRefCntPtr<ExternalASTSource> 
       Override(new LayoutOverrideSource(
                      CI.getFrontendOpts().OverrideRecordLayoutsFile));
     CI.getASTContext().setExternalSource(Override);
@@ -403,9 +428,9 @@
   //
   // FIXME: There is more per-file stuff we could just drop here?
   if (CI.getFrontendOpts().DisableFree) {
-    CI.takeASTConsumer();
+    BuryPointer(CI.takeASTConsumer());
     if (!isCurrentFileAST()) {
-      CI.takeSema();
+      BuryPointer(CI.takeSema());
       CI.resetAndLeakASTContext();
     }
   } else {
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index a3ab1be..2657c98 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -21,11 +21,11 @@
 #include "clang/Parse/Parser.h"
 #include "clang/Serialization/ASTReader.h"
 #include "clang/Serialization/ASTWriter.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/system_error.h"
+#include <memory>
 
 using namespace clang;
 
@@ -128,21 +128,38 @@
   return Includes;
 }
 
-static void addHeaderInclude(StringRef HeaderName,
-                             SmallVectorImpl<char> &Includes,
-                             const LangOptions &LangOpts) {
+static llvm::error_code addHeaderInclude(StringRef HeaderName,
+                                         SmallVectorImpl<char> &Includes,
+                                         const LangOptions &LangOpts,
+                                         bool IsExternC) {
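+  // Wrap the include in an extern "C" block when the module's headers need C
+  // linkage and we are compiling as C++.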
+  if (IsExternC && LangOpts.CPlusPlus)
+    Includes += "extern \"C\" {\n";
   if (LangOpts.ObjC1)
     Includes += "#import \"";
   else
     Includes += "#include \"";
-  Includes += HeaderName;
+  // Use an absolute path for the include; there's no reason to think that
+  // a relative path will work (. might not be on our include path) or that
+  // it will find the same file.
+  if (llvm::sys::path::is_absolute(HeaderName)) {
+    Includes += HeaderName;
+  } else {
+    SmallString<256> Header = HeaderName;
+    if (llvm::error_code Err = llvm::sys::fs::make_absolute(Header))
+      return Err;
+    Includes += Header;
+  }
   Includes += "\"\n";
+  if (IsExternC && LangOpts.CPlusPlus)
+    Includes += "}\n";
+  return llvm::error_code::success();
 }
 
-static void addHeaderInclude(const FileEntry *Header,
-                             SmallVectorImpl<char> &Includes,
-                             const LangOptions &LangOpts) {
-  addHeaderInclude(Header->getName(), Includes, LangOpts);
+static llvm::error_code addHeaderInclude(const FileEntry *Header,
+                                         SmallVectorImpl<char> &Includes,
+                                         const LangOptions &LangOpts,
+                                         bool IsExternC) {
+  return addHeaderInclude(Header->getName(), Includes, LangOpts, IsExternC);
 }
 
 /// \brief Collect the set of header includes needed to construct the given 
@@ -152,20 +169,21 @@
 ///
 /// \param Includes Will be augmented with the set of \#includes or \#imports
 /// needed to load all of the named headers.
-static void collectModuleHeaderIncludes(const LangOptions &LangOpts,
-                                        FileManager &FileMgr,
-                                        ModuleMap &ModMap,
-                                        clang::Module *Module,
-                                        SmallVectorImpl<char> &Includes) {
+static llvm::error_code
+collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
+                            ModuleMap &ModMap, clang::Module *Module,
+                            SmallVectorImpl<char> &Includes) {
   // Don't collect any headers for unavailable modules.
   if (!Module->isAvailable())
-    return;
+    return llvm::error_code::success();
 
   // Add includes for each of these headers.
   for (unsigned I = 0, N = Module->NormalHeaders.size(); I != N; ++I) {
     const FileEntry *Header = Module->NormalHeaders[I];
     Module->addTopHeader(Header);
-    addHeaderInclude(Header, Includes, LangOpts);
+    if (llvm::error_code Err =
+            addHeaderInclude(Header, Includes, LangOpts, Module->IsExternC))
+      return Err;
   }
   // Note that Module->PrivateHeaders will not be a TopHeader.
 
@@ -173,7 +191,9 @@
     Module->addTopHeader(UmbrellaHeader);
     if (Module->Parent) {
       // Include the umbrella header for submodules.
-      addHeaderInclude(UmbrellaHeader, Includes, LangOpts);
+      if (llvm::error_code Err = addHeaderInclude(UmbrellaHeader, Includes,
+                                                  LangOpts, Module->IsExternC))
+        return Err;
     }
   } else if (const DirectoryEntry *UmbrellaDir = Module->getUmbrellaDir()) {
     // Add all of the headers we find in this subdirectory.
@@ -198,16 +218,25 @@
         Module->addTopHeader(Header);
       }
       
-      // Include this header umbrella header for submodules.
-      addHeaderInclude(Dir->path(), Includes, LangOpts);
+      // Include this header as part of the umbrella directory.
+      if (llvm::error_code Err = addHeaderInclude(Dir->path(), Includes,
+                                                  LangOpts, Module->IsExternC))
+        return Err;
     }
+
+    if (EC)
+      return EC;
   }
   
   // Recurse into submodules.
   for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
                                       SubEnd = Module->submodule_end();
        Sub != SubEnd; ++Sub)
-    collectModuleHeaderIncludes(LangOpts, FileMgr, ModMap, *Sub, Includes);
+    if (llvm::error_code Err = collectModuleHeaderIncludes(
+            LangOpts, FileMgr, ModMap, *Sub, Includes))
+      return Err;
+
+  return llvm::error_code::success();
 }
 
 bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI, 
@@ -234,7 +263,15 @@
     // map with a single module (the common case).
     return false;
   }
-  
+
+  // If we're being run from the command-line, the module build stack will not
+  // have been filled in yet, so complete it now in order to allow us to detect
+  // module cycles.
+  SourceManager &SourceMgr = CI.getSourceManager();
+  if (SourceMgr.getModuleBuildStack().empty())
+    SourceMgr.pushModuleBuildStack(CI.getLangOpts().CurrentModule,
+                                   FullSourceLoc(SourceLocation(), SourceMgr));
+
   // Dig out the module definition.
   Module = HS.lookupModule(CI.getLangOpts().CurrentModule, 
                            /*AllowSearch=*/false);
@@ -247,10 +284,17 @@
 
   // Check whether we can build this module at all.
   clang::Module::Requirement Requirement;
-  if (!Module->isAvailable(CI.getLangOpts(), CI.getTarget(), Requirement)) {
-    CI.getDiagnostics().Report(diag::err_module_unavailable)
-      << Module->getFullModuleName()
-      << Requirement.second << Requirement.first;
+  clang::Module::HeaderDirective MissingHeader;
+  if (!Module->isAvailable(CI.getLangOpts(), CI.getTarget(), Requirement,
+                           MissingHeader)) {
+    if (MissingHeader.FileNameLoc.isValid()) {
+      CI.getDiagnostics().Report(diag::err_module_header_missing)
+        << MissingHeader.IsUmbrella << MissingHeader.FileName;
+    } else {
+      CI.getDiagnostics().Report(diag::err_module_unavailable)
+        << Module->getFullModuleName()
+        << Requirement.second << Requirement.first;
+    }
 
     return false;
   }
@@ -259,16 +303,26 @@
 
   // Collect the set of #includes we need to build the module.
   SmallString<256> HeaderContents;
+  llvm::error_code Err = llvm::error_code::success();
   if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader())
-    addHeaderInclude(UmbrellaHeader, HeaderContents, CI.getLangOpts());
-  collectModuleHeaderIncludes(CI.getLangOpts(), FileMgr,
-    CI.getPreprocessor().getHeaderSearchInfo().getModuleMap(),
-    Module, HeaderContents);
+    Err = addHeaderInclude(UmbrellaHeader, HeaderContents, CI.getLangOpts(),
+                           Module->IsExternC);
+  if (!Err)
+    Err = collectModuleHeaderIncludes(
+        CI.getLangOpts(), FileMgr,
+        CI.getPreprocessor().getHeaderSearchInfo().getModuleMap(), Module,
+        HeaderContents);
+
+  if (Err) {
+    CI.getDiagnostics().Report(diag::err_module_cannot_create_includes)
+      << Module->getFullModuleName() << Err.message();
+    return false;
+  }
 
   llvm::MemoryBuffer *InputBuffer =
       llvm::MemoryBuffer::getMemBufferCopy(HeaderContents,
                                            Module::getModuleInputBufferName());
-  // Ownership of InputBuffer will be transfered to the SourceManager.
+  // Ownership of InputBuffer will be transferred to the SourceManager.
   setCurrentInput(FrontendInputFile(InputBuffer, getCurrentFileKind(),
                                     Module->IsSystem));
   return true;
@@ -313,6 +367,30 @@
   return new ASTConsumer();
 }
 
+ASTConsumer *VerifyPCHAction::CreateASTConsumer(CompilerInstance &CI,
+                                                StringRef InFile) {
+  return new ASTConsumer();
+}
+
+void VerifyPCHAction::ExecuteAction() {
+  CompilerInstance &CI = getCompilerInstance();
+  bool Preamble = CI.getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
+  const std::string &Sysroot = CI.getHeaderSearchOpts().Sysroot;
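+  // Read the PCH with relaxed configuration checking, but verify that all of
+  // its input files (including system headers) are still up to date.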
+  std::unique_ptr<ASTReader> Reader(
+      new ASTReader(CI.getPreprocessor(), CI.getASTContext(),
+                    Sysroot.empty() ? "" : Sysroot.c_str(),
+                    /*DisableValidation*/ false,
+                    /*AllowPCHWithCompilerErrors*/ false,
+                    /*AllowConfigurationMismatch*/ true,
+                    /*ValidateSystemInputs*/ true));
+
+  Reader->ReadAST(getCurrentFile(),
+                  Preamble ? serialization::MK_Preamble
+                           : serialization::MK_PCH,
+                  SourceLocation(),
+                  ASTReader::ARR_ConfigurationMismatch);
+}
+
 namespace {
   /// \brief AST reader listener that dumps module information for a module
   /// file.
@@ -325,7 +403,7 @@
 #define DUMP_BOOLEAN(Value, Text)                       \
     Out.indent(4) << Text << ": " << (Value? "Yes" : "No") << "\n"
 
-    virtual bool ReadFullVersionInformation(StringRef FullVersion) {
+    bool ReadFullVersionInformation(StringRef FullVersion) override {
       Out.indent(2)
         << "Generated by "
         << (FullVersion == getClangFullRepositoryVersion()? "this"
@@ -334,8 +412,8 @@
       return ASTReaderListener::ReadFullVersionInformation(FullVersion);
     }
 
-    virtual bool ReadLanguageOptions(const LangOptions &LangOpts,
-                                     bool Complain) {
+    bool ReadLanguageOptions(const LangOptions &LangOpts,
+                             bool Complain) override {
       Out.indent(2) << "Language options:\n";
 #define LANGOPT(Name, Bits, Default, Description) \
       DUMP_BOOLEAN(LangOpts.Name, Description);
@@ -350,13 +428,12 @@
       return false;
     }
 
-    virtual bool ReadTargetOptions(const TargetOptions &TargetOpts,
-                                   bool Complain) {
+    bool ReadTargetOptions(const TargetOptions &TargetOpts,
+                           bool Complain) override {
       Out.indent(2) << "Target options:\n";
       Out.indent(4) << "  Triple: " << TargetOpts.Triple << "\n";
       Out.indent(4) << "  CPU: " << TargetOpts.CPU << "\n";
       Out.indent(4) << "  ABI: " << TargetOpts.ABI << "\n";
-      Out.indent(4) << "  C++ ABI: " << TargetOpts.CXXABI << "\n";
       Out.indent(4) << "  Linker version: " << TargetOpts.LinkerVersion << "\n";
 
       if (!TargetOpts.FeaturesAsWritten.empty()) {
@@ -370,8 +447,8 @@
       return false;
     }
 
-    virtual bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
-                                         bool Complain) {
+    bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
+                                 bool Complain) override {
       Out.indent(2) << "Header search options:\n";
       Out.indent(4) << "System root [-isysroot=]: '" << HSOpts.Sysroot << "'\n";
       DUMP_BOOLEAN(HSOpts.UseBuiltinIncludes,
@@ -385,9 +462,9 @@
       return false;
     }
 
-    virtual bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
-                                         bool Complain,
-                                         std::string &SuggestedPredefines) {
+    bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+                                 bool Complain,
+                                 std::string &SuggestedPredefines) override {
       Out.indent(2) << "Preprocessor options:\n";
       DUMP_BOOLEAN(PPOpts.UsePredefines,
                    "Uses compiler/target-specific predefines [-undef]");
@@ -416,12 +493,12 @@
 
 void DumpModuleInfoAction::ExecuteAction() {
   // Set up the output file.
-  llvm::OwningPtr<llvm::raw_fd_ostream> OutFile;
+  std::unique_ptr<llvm::raw_fd_ostream> OutFile;
   StringRef OutputFileName = getCompilerInstance().getFrontendOpts().OutputFile;
   if (!OutputFileName.empty() && OutputFileName != "-") {
     std::string ErrorInfo;
     OutFile.reset(new llvm::raw_fd_ostream(OutputFileName.str().c_str(),
-                                           ErrorInfo));
+                                           ErrorInfo, llvm::sys::fs::F_Text));
   }
   llvm::raw_ostream &Out = OutFile.get()? *OutFile.get() : llvm::outs();
 
diff --git a/lib/Frontend/HeaderIncludeGen.cpp b/lib/Frontend/HeaderIncludeGen.cpp
index 237e5b1..6d2e378 100644
--- a/lib/Frontend/HeaderIncludeGen.cpp
+++ b/lib/Frontend/HeaderIncludeGen.cpp
@@ -40,9 +40,9 @@
       delete OutputFile;
   }
 
-  virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
-                           SrcMgr::CharacteristicKind FileType,
-                           FileID PrevFID);
+  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+                   SrcMgr::CharacteristicKind FileType,
+                   FileID PrevFID) override;
 };
 }
 
@@ -56,7 +56,8 @@
   if (!OutputPath.empty()) {
     std::string Error;
     llvm::raw_fd_ostream *OS = new llvm::raw_fd_ostream(
-        OutputPath.str().c_str(), Error, llvm::sys::fs::F_Append);
+        OutputPath.str().c_str(), Error,
+        llvm::sys::fs::F_Append | llvm::sys::fs::F_Text);
     if (!Error.empty()) {
       PP.getDiagnostics().Report(
         clang::diag::warn_fe_cc_print_header_failure) << Error;
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index d144cbb..34c01b4 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -110,7 +110,7 @@
 }  // end anonymous namespace.
 
 static bool CanPrefixSysroot(StringRef Path) {
-#if defined(_WIN32)
+#if defined(LLVM_ON_WIN32)
   return !Path.empty() && llvm::sys::path::is_separator(Path[0]);
 #else
   return llvm::sys::path::is_absolute(Path);
@@ -268,7 +268,6 @@
 
   switch (os) {
   case llvm::Triple::Linux:
-  case llvm::Triple::Win32:
     llvm_unreachable("Include management is handled in the driver.");
 
   case llvm::Triple::Haiku:
@@ -307,10 +306,13 @@
     break;
   case llvm::Triple::RTEMS:
     break;
-  case llvm::Triple::Cygwin:
-    AddPath("/usr/include/w32api", System, false);
-    break;
-  case llvm::Triple::MinGW32: { 
+  case llvm::Triple::Win32:
+    switch (triple.getEnvironment()) {
+    default: llvm_unreachable("Include management is handled in the driver.");
+    case llvm::Triple::Cygnus:
+      AddPath("/usr/include/w32api", System, false);
+      break;
+    case llvm::Triple::GNU:
       // mingw-w64 crt include paths
       // <sysroot>/i686-w64-mingw32/include
       SmallString<128> P = StringRef(HSOpts.ResourceDir);
@@ -328,12 +330,12 @@
       llvm::sys::path::append(P, "../../../include");
       AddPath(P.str(), System, false);
       AddPath("/mingw/include", System, false);
-#if defined(_WIN32)
+#if defined(LLVM_ON_WIN32)
       AddPath("c:/mingw/include", System, false); 
 #endif
+      break;
     }
     break;
-      
   default:
     break;
   }
@@ -376,58 +378,57 @@
       AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
                                   "arm-apple-darwin10", "v6", "", triple);
       break;
+
+    case llvm::Triple::arm64:
+      AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+                                  "arm64-apple-darwin10", "", "", triple);
+      break;
     }
     return;
   }
 
   switch (os) {
   case llvm::Triple::Linux:
-  case llvm::Triple::Win32:
     llvm_unreachable("Include management is handled in the driver.");
 
-  case llvm::Triple::Cygwin:
-    // Cygwin-1.7
-    AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.7.3");
-    AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.5.3");
-    AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.4");
-    // g++-4 / Cygwin-1.5
-    AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.2");
-    break;
-  case llvm::Triple::MinGW32:
-    // mingw-w64 C++ include paths (i686-w64-mingw32 and x86_64-w64-mingw32)
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.0");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.1");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.2");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.3");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.4");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.0");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.1");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.2");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.3");
-    AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.0");
-    // mingw.org C++ include paths
-    AddMinGWCPlusPlusIncludePaths("/mingw/lib/gcc", "mingw32", "4.5.2"); //MSYS
-#if defined(_WIN32)
-    AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.8.1");
-    AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.6.2");
-    AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.6.1");
-    AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.2");
-    AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.0");
-    AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.4.0");
-    AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.3.0");
+  case llvm::Triple::Win32:
+    switch (triple.getEnvironment()) {
+    default: llvm_unreachable("Include management is handled in the driver.");
+    case llvm::Triple::Cygnus:
+      // Cygwin-1.7
+      AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.7.3");
+      AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.5.3");
+      AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.4");
+      // g++-4 / Cygwin-1.5
+      AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.2");
+      break;
+    case llvm::Triple::GNU:
+      // mingw-w64 C++ include paths (i686-w64-mingw32 and x86_64-w64-mingw32)
+      AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.0");
+      AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.1");
+      AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.2");
+      AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.3");
+      AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.8.0");
+      AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.8.1");
+      AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.8.2");
+      // mingw.org C++ include paths
+#if defined(LLVM_ON_WIN32)
+      AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.7.0");
+      AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.7.1");
+      AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.7.2");
+      AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.7.3");
+      AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.8.0");
+      AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.8.1");
+      AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.8.2");
 #endif
-    break;
+      break;
+    }
+    break;
   case llvm::Triple::DragonFly:
     if (llvm::sys::fs::exists("/usr/lib/gcc47"))
       AddPath("/usr/include/c++/4.7", CXXSystem, false);
     else
       AddPath("/usr/include/c++/4.4", CXXSystem, false);
     break;
-  case llvm::Triple::FreeBSD:
-    // FreeBSD 8.0
-    // FreeBSD 7.3
-    AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2", "", "", "", triple);
-    break;
   case llvm::Triple::OpenBSD: {
     std::string t = triple.getTriple();
     if (t.substr(0, 6) == "x86_64")
@@ -466,8 +467,13 @@
     break; // Everything else continues to use this routine's logic.
 
   case llvm::Triple::Linux:
-  case llvm::Triple::Win32:
     return;
+
+  case llvm::Triple::Win32:
+    if (triple.getEnvironment() == llvm::Triple::MSVC ||
+        triple.getEnvironment() == llvm::Triple::Itanium)
+      return;
+    break;
   }
 
   if (Lang.CPlusPlus && HSOpts.UseStandardCXXIncludes &&
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index c7d2550..761dba4 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -300,7 +300,7 @@
                                                const LangOptions &LangOpts,
                                                const FrontendOptions &FEOpts,
                                                MacroBuilder &Builder) {
-  if (!LangOpts.MicrosoftMode && !LangOpts.TraditionalCPP)
+  if (!LangOpts.MSVCCompat && !LangOpts.TraditionalCPP)
     Builder.defineMacro("__STDC__");
   if (LangOpts.Freestanding)
     Builder.defineMacro("__STDC_HOSTED__", "0");
@@ -315,9 +315,11 @@
     else if (!LangOpts.GNUMode && LangOpts.Digraphs)
       Builder.defineMacro("__STDC_VERSION__", "199409L");
   } else {
-    // FIXME: Use the right value for __cplusplus for C++1y once one is chosen.
-    if (LangOpts.CPlusPlus1y)
-      Builder.defineMacro("__cplusplus", "201305L");
+    // C++1y [cpp.predefined]p1:
+    //   The name __cplusplus is defined to the value 201402L when compiling a
+    //   C++ translation unit.
+    if (LangOpts.CPlusPlus1y)
+      Builder.defineMacro("__cplusplus", "201402L");
     // C++11 [cpp.predefined]p1:
     //   The name __cplusplus is defined to the value 201103L when compiling a
     //   C++ translation unit.
@@ -399,7 +401,7 @@
                       + getClangFullRepositoryVersion() + "\"");
 #undef TOSTR
 #undef TOSTR2
-  if (!LangOpts.MicrosoftMode) {
+  if (!LangOpts.MSVCCompat) {
     // Currently claim to be compatible with GCC 4.2.1-5621, but only if we're
     // not compiling for MSVC compatibility
     Builder.defineMacro("__GNUC_MINOR__", "2");
@@ -515,11 +517,6 @@
       Builder.defineMacro("_WCHAR_T_DEFINED");
       Builder.defineMacro("_NATIVE_WCHAR_T_DEFINED");
     }
-    if (LangOpts.CPlusPlus) {
-      // FIXME: Support Microsoft's __identifier extension in the lexer.
-      Builder.append("#define __identifier(x) x");
-      Builder.append("class type_info;");
-    }
   }
 
   if (LangOpts.Optimize)
@@ -540,11 +537,13 @@
   Builder.defineMacro("__ORDER_LITTLE_ENDIAN__", "1234");
   Builder.defineMacro("__ORDER_BIG_ENDIAN__",    "4321");
   Builder.defineMacro("__ORDER_PDP_ENDIAN__",    "3412");
-  if (TI.isBigEndian())
+  if (TI.isBigEndian()) {
     Builder.defineMacro("__BYTE_ORDER__", "__ORDER_BIG_ENDIAN__");
-  else
+    Builder.defineMacro("__BIG_ENDIAN__");
+  } else {
     Builder.defineMacro("__BYTE_ORDER__", "__ORDER_LITTLE_ENDIAN__");
-
+    Builder.defineMacro("__LITTLE_ENDIAN__");
+  }
 
   if (TI.getPointerWidth(0) == 64 && TI.getLongWidth() == 64
       && TI.getIntWidth() == 32) {
@@ -693,8 +692,10 @@
 
   if (LangOpts.getStackProtector() == LangOptions::SSPOn)
     Builder.defineMacro("__SSP__");
+  else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
+    Builder.defineMacro("__SSP_STRONG__", "2");
   else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
-    Builder.defineMacro("__SSP_ALL__", "2");
+    Builder.defineMacro("__SSP_ALL__", "3");
 
   if (FEOpts.ProgramAction == frontend::RewriteObjC)
     Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
@@ -717,12 +718,12 @@
 
   // OpenMP definition
   if (LangOpts.OpenMP) {
-    // OpenMP 2.2: 
+    // OpenMP 2.2:
     //   In implementations that support a preprocessor, the _OPENMP
     //   macro name is defined to have the decimal value yyyymm where
     //   yyyy and mm are the year and the month designations of the
     //   version of the OpenMP API that the implementation supports.
-    Builder.defineMacro("_OPENMP", "201107");
+    Builder.defineMacro("_OPENMP", "201307");
   }
 
   // Get other target #defines.
diff --git a/lib/Frontend/LogDiagnosticPrinter.cpp b/lib/Frontend/LogDiagnosticPrinter.cpp
index 2189b86..53b13fc 100644
--- a/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -31,6 +31,7 @@
 static StringRef getLevelName(DiagnosticsEngine::Level Level) {
   switch (Level) {
   case DiagnosticsEngine::Ignored: return "ignored";
+  case DiagnosticsEngine::Remark:  return "remark";
   case DiagnosticsEngine::Note:    return "note";
   case DiagnosticsEngine::Warning: return "warning";
   case DiagnosticsEngine::Error:   return "error";
@@ -130,7 +131,7 @@
     FileID FID = SM.getMainFileID();
     if (!FID.isInvalid()) {
       const FileEntry *FE = SM.getFileEntryForID(FID);
-      if (FE && FE->getName())
+      if (FE && FE->isValid())
         MainFilename = FE->getName();
     }
   }
@@ -157,7 +158,7 @@
       FileID FID = SM.getFileID(Info.getLocation());
       if (!FID.isInvalid()) {
         const FileEntry *FE = SM.getFileEntryForID(FID);
-        if (FE && FE->getName())
+        if (FE && FE->isValid())
           DE.Filename = FE->getName();
       }
     } else {
diff --git a/lib/Frontend/MultiplexConsumer.cpp b/lib/Frontend/MultiplexConsumer.cpp
index 9cf68a5..4b4804f 100644
--- a/lib/Frontend/MultiplexConsumer.cpp
+++ b/lib/Frontend/MultiplexConsumer.cpp
@@ -30,14 +30,14 @@
   // Does NOT take ownership of the elements in L.
   MultiplexASTDeserializationListener(
       const std::vector<ASTDeserializationListener*>& L);
-  virtual void ReaderInitialized(ASTReader *Reader);
-  virtual void IdentifierRead(serialization::IdentID ID,
-                              IdentifierInfo *II);
-  virtual void TypeRead(serialization::TypeIdx Idx, QualType T);
-  virtual void DeclRead(serialization::DeclID ID, const Decl *D);
-  virtual void SelectorRead(serialization::SelectorID iD, Selector Sel);
-  virtual void MacroDefinitionRead(serialization::PreprocessedEntityID, 
-                                   MacroDefinition *MD);
+  void ReaderInitialized(ASTReader *Reader) override;
+  void IdentifierRead(serialization::IdentID ID,
+                      IdentifierInfo *II) override;
+  void TypeRead(serialization::TypeIdx Idx, QualType T) override;
+  void DeclRead(serialization::DeclID ID, const Decl *D) override;
+  void SelectorRead(serialization::SelectorID iD, Selector Sel) override;
+  void MacroDefinitionRead(serialization::PreprocessedEntityID,
+                           MacroDefinition *MD) override;
 private:
   std::vector<ASTDeserializationListener*> Listeners;
 };
@@ -89,25 +89,24 @@
 public:
   // Does NOT take ownership of the elements in L.
   MultiplexASTMutationListener(ArrayRef<ASTMutationListener*> L);
-  virtual void CompletedTagDefinition(const TagDecl *D);
-  virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D);
-  virtual void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D);
-  virtual void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
-                                    const ClassTemplateSpecializationDecl *D);
-  virtual void
-  AddedCXXTemplateSpecialization(const VarTemplateDecl *TD,
-                                 const VarTemplateSpecializationDecl *D);
-  virtual void AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
-                                              const FunctionDecl *D);
-  virtual void DeducedReturnType(const FunctionDecl *FD, QualType ReturnType);
-  virtual void CompletedImplicitDefinition(const FunctionDecl *D);
-  virtual void StaticDataMemberInstantiated(const VarDecl *D);
-  virtual void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
-                                            const ObjCInterfaceDecl *IFD);
-  virtual void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
-                                            const ObjCPropertyDecl *OrigProp,
-                                            const ObjCCategoryDecl *ClassExt);
-  void DeclarationMarkedUsed(const Decl *D) LLVM_OVERRIDE;
+  void CompletedTagDefinition(const TagDecl *D) override;
+  void AddedVisibleDecl(const DeclContext *DC, const Decl *D) override;
+  void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) override;
+  void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+                            const ClassTemplateSpecializationDecl *D) override;
+  void AddedCXXTemplateSpecialization(const VarTemplateDecl *TD,
+                               const VarTemplateSpecializationDecl *D) override;
+  void AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
+                                      const FunctionDecl *D) override;
+  void DeducedReturnType(const FunctionDecl *FD, QualType ReturnType) override;
+  void CompletedImplicitDefinition(const FunctionDecl *D) override;
+  void StaticDataMemberInstantiated(const VarDecl *D) override;
+  void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
+                                    const ObjCInterfaceDecl *IFD) override;
+  void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+                                    const ObjCPropertyDecl *OrigProp,
+                                    const ObjCCategoryDecl *ClassExt) override;
+  void DeclarationMarkedUsed(const Decl *D) override;
 
 private:
   std::vector<ASTMutationListener*> Listeners;
@@ -184,10 +183,9 @@
 
 }  // end namespace clang
 
-
-MultiplexConsumer::MultiplexConsumer(ArrayRef<ASTConsumer*> C)
-    : Consumers(C.begin(), C.end()),
-      MutationListener(0), DeserializationListener(0) {
+MultiplexConsumer::MultiplexConsumer(ArrayRef<ASTConsumer *> C)
+    : Consumers(C.begin(), C.end()), MutationListener(),
+      DeserializationListener() {
   // Collect the mutation listeners and deserialization listeners of all
   // children, and create a multiplex listener each if so.
   std::vector<ASTMutationListener*> mutationListeners;
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index 55a66d8..349401a 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -123,39 +123,27 @@
   }
 
   bool startNewLineIfNeeded(bool ShouldUpdateCurrentLine = true);
-  
-  virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
-                           SrcMgr::CharacteristicKind FileType,
-                           FileID PrevFID);
-  virtual void InclusionDirective(SourceLocation HashLoc,
-                                  const Token &IncludeTok,
-                                  StringRef FileName,
-                                  bool IsAngled,
-                                  CharSourceRange FilenameRange,
-                                  const FileEntry *File,
-                                  StringRef SearchPath,
-                                  StringRef RelativePath,
-                                  const Module *Imported);
-  virtual void Ident(SourceLocation Loc, const std::string &str);
-  virtual void PragmaCaptured(SourceLocation Loc, StringRef Str);
-  virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind,
-                             const std::string &Str);
-  virtual void PragmaDetectMismatch(SourceLocation Loc,
-                                    const std::string &Name,
-                                    const std::string &Value);
-  virtual void PragmaMessage(SourceLocation Loc, StringRef Namespace,
-                             PragmaMessageKind Kind, StringRef Str);
-  virtual void PragmaDebug(SourceLocation Loc, StringRef DebugType);
-  virtual void PragmaDiagnosticPush(SourceLocation Loc,
-                                    StringRef Namespace);
-  virtual void PragmaDiagnosticPop(SourceLocation Loc,
-                                   StringRef Namespace);
-  virtual void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
-                                diag::Mapping Map, StringRef Str);
-  virtual void PragmaWarning(SourceLocation Loc, StringRef WarningSpec,
-                             ArrayRef<int> Ids);
-  virtual void PragmaWarningPush(SourceLocation Loc, int Level);
-  virtual void PragmaWarningPop(SourceLocation Loc);
+
+  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+                   SrcMgr::CharacteristicKind FileType,
+                   FileID PrevFID) override;
+  void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+                          StringRef FileName, bool IsAngled,
+                          CharSourceRange FilenameRange, const FileEntry *File,
+                          StringRef SearchPath, StringRef RelativePath,
+                          const Module *Imported) override;
+  void Ident(SourceLocation Loc, const std::string &str) override;
+  void PragmaMessage(SourceLocation Loc, StringRef Namespace,
+                     PragmaMessageKind Kind, StringRef Str) override;
+  void PragmaDebug(SourceLocation Loc, StringRef DebugType) override;
+  void PragmaDiagnosticPush(SourceLocation Loc, StringRef Namespace) override;
+  void PragmaDiagnosticPop(SourceLocation Loc, StringRef Namespace) override;
+  void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
+                        diag::Mapping Map, StringRef Str) override;
+  void PragmaWarning(SourceLocation Loc, StringRef WarningSpec,
+                     ArrayRef<int> Ids) override;
+  void PragmaWarningPush(SourceLocation Loc, int Level) override;
+  void PragmaWarningPop(SourceLocation Loc) override;
 
   bool HandleFirstTokOnLine(Token &Tok);
 
@@ -179,10 +167,12 @@
   void HandleNewlinesInToken(const char *TokStr, unsigned Len);
 
   /// MacroDefined - This hook is called whenever a macro definition is seen.
-  void MacroDefined(const Token &MacroNameTok, const MacroDirective *MD);
+  void MacroDefined(const Token &MacroNameTok,
+                    const MacroDirective *MD) override;
 
   /// MacroUndefined - This hook is called whenever a macro #undef is seen.
-  void MacroUndefined(const Token &MacroNameTok, const MacroDirective *MD);
+  void MacroUndefined(const Token &MacroNameTok,
+                      const MacroDirective *MD) override;
 };
 }  // end anonymous namespace
 
@@ -355,15 +345,6 @@
   EmittedTokensOnThisLine = true;
 }
 
-void PrintPPOutputPPCallbacks::PragmaCaptured(SourceLocation Loc,
-                                              StringRef Str) {
-  startNewLineIfNeeded();
-  MoveToLine(Loc);
-  OS << "#pragma captured";
-
-  setEmittedDirectiveOnThisLine();
-}
-
 /// MacroDefined - This hook is called whenever a macro definition is seen.
 void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
                                             const MacroDirective *MD) {
@@ -402,36 +383,6 @@
     }
 }
 
-void PrintPPOutputPPCallbacks::PragmaComment(SourceLocation Loc,
-                                             const IdentifierInfo *Kind,
-                                             const std::string &Str) {
-  startNewLineIfNeeded();
-  MoveToLine(Loc);
-  OS << "#pragma comment(" << Kind->getName();
-
-  if (!Str.empty()) {
-    OS << ", \"";
-    outputPrintable(OS, Str);
-    OS << '"';
-  }
-
-  OS << ')';
-  setEmittedDirectiveOnThisLine();
-}
-
-void PrintPPOutputPPCallbacks::PragmaDetectMismatch(SourceLocation Loc,
-                                                    const std::string &Name,
-                                                    const std::string &Value) {
-  startNewLineIfNeeded();
-  MoveToLine(Loc);
-  OS << "#pragma detect_mismatch(\"" << Name << '"';
-  outputPrintable(OS, Name);
-  OS << "\", \"";
-  outputPrintable(OS, Value);
-  OS << "\")";
-  setEmittedDirectiveOnThisLine();
-}
-
 void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
                                              StringRef Namespace,
                                              PragmaMessageKind Kind,
@@ -494,6 +445,9 @@
   MoveToLine(Loc);
   OS << "#pragma " << Namespace << " diagnostic ";
   switch (Map) {
+  case diag::MAP_REMARK:
+    OS << "remark";
+    break;
   case diag::MAP_WARNING:
     OS << "warning";
     break;
@@ -556,6 +510,13 @@
   // indented for easy reading.
   unsigned ColNo = SM.getExpansionColumnNumber(Tok.getLocation());
 
+  // The first token on a line can have a column number of 1, yet still expect
+  // leading white space, if a macro expansion in column 1 starts with an empty
+  // macro argument, or an empty nested macro expansion. In this case, move the
+  // token to column 2.
+  if (ColNo == 1 && Tok.hasLeadingSpace())
+    ColNo = 2;
+
   // This hack prevents stuff like:
   // #define HASH #
   // HASH define foo bar
@@ -602,8 +563,8 @@
 
   UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks)
     : Prefix(prefix), Callbacks(callbacks) {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &PragmaTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &PragmaTok) override {
     // Figure out what line we went to and insert the appropriate number of
     // newline characters.
     Callbacks->startNewLineIfNeeded();
@@ -615,7 +576,13 @@
         Callbacks->OS << ' ';
       std::string TokSpell = PP.getSpelling(PragmaTok);
       Callbacks->OS.write(&TokSpell[0], TokSpell.size());
-      PP.LexUnexpandedToken(PragmaTok);
+
+      // Expand macros in pragmas with -fms-extensions.  The assumption is that
+      // the majority of pragmas in such a file will be Microsoft pragmas.
+      if (PP.getLangOpts().MicrosoftExt)
+        PP.Lex(PragmaTok);
+      else
+        PP.LexUnexpandedToken(PragmaTok);
     }
     Callbacks->setEmittedDirectiveOnThisLine();
   }
@@ -657,7 +624,9 @@
       // -traditional-cpp the lexer keeps /all/ whitespace, including comments.
       SourceLocation StartLoc = Tok.getLocation();
       Callbacks->MoveToLine(StartLoc.getLocWithOffset(Tok.getLength()));
-    } else if (Tok.is(tok::annot_module_include)) {
+    } else if (Tok.is(tok::annot_module_include) ||
+               Tok.is(tok::annot_module_begin) ||
+               Tok.is(tok::annot_module_end)) {
       // PrintPPOutputPPCallbacks::InclusionDirective handles producing
       // appropriate output here. Ignore this token entirely.
       PP.Lex(Tok);
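
The -fms-extensions change above only affects how -E prints unknown pragmas; an illustrative input/output pair (hypothetical pragma name, not a clang test case):

    #define LEVEL 3
    #pragma mypragma(LEVEL)
    // With -fms-extensions, -E macro-expands the pragma arguments, printing:
    //   #pragma mypragma(3)
    // Without -fms-extensions the tokens are lexed unexpanded, printing:
    //   #pragma mypragma(LEVEL)
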
diff --git a/lib/Frontend/SerializedDiagnosticPrinter.cpp b/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 6514321..527f727 100644
--- a/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -59,32 +59,32 @@
   virtual ~SDiagsRenderer() {}
   
 protected:
-  virtual void emitDiagnosticMessage(SourceLocation Loc,
-                                     PresumedLoc PLoc,
-                                     DiagnosticsEngine::Level Level,
-                                     StringRef Message,
-                                     ArrayRef<CharSourceRange> Ranges,
-                                     const SourceManager *SM,
-                                     DiagOrStoredDiag D);
-  
-  virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
-                                 DiagnosticsEngine::Level Level,
-                                 ArrayRef<CharSourceRange> Ranges,
-                                 const SourceManager &SM) {}
+  void emitDiagnosticMessage(SourceLocation Loc,
+                             PresumedLoc PLoc,
+                             DiagnosticsEngine::Level Level,
+                             StringRef Message,
+                             ArrayRef<CharSourceRange> Ranges,
+                             const SourceManager *SM,
+                             DiagOrStoredDiag D) override;
 
-  virtual void emitNote(SourceLocation Loc, StringRef Message,
-                        const SourceManager *SM);
+  void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+                         DiagnosticsEngine::Level Level,
+                         ArrayRef<CharSourceRange> Ranges,
+                         const SourceManager &SM) override {}
 
-  virtual void emitCodeContext(SourceLocation Loc,
-                               DiagnosticsEngine::Level Level,
-                               SmallVectorImpl<CharSourceRange>& Ranges,
-                               ArrayRef<FixItHint> Hints,
-                               const SourceManager &SM);
+  void emitNote(SourceLocation Loc, StringRef Message,
+                const SourceManager *SM) override;
 
-  virtual void beginDiagnostic(DiagOrStoredDiag D,
-                               DiagnosticsEngine::Level Level);
-  virtual void endDiagnostic(DiagOrStoredDiag D,
-                             DiagnosticsEngine::Level Level);
+  void emitCodeContext(SourceLocation Loc,
+                       DiagnosticsEngine::Level Level,
+                       SmallVectorImpl<CharSourceRange>& Ranges,
+                       ArrayRef<FixItHint> Hints,
+                       const SourceManager &SM) override;
+
+  void beginDiagnostic(DiagOrStoredDiag D,
+                       DiagnosticsEngine::Level Level) override;
+  void endDiagnostic(DiagOrStoredDiag D,
+                     DiagnosticsEngine::Level Level) override;
 };
   
 class SDiagsWriter : public DiagnosticConsumer {
@@ -103,16 +103,15 @@
   }
 
   ~SDiagsWriter() {}
-  
+
   void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
-                        const Diagnostic &Info);
-  
-  void BeginSourceFile(const LangOptions &LO,
-                       const Preprocessor *PP) {
+                        const Diagnostic &Info) override;
+
+  void BeginSourceFile(const LangOptions &LO, const Preprocessor *PP) override {
     LangOpts = &LO;
   }
 
-  virtual void finish();
+  void finish() override;
 
 private:
   /// \brief Emit the preamble for the serialized diagnostics.
@@ -174,7 +173,7 @@
                                   const SourceManager &SM);
 
   /// \brief The version of the diagnostics file.
-  enum { Version = 1 };
+  enum { Version = 2 };
 
   /// \brief Language options, which can differ from one clone of this client
   /// to another.
@@ -200,7 +199,7 @@
     llvm::BitstreamWriter Stream;
 
     /// \brief The name of the diagnostics file.
-    OwningPtr<raw_ostream> OS;
+    std::unique_ptr<raw_ostream> OS;
 
     /// \brief The set of constructed record abbreviations.
     AbbreviationMap Abbrevs;
@@ -566,6 +565,21 @@
                           &Info);
 }
 
+static serialized_diags::Level getStableLevel(DiagnosticsEngine::Level Level) {
+  switch (Level) {
+#define CASE(X) case DiagnosticsEngine::X: return serialized_diags::X;
+  CASE(Ignored)
+  CASE(Note)
+  CASE(Remark)
+  CASE(Warning)
+  CASE(Error)
+  CASE(Fatal)
+#undef CASE
+  }
+
+  llvm_unreachable("invalid diagnostic level");
+}
+
 void SDiagsWriter::EmitDiagnosticMessage(SourceLocation Loc,
                                          PresumedLoc PLoc,
                                          DiagnosticsEngine::Level Level,
@@ -579,7 +593,7 @@
   // Emit the RECORD_DIAG record.
   Record.clear();
   Record.push_back(RECORD_DIAG);
-  Record.push_back(Level);
+  Record.push_back(getStableLevel(Level));
   AddLocToRecord(Loc, SM, PLoc, Record);
 
   if (const Diagnostic *Info = D.dyn_cast<const Diagnostic*>()) {
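
The getStableLevel mapping above exists so the on-disk record does not depend on the in-memory enum layout; a minimal sketch of the same idea (hypothetical enums, not the serialized_diags API):

    // In-memory level: free to be reordered between compiler versions.
    enum class InMemoryLevel { Ignored, Note, Remark, Warning, Error, Fatal };

    // On-disk level: values are part of the file format and must stay fixed,
    // so new enumerators (here Remark) are appended rather than inserted.
    enum class StableLevel : unsigned {
      Ignored = 0, Note = 1, Warning = 2, Error = 3, Fatal = 4, Remark = 5
    };

    // Translate explicitly instead of writing the raw enum value, so changing
    // the in-memory enum cannot silently change the serialized format.
    inline StableLevel getStable(InMemoryLevel L) {
      switch (L) {
      case InMemoryLevel::Ignored: return StableLevel::Ignored;
      case InMemoryLevel::Note:    return StableLevel::Note;
      case InMemoryLevel::Remark:  return StableLevel::Remark;
      case InMemoryLevel::Warning: return StableLevel::Warning;
      case InMemoryLevel::Error:   return StableLevel::Error;
      case InMemoryLevel::Fatal:   return StableLevel::Fatal;
      }
      return StableLevel::Ignored; // unreachable
    }
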
diff --git a/lib/Frontend/TextDiagnostic.cpp b/lib/Frontend/TextDiagnostic.cpp
index a2dc953..4c90d01 100644
--- a/lib/Frontend/TextDiagnostic.cpp
+++ b/lib/Frontend/TextDiagnostic.cpp
@@ -26,6 +26,8 @@
 
 static const enum raw_ostream::Colors noteColor =
   raw_ostream::BLACK;
+static const enum raw_ostream::Colors remarkColor =
+  raw_ostream::BLUE;
 static const enum raw_ostream::Colors fixitColor =
   raw_ostream::GREEN;
 static const enum raw_ostream::Colors caretColor =
@@ -312,14 +314,6 @@
   SmallVector<int,200> m_byteToColumn;
   SmallVector<int,200> m_columnToByte;
 };
-
-// used in assert in selectInterestingSourceRegion()
-struct char_out_of_range {
-  const char lower,upper;
-  char_out_of_range(char lower, char upper) :
-    lower(lower), upper(upper) {}
-  bool operator()(char c) { return c < lower || upper < c; }
-};
 } // end anonymous namespace
 
 /// \brief When the source code line we want to print is too long for
@@ -339,7 +333,7 @@
   // No special characters are allowed in CaretLine.
   assert(CaretLine.end() ==
          std::find_if(CaretLine.begin(), CaretLine.end(),
-         char_out_of_range(' ','~')));
+                      [](char c) { return c < ' ' || '~' < c; }));
 
   // Find the slice that we need to display the full caret line
   // correctly.
@@ -711,6 +705,7 @@
     case DiagnosticsEngine::Ignored:
       llvm_unreachable("Invalid diagnostic type");
     case DiagnosticsEngine::Note:    OS.changeColor(noteColor, true); break;
+    case DiagnosticsEngine::Remark:  OS.changeColor(remarkColor, true); break;
     case DiagnosticsEngine::Warning: OS.changeColor(warningColor, true); break;
     case DiagnosticsEngine::Error:   OS.changeColor(errorColor, true); break;
     case DiagnosticsEngine::Fatal:   OS.changeColor(fatalColor, true); break;
@@ -721,6 +716,7 @@
   case DiagnosticsEngine::Ignored:
     llvm_unreachable("Invalid diagnostic type");
   case DiagnosticsEngine::Note:    OS << "note"; break;
+  case DiagnosticsEngine::Remark:  OS << "remark"; break;
   case DiagnosticsEngine::Warning: OS << "warning"; break;
   case DiagnosticsEngine::Error:   OS << "error"; break;
   case DiagnosticsEngine::Fatal:   OS << "fatal error"; break;
@@ -787,7 +783,7 @@
     FileID FID = SM.getFileID(Loc);
     if (!FID.isInvalid()) {
       const FileEntry* FE = SM.getFileEntryForID(FID);
-      if (FE && FE->getName()) {
+      if (FE && FE->isValid()) {
         OS << FE->getName();
         if (FE->isInPCH())
           OS << " (in PCH)";
@@ -816,7 +812,9 @@
     if (unsigned ColNo = PLoc.getColumn()) {
       if (DiagOpts->getFormat() == DiagnosticOptions::Msvc) {
         OS << ',';
-        ColNo--;
+        // Visual Studio 2010 or earlier expects column number to be off by one
+        if (LangOpts.MSCVersion && LangOpts.MSCVersion < 1700)
+          ColNo--;
       } else
         OS << ':';
       OS << ColNo;
diff --git a/lib/Frontend/TextDiagnosticBuffer.cpp b/lib/Frontend/TextDiagnosticBuffer.cpp
index 5821436..b1c793a 100644
--- a/lib/Frontend/TextDiagnosticBuffer.cpp
+++ b/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -42,36 +42,16 @@
   }
 }
 
-/// \brief Escape diagnostic texts to avoid problems when they are fed into the
-/// diagnostic formatter a second time.
-static StringRef escapeDiag(StringRef Str, SmallVectorImpl<char> &Buf) {
-  size_t Pos = Str.find('%');
-  if (Pos == StringRef::npos)
-    return Str;
-
-  // We found a '%'. Replace this and all following '%' with '%%'.
-  Buf.clear();
-  Buf.append(Str.data(), Str.data() + Pos);
-  for (size_t I = Pos, E = Str.size(); I != E; ++I) {
-    if (Str[I] == '%')
-      Buf.push_back('%');
-    Buf.push_back(Str[I]);
-  }
-
-  return StringRef(Buf.data(), Buf.size());
-}
-
 void TextDiagnosticBuffer::FlushDiagnostics(DiagnosticsEngine &Diags) const {
-  SmallVector<char, 64> Buf;
   // FIXME: Flush the diagnostics in order.
   for (const_iterator it = err_begin(), ie = err_end(); it != ie; ++it)
-    Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error,
-                                       escapeDiag(it->second, Buf)));
+    Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
+        << it->second;
   for (const_iterator it = warn_begin(), ie = warn_end(); it != ie; ++it)
-    Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Warning,
-                                       escapeDiag(it->second, Buf)));
+    Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Warning, "%0"))
+        << it->second;
   for (const_iterator it = note_begin(), ie = note_end(); it != ie; ++it)
-    Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Note,
-                                       escapeDiag(it->second, Buf)));
+    Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Note, "%0"))
+        << it->second;
 }
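
The switch to a "%0" custom diagnostic above sidesteps the need to escape '%' in buffered text: the stored message is passed as an argument instead of being re-parsed as a format string. The same distinction in plain C++ terms (illustrative analogy only):

    #include <cstdio>

    void report(const char *Msg) {
      // Unsafe: Msg is treated as a format string, so any '%' in it is
      // re-interpreted -- this is what the removed escapeDiag() had to
      // guard against in the diagnostic formatter.
      // std::printf(Msg);

      // Safe: the text is an argument to a fixed "%s" format, analogous to
      // reporting through getCustomDiagID(..., "%0") << Msg.
      std::printf("%s\n", Msg);
    }
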
 
diff --git a/lib/Frontend/VerifyDiagnosticConsumer.cpp b/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 045e60a..0221a55 100644
--- a/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -166,12 +166,12 @@
                     StringRef Text, unsigned Min, unsigned Max)
     : Directive(DirectiveLoc, DiagnosticLoc, Text, Min, Max) { }
 
-  virtual bool isValid(std::string &Error) {
+  bool isValid(std::string &Error) override {
     // all strings are considered valid; even empty ones
     return true;
   }
 
-  virtual bool match(StringRef S) {
+  bool match(StringRef S) override {
     return S.find(Text) != StringRef::npos;
   }
 };
@@ -181,16 +181,16 @@
 class RegexDirective : public Directive {
 public:
   RegexDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
-                 StringRef Text, unsigned Min, unsigned Max)
-    : Directive(DirectiveLoc, DiagnosticLoc, Text, Min, Max), Regex(Text) { }
+                 StringRef Text, unsigned Min, unsigned Max, StringRef RegexStr)
+    : Directive(DirectiveLoc, DiagnosticLoc, Text, Min, Max), Regex(RegexStr) { }
 
-  virtual bool isValid(std::string &Error) {
+  bool isValid(std::string &Error) override {
     if (Regex.isValid(Error))
       return true;
     return false;
   }
 
-  virtual bool match(StringRef S) {
+  bool match(StringRef S) override {
     return Regex.match(S);
   }
 
@@ -240,7 +240,7 @@
       if (!EnsureStartOfWord
             // Check if string literal starts a new word.
             || P == Begin || isWhitespace(P[-1])
-            // Or it could be preceeded by the start of a comment.
+            // Or it could be preceded by the start of a comment.
             || (P > (Begin + 1) && (P[-1] == '/' || P[-1] == '*')
                                 &&  P[-2] == '/'))
         return true;
@@ -249,6 +249,30 @@
     return false;
   }
 
+  // Return true if a CloseBrace that closes the OpenBrace at the current nest
+  // level is found. When true, P marks begin-position of CloseBrace.
+  bool SearchClosingBrace(StringRef OpenBrace, StringRef CloseBrace) {
+    unsigned Depth = 1;
+    P = C;
+    while (P < End) {
+      StringRef S(P, End - P);
+      if (S.startswith(OpenBrace)) {
+        ++Depth;
+        P += OpenBrace.size();
+      } else if (S.startswith(CloseBrace)) {
+        --Depth;
+        if (Depth == 0) {
+          PEnd = P + CloseBrace.size();
+          return true;
+        }
+        P += CloseBrace.size();
+      } else {
+        ++P;
+      }
+    }
+    return false;
+  }
+
   // Advance 1-past previous next/search.
   // Behavior is undefined if previous next/search failed.
   bool Advance() {
@@ -437,7 +461,7 @@
     const char* const ContentBegin = PH.C; // mark content begin
 
     // Search for token: }}
-    if (!PH.Search("}}")) {
+    if (!PH.SearchClosingBrace("{{", "}}")) {
       Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
                    diag::err_verify_missing_end) << KindStr;
       continue;
@@ -459,6 +483,13 @@
     if (Text.empty())
       Text.assign(ContentBegin, ContentEnd);
 
+    // Check that regex directives contain at least one regex.
+    if (RegexKind && Text.find("{{") == StringRef::npos) {
+      Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
+                   diag::err_verify_missing_regex) << Text;
+      return false;
+    }
+
     // Construct new directive.
     Directive *D = Directive::create(RegexKind, Pos, ExpectedLoc, Text,
                                      Min, Max);
@@ -823,7 +854,31 @@
 Directive *Directive::create(bool RegexKind, SourceLocation DirectiveLoc,
                              SourceLocation DiagnosticLoc, StringRef Text,
                              unsigned Min, unsigned Max) {
-  if (RegexKind)
-    return new RegexDirective(DirectiveLoc, DiagnosticLoc, Text, Min, Max);
-  return new StandardDirective(DirectiveLoc, DiagnosticLoc, Text, Min, Max);
+  if (!RegexKind)
+    return new StandardDirective(DirectiveLoc, DiagnosticLoc, Text, Min, Max);
+
+  // Parse the directive into a regular expression.
+  std::string RegexStr;
+  StringRef S = Text;
+  while (!S.empty()) {
+    if (S.startswith("{{")) {
+      S = S.drop_front(2);
+      size_t RegexMatchLength = S.find("}}");
+      assert(RegexMatchLength != StringRef::npos);
+      // Append the regex, enclosed in parentheses.
+      RegexStr += "(";
+      RegexStr.append(S.data(), RegexMatchLength);
+      RegexStr += ")";
+      S = S.drop_front(RegexMatchLength + 2);
+    } else {
+      size_t VerbatimMatchLength = S.find("{{");
+      if (VerbatimMatchLength == StringRef::npos)
+        VerbatimMatchLength = S.size();
+      // Escape and append the fixed string.
+      RegexStr += llvm::Regex::escape(S.substr(0, VerbatimMatchLength));
+      S = S.drop_front(VerbatimMatchLength);
+    }
+  }
+
+  return new RegexDirective(DirectiveLoc, DiagnosticLoc, Text, Min, Max, RegexStr);
 }
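
For illustration, the directive-to-regex conversion above can be sketched standalone: verbatim text is regex-escaped and each {{...}} block is wrapped in a capture group (the local escape helper stands in for llvm::Regex::escape; names are hypothetical):

    #include <cstddef>
    #include <cstring>
    #include <string>

    // Escape regex metacharacters in a verbatim chunk.
    static std::string escapeRegex(const std::string &S) {
      std::string Out;
      for (char C : S) {
        if (std::strchr("^$.|?*+()[]{}\\", C))
          Out += '\\';
        Out += C;
      }
      return Out;
    }

    // Convert a verify directive such as "undeclared identifier {{[a-z]+}}"
    // into a single regular expression string.
    static std::string directiveToRegex(std::string S) {
      std::string Regex;
      while (!S.empty()) {
        if (S.compare(0, 2, "{{") == 0) {
          std::size_t End = S.find("}}", 2); // caller already validated this
          Regex += "(" + S.substr(2, End - 2) + ")";
          S.erase(0, End + 2);
        } else {
          std::size_t Next = S.find("{{");
          if (Next == std::string::npos)
            Next = S.size();
          Regex += escapeRegex(S.substr(0, Next)); // fixed text, escaped
          S.erase(0, Next);
        }
      }
      return Regex;
    }
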
diff --git a/lib/FrontendTool/Android.mk b/lib/FrontendTool/Android.mk
index 2496f6b..5816095 100644
--- a/lib/FrontendTool/Android.mk
+++ b/lib/FrontendTool/Android.mk
@@ -17,6 +17,9 @@
 clang_frontend_tool_SRC_FILES := \
   ExecuteCompilerInvocation.cpp
 
+LOCAL_CFLAGS += -DCLANG_ENABLE_STATIC_ANALYZER
+LOCAL_CFLAGS += -DCLANG_ENABLE_REWRITER
+
 LOCAL_SRC_FILES := $(clang_frontend_tool_SRC_FILES)
 
 include $(CLANG_HOST_BUILD_MK)
diff --git a/lib/FrontendTool/CMakeLists.txt b/lib/FrontendTool/CMakeLists.txt
index 28a864a..7e11be0 100644
--- a/lib/FrontendTool/CMakeLists.txt
+++ b/lib/FrontendTool/CMakeLists.txt
@@ -1,31 +1,34 @@
-add_clang_library(clangFrontendTool
-  ExecuteCompilerInvocation.cpp
+set(LLVM_LINK_COMPONENTS
+  Option
+  Support
   )
 
-add_dependencies(clangFrontendTool
-  ClangDiagnosticCommon
-  ClangDiagnosticFrontend
-  ClangDriverOptions
-  )
-
-target_link_libraries(clangFrontendTool
+set(link_libs
+  clangBasic
+  clangCodeGen
   clangDriver
   clangFrontend
-  clangRewriteCore
   clangRewriteFrontend
-  clangCodeGen
   )
 
 if(CLANG_ENABLE_ARCMT)
-  target_link_libraries(clangFrontendTool
+  list(APPEND link_libs
     clangARCMigrate
     )
 endif()
 
 if(CLANG_ENABLE_STATIC_ANALYZER)
-  target_link_libraries(clangFrontendTool
+  list(APPEND link_libs
     clangStaticAnalyzerFrontend
-    clangStaticAnalyzerCheckers
-    clangStaticAnalyzerCore
     )
 endif()
+
+add_clang_library(clangFrontendTool
+  ExecuteCompilerInvocation.cpp
+
+  DEPENDS
+  ClangDriverOptions
+
+  LINK_LIBS
+  ${link_libs}
+  )
diff --git a/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index d755839..0edd577 100644
--- a/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -21,6 +21,7 @@
 #include "clang/Frontend/FrontendActions.h"
 #include "clang/Frontend/FrontendDiagnostic.h"
 #include "clang/Frontend/FrontendPluginRegistry.h"
+#include "clang/Frontend/Utils.h"
 #include "clang/Rewrite/Frontend/FrontendActions.h"
 #include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
 #include "llvm/Option/OptTable.h"
@@ -33,6 +34,7 @@
 static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
   using namespace clang::frontend;
   StringRef Action("unknown");
+  (void)Action;
 
   switch (CI.getFrontendOpts().ProgramAction) {
   case ASTDeclList:            return new ASTDeclListAction();
@@ -63,16 +65,17 @@
   case InitOnly:               return new InitOnlyAction();
   case ParseSyntaxOnly:        return new SyntaxOnlyAction();
   case ModuleFileInfo:         return new DumpModuleInfoAction();
+  case VerifyPCH:              return new VerifyPCHAction();
 
   case PluginAction: {
     for (FrontendPluginRegistry::iterator it =
            FrontendPluginRegistry::begin(), ie = FrontendPluginRegistry::end();
          it != ie; ++it) {
       if (it->getName() == CI.getFrontendOpts().ActionName) {
-        OwningPtr<PluginASTAction> P(it->instantiate());
+        std::unique_ptr<PluginASTAction> P(it->instantiate());
         if (!P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs))
           return 0;
-        return P.take();
+        return P.release();
       }
     }
 
@@ -178,7 +181,7 @@
 bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
   // Honor -help.
   if (Clang->getFrontendOpts().ShowHelp) {
-    OwningPtr<OptTable> Opts(driver::createDriverOptTable());
+    std::unique_ptr<OptTable> Opts(driver::createDriverOptTable());
     Opts->PrintHelp(llvm::outs(), "clang -cc1",
                     "LLVM 'Clang' Compiler: http://clang.llvm.org",
                     /*Include=*/ driver::options::CC1Option, /*Exclude=*/ 0);
@@ -230,11 +233,11 @@
   if (Clang->getDiagnostics().hasErrorOccurred())
     return false;
   // Create and execute the frontend action.
-  OwningPtr<FrontendAction> Act(CreateFrontendAction(*Clang));
+  std::unique_ptr<FrontendAction> Act(CreateFrontendAction(*Clang));
   if (!Act)
     return false;
   bool Success = Clang->ExecuteAction(*Act);
   if (Clang->getFrontendOpts().DisableFree)
-    Act.take();
+    BuryPointer(Act.release());
   return Success;
 }
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 2da1a28..d0c2684 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -10,6 +10,7 @@
   float.h
   fma4intrin.h
   fmaintrin.h
+  ia32intrin.h
   immintrin.h
   iso646.h
   Intrin.h
@@ -47,17 +48,10 @@
   module.map
   )
 
-set(output_dir ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/clang/${CLANG_VERSION}/include)
-
-# If we are in an IDE that has a configuration directory, we need to
-# create a second copy of the headers so that 'clang' can find them if
-# it's run from the build directory.
-if(MSVC_IDE OR XCODE)
-   set(other_output_dir ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/lib/clang/${CLANG_VERSION}/include)
-endif()
+set(output_dir ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include)
 
 # Generate arm_neon.h
-clang_tablegen(arm_neon.h.inc -gen-arm-neon
+clang_tablegen(arm_neon.h -gen-arm-neon
   SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/arm_neon.td)
 
 set(out_files)
@@ -69,43 +63,18 @@
     COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${dst}
     COMMENT "Copying clang's ${f}...")
   list(APPEND out_files ${dst})
-
-  if(other_output_dir)
-   set(other_dst ${other_output_dir}/${f})
-    add_custom_command(OUTPUT ${other_dst}
-      DEPENDS ${src}
-      COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${other_dst}
-      COMMENT "Copying clang's ${f}...")    
-    list(APPEND out_files ${other_dst})
-  endif()
 endforeach( f )
 
 add_custom_command(OUTPUT ${output_dir}/arm_neon.h 
-  DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h.inc
-  COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h.inc ${output_dir}/arm_neon.h
+  DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h
+  COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h ${output_dir}/arm_neon.h
   COMMENT "Copying clang's arm_neon.h...")
 list(APPEND out_files ${output_dir}/arm_neon.h)
 
-if (other_output_dir)
-    set(other_dst ${other_output_dir}/arm_neon.h)
-    add_custom_command(OUTPUT ${other_dst}
-      DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h.inc
-      COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h.inc ${other_dst}
-      COMMENT "Copying clang's arm_neon.h...")
-    list(APPEND out_files ${other_dst})
-endif ()
-
 add_custom_target(clang-headers ALL DEPENDS ${out_files})
 set_target_properties(clang-headers PROPERTIES FOLDER "Misc")
 
-if (other_output_dir)
-  if(UNIX)
-    add_custom_command(TARGET clang-headers POST_BUILD
-    COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_CFG_INTDIR}"
-    COMMAND ${CMAKE_COMMAND} -E create_symlink "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/lib/clang" "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_CFG_INTDIR}/clang")
-  endif()
-endif ()
-
-install(FILES ${files} ${output_dir}/arm_neon.h
+install(
+  FILES ${files} ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h
   PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
   DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include)
diff --git a/lib/Headers/Intrin.h b/lib/Headers/Intrin.h
index 4376464..e365abe 100644
--- a/lib/Headers/Intrin.h
+++ b/lib/Headers/Intrin.h
@@ -32,6 +32,9 @@
 /* First include the standard intrinsics. */
 #include <x86intrin.h>
 
+/* For the definition of jmp_buf. */
+#include <setjmp.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -48,13 +51,16 @@
 void __addfsdword(unsigned long, unsigned long);
 void __addfsword(unsigned long, unsigned short);
 void __code_seg(const char *);
+static __inline__
 void __cpuid(int[4], int);
+static __inline__
 void __cpuidex(int[4], int, int);
 void __debugbreak(void);
 __int64 __emul(int, int);
 unsigned __int64 __emulu(unsigned int, unsigned int);
 void __cdecl __fastfail(unsigned int);
 unsigned int __getcallerseflags(void);
+static __inline__
 void __halt(void);
 unsigned char __inbyte(unsigned short);
 void __inbytestring(unsigned short, unsigned char *, unsigned long);
@@ -75,8 +81,11 @@
 void __lwpval32(unsigned int, unsigned int, unsigned int);
 unsigned int __lzcnt(unsigned int);
 unsigned short __lzcnt16(unsigned short);
+static __inline__
 void __movsb(unsigned char *, unsigned char const *, size_t);
+static __inline__
 void __movsd(unsigned long *, unsigned long const *, size_t);
+static __inline__
 void __movsw(unsigned short *, unsigned short const *, size_t);
 void __nop(void);
 void __nvreg_restore_fence(void);
@@ -91,26 +100,35 @@
 unsigned int __popcnt(unsigned int);
 static __inline__
 unsigned short __popcnt16(unsigned short);
+static __inline__
 unsigned __int64 __rdtsc(void);
 unsigned __int64 __rdtscp(unsigned int *);
 unsigned long __readcr0(void);
 unsigned long __readcr2(void);
 unsigned long __readcr3(void);
-unsigned long __readcr5(void);
+unsigned long __readcr4(void);
 unsigned long __readcr8(void);
 unsigned int __readdr(unsigned int);
-unsigned int __readeflags(void);
+#ifdef __i386__
+static __inline__
 unsigned char __readfsbyte(unsigned long);
+static __inline__
 unsigned long __readfsdword(unsigned long);
+static __inline__
 unsigned __int64 __readfsqword(unsigned long);
+static __inline__
 unsigned short __readfsword(unsigned long);
+#endif
 unsigned __int64 __readmsr(unsigned long);
 unsigned __int64 __readpmc(unsigned long);
 unsigned long __segmentlimit(unsigned long);
 void __sidt(void *);
 void *__slwpcb(void);
+static __inline__
 void __stosb(unsigned char *, unsigned char, size_t);
+static __inline__
 void __stosd(unsigned long *, unsigned long, size_t);
+static __inline__
 void __stosw(unsigned short *, unsigned short, size_t);
 void __svm_clgi(void);
 void __svm_invlpga(void *, int);
@@ -129,7 +147,6 @@
 void __writecr4(unsigned int);
 void __writecr8(unsigned int);
 void __writedr(unsigned int, unsigned int);
-void __writeeflags(unsigned int);
 void __writefsbyte(unsigned long, unsigned char);
 void __writefsdword(unsigned long, unsigned long);
 void __writefsqword(unsigned long, unsigned __int64);
@@ -139,7 +156,6 @@
 void *_AddressOfReturnAddress(void);
 unsigned int _andn_u32(unsigned int, unsigned int);
 unsigned int _bextr_u32(unsigned int, unsigned int, unsigned int);
-unsigned int _bextr_u32(unsigned int, unsigned int, unsigned int);
 unsigned int _bextri_u32(unsigned int, unsigned int);
 static __inline__
 unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
@@ -162,8 +178,6 @@
 unsigned int _blsi_u32(unsigned int);
 unsigned int _blsic_u32(unsigned int);
 unsigned int _blsmsk_u32(unsigned int);
-unsigned int _blsmsk_u32(unsigned int);
-unsigned int _blsr_u32(unsigned int);
 unsigned int _blsr_u32(unsigned int);
 unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
 unsigned long __cdecl _byteswap_ulong(unsigned long);
@@ -181,6 +195,7 @@
 static __inline__
 char _InterlockedAnd8(char volatile *_Value, char _Mask);
 unsigned char _interlockedbittestandreset(long volatile *, long);
+static __inline__
 unsigned char _interlockedbittestandset(long volatile *, long);
 static __inline__
 long __cdecl _InterlockedCompareExchange(long volatile *_Destination,
@@ -219,6 +234,10 @@
 long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
 long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
 static __inline__
+short _InterlockedExchangeAdd16(short volatile *_Addend, short _Value);
+__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
+__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
+static __inline__
 char _InterlockedExchangeAdd8(char volatile *_Addend, char _Value);
 static __inline__
 long __cdecl _InterlockedIncrement(long volatile *_Addend);
@@ -269,10 +288,7 @@
 static __inline__
 unsigned char _rotr8(unsigned char _Value, unsigned char _Shift);
 int _sarx_i32(int, unsigned int);
-
-/* FIXME: Need definition for jmp_buf.
-   int __cdecl _setjmp(jmp_buf); */
-
+int __cdecl _setjmp(jmp_buf);
 unsigned int _shlx_u32(unsigned int, unsigned int);
 unsigned int _shrx_u32(unsigned int, unsigned int);
 void _Store_HLERelease(long volatile *, long);
@@ -280,13 +296,13 @@
 void _StorePointer_HLERelease(void *volatile *, void *);
 unsigned int _t1mskc_u32(unsigned int);
 unsigned int _tzcnt_u32(unsigned int);
-unsigned int _tzcnt_u32(unsigned int);
 unsigned int _tzmsk_u32(unsigned int);
 static __inline__
 void _WriteBarrier(void);
 void _xabort(const unsigned int imm);
 unsigned __int32 xbegin(void);
 void _xend(void);
+static __inline__
 unsigned __int64 __cdecl _xgetbv(unsigned int);
 void __cdecl _xrstor(void const *, unsigned __int64);
 void __cdecl _xsave(void *, unsigned __int64);
@@ -300,19 +316,47 @@
 void __addgsdword(unsigned long, unsigned long);
 void __addgsqword(unsigned long, unsigned __int64);
 void __addgsword(unsigned long, unsigned short);
+static __inline__
 void __faststorefence(void);
 void __incgsbyte(unsigned long);
 void __incgsdword(unsigned long);
 void __incgsqword(unsigned long);
 void __incgsword(unsigned long);
+unsigned char __lwpins64(unsigned __int64, unsigned int, unsigned int);
+void __lwpval64(unsigned __int64, unsigned int, unsigned int);
+unsigned __int64 __lzcnt64(unsigned __int64);
+static __inline__
+void __movsq(unsigned long long *, unsigned long long const *, size_t);
+__int64 __mulh(__int64, __int64);
+static __inline__
 unsigned __int64 __popcnt64(unsigned __int64);
+static __inline__
+unsigned char __readgsbyte(unsigned long);
+static __inline__
+unsigned long __readgsdword(unsigned long);
+static __inline__
+unsigned __int64 __readgsqword(unsigned long);
+unsigned short __readgsword(unsigned long);
 unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
                                 unsigned __int64 _HighPart,
                                 unsigned char _Shift);
 unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
                                  unsigned __int64 _HighPart,
                                  unsigned char _Shift);
+static __inline__
 void __stosq(unsigned __int64 *, unsigned __int64, size_t);
+unsigned __int64 __umulh(unsigned __int64, unsigned __int64);
+unsigned char __vmx_on(unsigned __int64 *);
+unsigned char __vmx_vmclear(unsigned __int64 *);
+unsigned char __vmx_vmlaunch(void);
+unsigned char __vmx_vmptrld(unsigned __int64 *);
+unsigned char __vmx_vmread(size_t, size_t *);
+unsigned char __vmx_vmresume(void);
+unsigned char __vmx_vmwrite(size_t, size_t);
+void __writegsbyte(unsigned long, unsigned char);
+void __writegsdword(unsigned long, unsigned long);
+void __writegsqword(unsigned long, unsigned __int64);
+void __writegsword(unsigned long, unsigned short);
 unsigned __int64 _andn_u64(unsigned __int64, unsigned __int64);
 unsigned __int64 _bextr_u64(unsigned __int64, unsigned int, unsigned int);
 unsigned __int64 _bextri_u64(unsigned __int64, unsigned int);
@@ -336,7 +380,7 @@
 unsigned __int64 _blsfill_u64(unsigned __int64);
 unsigned __int64 _blsi_u64(unsigned __int64);
 unsigned __int64 _blsic_u64(unsigned __int64);
-unsigned __int64 _blmsk_u64(unsigned __int64);
+unsigned __int64 _blsmsk_u64(unsigned __int64);
 unsigned __int64 _blsr_u64(unsigned __int64);
 unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
 unsigned __int64 _bzhi_u64(unsigned __int64, unsigned int);
@@ -347,6 +391,7 @@
 __int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
 char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
 unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
+static __inline__
 unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
 long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
                                     long _Comparand);
@@ -360,18 +405,40 @@
                                                 __int64 *_ComparandResult);
 short _InterlockedCompareExchange16_np(short volatile *_Destination,
                                        short _Exchange, short _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+                                                 __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+                                                 __int64);
 __int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
                                          __int64 _Exchange, __int64 _Comparand);
+static __inline__
+void *_InterlockedCompareExchangePointer(void *volatile *_Destination,
+                                         void *_Exchange, void *_Comparand);
 void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
                                             void *_Exchange, void *_Comparand);
+static __inline__
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+static __inline__
+void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
+static __inline__
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
 long _InterlockedOr_np(long volatile *_Value, long _Mask);
 short _InterlockedOr16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
 __int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
 char _InterlockedOr8_np(char volatile *_Value, char _Mask);
 long _InterlockedXor_np(long volatile *_Value, long _Mask);
 short _InterlockedXor16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
 __int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
 char _InterlockedXor8_np(char volatile *_Value, char _Mask);
+static __inline__
 unsigned __int64 _lzcnt_u64(unsigned __int64);
 __int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
                 __int64 *_HighProduct);
@@ -380,6 +447,11 @@
 unsigned int __cdecl _readgsbase_u32(void);
 unsigned __int64 __cdecl _readgsbase_u64(void);
 unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
+__int64 _sarx_i64(__int64, unsigned int);
+/* FIXME: Need definition for jmp_buf.
+  int __cdecl _setjmpex(jmp_buf); */
+unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
+unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
 unsigned __int64 _tzcnt_u64(unsigned __int64);
 unsigned __int64 _tzmsk_u64(unsigned __int64);
 unsigned __int64 _umul128(unsigned __int64 _Multiplier,
@@ -503,6 +575,16 @@
   *a = *a | (1 << b);
   return x;
 }
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_interlockedbittestandset(long volatile *__BitBase, long __BitPos) {
+  unsigned char __Res;
+  __asm__ ("xor %0, %0\n"
+           "lock bts %2, %1\n"
+           "setc %0\n"
+           : "=r" (__Res), "+m"(*__BitBase)
+           : "Ir"(__BitPos));
+  return __Res;
+}
 #ifdef __x86_64__
 static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
 _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
@@ -552,6 +634,16 @@
   *a = *a | (1ll << b);
   return x;
 }
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_interlockedbittestandset64(__int64 volatile *__BitBase, __int64 __BitPos) {
+  unsigned char __Res;
+  __asm__ ("xor %0, %0\n"
+           "lock bts %2, %1\n"
+           "setc %0\n"
+           : "=r" (__Res), "+m"(*__BitBase)
+           : "Ir"(__BitPos));
+  return __Res;
+}
 #endif
 /*----------------------------------------------------------------------------*\
 |* Interlocked Exchange Add
@@ -564,10 +656,6 @@
 _InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
   return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedExchangeAdd(long volatile *_Addend, long _Value) {
-  return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
@@ -598,12 +686,8 @@
 /*----------------------------------------------------------------------------*\
 |* Interlocked Increment
 \*----------------------------------------------------------------------------*/
-static __inline__ char __attribute__((__always_inline__, __nodebug__))
-_InterlockedIncrement16(char volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, 0);
-}
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedIncrement(long volatile *_Value) {
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedIncrement16(short volatile *_Value) {
   return __atomic_add_fetch(_Value, 1, 0);
 }
 #ifdef __x86_64__
@@ -615,12 +699,8 @@
 /*----------------------------------------------------------------------------*\
 |* Interlocked Decrement
 \*----------------------------------------------------------------------------*/
-static __inline__ char __attribute__((__always_inline__, __nodebug__))
-_InterlockedDecrement16(char volatile *_Value) {
-  return __atomic_sub_fetch(_Value, 1, 0);
-}
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedDecrement(long volatile *_Value) {
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedDecrement16(short volatile *_Value) {
   return __atomic_sub_fetch(_Value, 1, 0);
 }
 #ifdef __x86_64__
@@ -716,6 +796,11 @@
   __atomic_exchange(_Target, &_Value, &_Value, 0);
   return _Value;
 }
+static __inline__ void *__attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangePointer(void *volatile *_Target, void *_Value) {
+  __atomic_exchange(_Target, &_Value, &_Value, 0);
+  return _Value;
+}
 #endif
 /*----------------------------------------------------------------------------*\
 |* Interlocked Compare Exchange
@@ -732,12 +817,14 @@
   __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
   return _Comparand;
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedCompareExchange(long volatile *_Destination,
-                            long _Exchange, long _Comparand) {
+#ifdef __x86_64__
+static __inline__ void *__attribute__((__always_inline__, __nodebug__))
+_InterlockedCompareExchangePointer(void *volatile *_Destination,
+                                   void *_Exchange, void *_Comparand) {
   __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
   return _Comparand;
 }
+#endif
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedCompareExchange64(__int64 volatile *_Destination,
@@ -764,6 +851,104 @@
 _WriteBarrier(void) {
   __asm__ volatile ("" : : : "memory");
 }
+#ifdef __x86_64__
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__faststorefence(void) {
+  __asm__ volatile("lock orq $0, (%%rsp)" : : : "memory");
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* readfs, readgs
+|* (Pointers in address space #256 and #257 are relative to the GS and FS
+|* segment registers, respectively.)
+\*----------------------------------------------------------------------------*/
+#define __ptr_to_addr_space(__addr_space_nbr, __type, __offset)              \
+    ((volatile __type __attribute__((__address_space__(__addr_space_nbr)))*) \
+    (__offset))
+
+#ifdef __i386__
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+__readfsbyte(unsigned long __offset) {
+  return *__ptr_to_addr_space(257, unsigned char, __offset);
+}
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__readfsdword(unsigned long __offset) {
+  return *__ptr_to_addr_space(257, unsigned long, __offset);
+}
+static __inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
+__readfsqword(unsigned long __offset) {
+  return *__ptr_to_addr_space(257, unsigned __int64, __offset);
+}
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__readfsword(unsigned long __offset) {
+  return *__ptr_to_addr_space(257, unsigned short, __offset);
+}
+#endif
+#ifdef __x86_64__
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+__readgsbyte(unsigned long __offset) {
+  return *__ptr_to_addr_space(256, unsigned char, __offset);
+}
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__readgsdword(unsigned long __offset) {
+  return *__ptr_to_addr_space(256, unsigned long, __offset);
+}
+static __inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
+__readgsqword(unsigned long __offset) {
+  return *__ptr_to_addr_space(256, unsigned __int64, __offset);
+}
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__readgsword(unsigned long __offset) {
+  return *__ptr_to_addr_space(256, unsigned short, __offset);
+}
+#endif
+#undef __ptr_to_addr_space
+/*----------------------------------------------------------------------------*\
+|* movs, stos
+\*----------------------------------------------------------------------------*/
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
+  __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)
+                        : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
+  __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n)
+                        : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
+  __asm__("rep movsh" : : "D"(__dst), "S"(__src), "c"(__n)
+                        : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__stosb(unsigned char *__dst, unsigned char __x, size_t __n) {
+  __asm__("rep stosb" : : "D"(__dst), "a"(__x), "c"(__n)
+                        : "%edi", "%ecx");
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
+  __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n)
+                        : "%edi", "%ecx");
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
+  __asm__("rep stosh" : : "D"(__dst), "a"(__x), "c"(__n)
+                        : "%edi", "%ecx");
+}
+#ifdef __x86_64__
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
+  __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n)
+                        : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
+  __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n)
+                        : "%edi", "%ecx");
+}
+#endif
+
 /*----------------------------------------------------------------------------*\
 |* Misc
 \*----------------------------------------------------------------------------*/
@@ -775,6 +960,26 @@
 _ReturnAddress(void) {
   return __builtin_return_address(0);
 }
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__cpuid(int __info[4], int __level) {
+  __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
+                   : "a"(__level));
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__cpuidex(int __info[4], int __level, int __ecx) {
+  __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
+                   : "a"(__level), "c"(__ecx));
+}
+static __inline__ unsigned __int64 __cdecl __attribute__((__always_inline__, __nodebug__))
+_xgetbv(unsigned int __xcr_no) {
+  unsigned int __eax, __edx;
+  __asm__ ("xgetbv" : "=a" (__eax), "=d" (__edx) : "c" (__xcr_no));
+  return ((unsigned __int64)__edx << 32) | __eax;
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__halt(void) {
+  __asm__ volatile ("hlt");
+}
 
 #ifdef __cplusplus
 }
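
The _interlockedbittestandset asm added above performs an atomic test-and-set of a single bit; the same observable behaviour, expressed portably (illustrative sketch, not the header's implementation):

    #include <atomic>

    // Atomically set bit 'BitPos' of *Base and return that bit's previous
    // value, matching the documented semantics of _interlockedbittestandset.
    inline unsigned char testAndSetBit(std::atomic<long> *Base, long BitPos) {
      long Mask = 1L << BitPos;
      long Old = Base->fetch_or(Mask, std::memory_order_seq_cst);
      return (Old & Mask) ? 1 : 0;
    }
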
diff --git a/lib/Headers/arm_neon.h b/lib/Headers/arm_neon.h
deleted file mode 100644
index c297518..0000000
--- a/lib/Headers/arm_neon.h
+++ /dev/null
@@ -1,9595 +0,0 @@
-/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_NEON_H
-#define __ARM_NEON_H
-
-#if !defined(__ARM_NEON__) && !defined(__ARM_NEON)
-#error "NEON support not enabled"
-#endif
-
-#include <stdint.h>
-
-typedef float float32_t;
-typedef __fp16 float16_t;
-#ifdef __aarch64__
-typedef double float64_t;
-#endif
-
-#ifdef __aarch64__
-typedef uint8_t poly8_t;
-typedef uint16_t poly16_t;
-typedef uint64_t poly64_t;
-#else
-typedef int8_t poly8_t;
-typedef int16_t poly16_t;
-#endif
-typedef __attribute__((neon_vector_type(8)))  int8_t int8x8_t;
-typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
-typedef __attribute__((neon_vector_type(4)))  int16_t int16x4_t;
-typedef __attribute__((neon_vector_type(8)))  int16_t int16x8_t;
-typedef __attribute__((neon_vector_type(2)))  int32_t int32x2_t;
-typedef __attribute__((neon_vector_type(4)))  int32_t int32x4_t;
-typedef __attribute__((neon_vector_type(1)))  int64_t int64x1_t;
-typedef __attribute__((neon_vector_type(2)))  int64_t int64x2_t;
-typedef __attribute__((neon_vector_type(8)))  uint8_t uint8x8_t;
-typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
-typedef __attribute__((neon_vector_type(4)))  uint16_t uint16x4_t;
-typedef __attribute__((neon_vector_type(8)))  uint16_t uint16x8_t;
-typedef __attribute__((neon_vector_type(2)))  uint32_t uint32x2_t;
-typedef __attribute__((neon_vector_type(4)))  uint32_t uint32x4_t;
-typedef __attribute__((neon_vector_type(1)))  uint64_t uint64x1_t;
-typedef __attribute__((neon_vector_type(2)))  uint64_t uint64x2_t;
-typedef __attribute__((neon_vector_type(4)))  float16_t float16x4_t;
-typedef __attribute__((neon_vector_type(8)))  float16_t float16x8_t;
-typedef __attribute__((neon_vector_type(2)))  float32_t float32x2_t;
-typedef __attribute__((neon_vector_type(4)))  float32_t float32x4_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_vector_type(1)))  float64_t float64x1_t;
-typedef __attribute__((neon_vector_type(2)))  float64_t float64x2_t;
-#endif
-typedef __attribute__((neon_polyvector_type(8)))  poly8_t poly8x8_t;
-typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
-typedef __attribute__((neon_polyvector_type(4)))  poly16_t poly16x4_t;
-typedef __attribute__((neon_polyvector_type(8)))  poly16_t poly16x8_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_polyvector_type(1)))  poly64_t poly64x1_t;
-typedef __attribute__((neon_polyvector_type(2)))  poly64_t poly64x2_t;
-#endif
-
-typedef struct int8x8x2_t {
-  int8x8_t val[2];
-} int8x8x2_t;
-
-typedef struct int8x16x2_t {
-  int8x16_t val[2];
-} int8x16x2_t;
-
-typedef struct int16x4x2_t {
-  int16x4_t val[2];
-} int16x4x2_t;
-
-typedef struct int16x8x2_t {
-  int16x8_t val[2];
-} int16x8x2_t;
-
-typedef struct int32x2x2_t {
-  int32x2_t val[2];
-} int32x2x2_t;
-
-typedef struct int32x4x2_t {
-  int32x4_t val[2];
-} int32x4x2_t;
-
-typedef struct int64x1x2_t {
-  int64x1_t val[2];
-} int64x1x2_t;
-
-typedef struct int64x2x2_t {
-  int64x2_t val[2];
-} int64x2x2_t;
-
-typedef struct uint8x8x2_t {
-  uint8x8_t val[2];
-} uint8x8x2_t;
-
-typedef struct uint8x16x2_t {
-  uint8x16_t val[2];
-} uint8x16x2_t;
-
-typedef struct uint16x4x2_t {
-  uint16x4_t val[2];
-} uint16x4x2_t;
-
-typedef struct uint16x8x2_t {
-  uint16x8_t val[2];
-} uint16x8x2_t;
-
-typedef struct uint32x2x2_t {
-  uint32x2_t val[2];
-} uint32x2x2_t;
-
-typedef struct uint32x4x2_t {
-  uint32x4_t val[2];
-} uint32x4x2_t;
-
-typedef struct uint64x1x2_t {
-  uint64x1_t val[2];
-} uint64x1x2_t;
-
-typedef struct uint64x2x2_t {
-  uint64x2_t val[2];
-} uint64x2x2_t;
-
-typedef struct float16x4x2_t {
-  float16x4_t val[2];
-} float16x4x2_t;
-
-typedef struct float16x8x2_t {
-  float16x8_t val[2];
-} float16x8x2_t;
-
-typedef struct float32x2x2_t {
-  float32x2_t val[2];
-} float32x2x2_t;
-
-typedef struct float32x4x2_t {
-  float32x4_t val[2];
-} float32x4x2_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x2_t {
-  float64x1_t val[2];
-} float64x1x2_t;
-
-typedef struct float64x2x2_t {
-  float64x2_t val[2];
-} float64x2x2_t;
-
-#endif
-typedef struct poly8x8x2_t {
-  poly8x8_t val[2];
-} poly8x8x2_t;
-
-typedef struct poly8x16x2_t {
-  poly8x16_t val[2];
-} poly8x16x2_t;
-
-typedef struct poly16x4x2_t {
-  poly16x4_t val[2];
-} poly16x4x2_t;
-
-typedef struct poly16x8x2_t {
-  poly16x8_t val[2];
-} poly16x8x2_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x2_t {
-  poly64x1_t val[2];
-} poly64x1x2_t;
-
-typedef struct poly64x2x2_t {
-  poly64x2_t val[2];
-} poly64x2x2_t;
-
-#endif
-typedef struct int8x8x3_t {
-  int8x8_t val[3];
-} int8x8x3_t;
-
-typedef struct int8x16x3_t {
-  int8x16_t val[3];
-} int8x16x3_t;
-
-typedef struct int16x4x3_t {
-  int16x4_t val[3];
-} int16x4x3_t;
-
-typedef struct int16x8x3_t {
-  int16x8_t val[3];
-} int16x8x3_t;
-
-typedef struct int32x2x3_t {
-  int32x2_t val[3];
-} int32x2x3_t;
-
-typedef struct int32x4x3_t {
-  int32x4_t val[3];
-} int32x4x3_t;
-
-typedef struct int64x1x3_t {
-  int64x1_t val[3];
-} int64x1x3_t;
-
-typedef struct int64x2x3_t {
-  int64x2_t val[3];
-} int64x2x3_t;
-
-typedef struct uint8x8x3_t {
-  uint8x8_t val[3];
-} uint8x8x3_t;
-
-typedef struct uint8x16x3_t {
-  uint8x16_t val[3];
-} uint8x16x3_t;
-
-typedef struct uint16x4x3_t {
-  uint16x4_t val[3];
-} uint16x4x3_t;
-
-typedef struct uint16x8x3_t {
-  uint16x8_t val[3];
-} uint16x8x3_t;
-
-typedef struct uint32x2x3_t {
-  uint32x2_t val[3];
-} uint32x2x3_t;
-
-typedef struct uint32x4x3_t {
-  uint32x4_t val[3];
-} uint32x4x3_t;
-
-typedef struct uint64x1x3_t {
-  uint64x1_t val[3];
-} uint64x1x3_t;
-
-typedef struct uint64x2x3_t {
-  uint64x2_t val[3];
-} uint64x2x3_t;
-
-typedef struct float16x4x3_t {
-  float16x4_t val[3];
-} float16x4x3_t;
-
-typedef struct float16x8x3_t {
-  float16x8_t val[3];
-} float16x8x3_t;
-
-typedef struct float32x2x3_t {
-  float32x2_t val[3];
-} float32x2x3_t;
-
-typedef struct float32x4x3_t {
-  float32x4_t val[3];
-} float32x4x3_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x3_t {
-  float64x1_t val[3];
-} float64x1x3_t;
-
-typedef struct float64x2x3_t {
-  float64x2_t val[3];
-} float64x2x3_t;
-
-#endif
-typedef struct poly8x8x3_t {
-  poly8x8_t val[3];
-} poly8x8x3_t;
-
-typedef struct poly8x16x3_t {
-  poly8x16_t val[3];
-} poly8x16x3_t;
-
-typedef struct poly16x4x3_t {
-  poly16x4_t val[3];
-} poly16x4x3_t;
-
-typedef struct poly16x8x3_t {
-  poly16x8_t val[3];
-} poly16x8x3_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x3_t {
-  poly64x1_t val[3];
-} poly64x1x3_t;
-
-typedef struct poly64x2x3_t {
-  poly64x2_t val[3];
-} poly64x2x3_t;
-
-#endif
-typedef struct int8x8x4_t {
-  int8x8_t val[4];
-} int8x8x4_t;
-
-typedef struct int8x16x4_t {
-  int8x16_t val[4];
-} int8x16x4_t;
-
-typedef struct int16x4x4_t {
-  int16x4_t val[4];
-} int16x4x4_t;
-
-typedef struct int16x8x4_t {
-  int16x8_t val[4];
-} int16x8x4_t;
-
-typedef struct int32x2x4_t {
-  int32x2_t val[4];
-} int32x2x4_t;
-
-typedef struct int32x4x4_t {
-  int32x4_t val[4];
-} int32x4x4_t;
-
-typedef struct int64x1x4_t {
-  int64x1_t val[4];
-} int64x1x4_t;
-
-typedef struct int64x2x4_t {
-  int64x2_t val[4];
-} int64x2x4_t;
-
-typedef struct uint8x8x4_t {
-  uint8x8_t val[4];
-} uint8x8x4_t;
-
-typedef struct uint8x16x4_t {
-  uint8x16_t val[4];
-} uint8x16x4_t;
-
-typedef struct uint16x4x4_t {
-  uint16x4_t val[4];
-} uint16x4x4_t;
-
-typedef struct uint16x8x4_t {
-  uint16x8_t val[4];
-} uint16x8x4_t;
-
-typedef struct uint32x2x4_t {
-  uint32x2_t val[4];
-} uint32x2x4_t;
-
-typedef struct uint32x4x4_t {
-  uint32x4_t val[4];
-} uint32x4x4_t;
-
-typedef struct uint64x1x4_t {
-  uint64x1_t val[4];
-} uint64x1x4_t;
-
-typedef struct uint64x2x4_t {
-  uint64x2_t val[4];
-} uint64x2x4_t;
-
-typedef struct float16x4x4_t {
-  float16x4_t val[4];
-} float16x4x4_t;
-
-typedef struct float16x8x4_t {
-  float16x8_t val[4];
-} float16x8x4_t;
-
-typedef struct float32x2x4_t {
-  float32x2_t val[4];
-} float32x2x4_t;
-
-typedef struct float32x4x4_t {
-  float32x4_t val[4];
-} float32x4x4_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x4_t {
-  float64x1_t val[4];
-} float64x1x4_t;
-
-typedef struct float64x2x4_t {
-  float64x2_t val[4];
-} float64x2x4_t;
-
-#endif
-typedef struct poly8x8x4_t {
-  poly8x8_t val[4];
-} poly8x8x4_t;
-
-typedef struct poly8x16x4_t {
-  poly8x16_t val[4];
-} poly8x16x4_t;
-
-typedef struct poly16x4x4_t {
-  poly16x4_t val[4];
-} poly16x4x4_t;
-
-typedef struct poly16x8x4_t {
-  poly16x8_t val[4];
-} poly16x8x4_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x4_t {
-  poly64x1_t val[4];
-} poly64x1x4_t;
-
-typedef struct poly64x2x4_t {
-  poly64x2_t val[4];
-} poly64x2x4_t;
-
-#endif
-
-#define __ai static inline __attribute__((__always_inline__, __nodebug__))
-
-__ai int16x8_t vmovl_s8(int8x8_t __a) {
-  return (int16x8_t)__builtin_neon_vmovl_v(__a, 33); }
-__ai int32x4_t vmovl_s16(int16x4_t __a) {
-  return (int32x4_t)__builtin_neon_vmovl_v((int8x8_t)__a, 34); }
-__ai int64x2_t vmovl_s32(int32x2_t __a) {
-  return (int64x2_t)__builtin_neon_vmovl_v((int8x8_t)__a, 35); }
-__ai uint16x8_t vmovl_u8(uint8x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vmovl_v((int8x8_t)__a, 49); }
-__ai uint32x4_t vmovl_u16(uint16x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vmovl_v((int8x8_t)__a, 50); }
-__ai uint64x2_t vmovl_u32(uint32x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vmovl_v((int8x8_t)__a, 51); }
-
-__ai int16x8_t vmull_s8(int8x8_t __a, int8x8_t __b) {
-  return (int16x8_t)__builtin_neon_vmull_v(__a, __b, 33); }
-__ai int32x4_t vmull_s16(int16x4_t __a, int16x4_t __b) {
-  return (int32x4_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 34); }
-__ai int64x2_t vmull_s32(int32x2_t __a, int32x2_t __b) {
-  return (int64x2_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 35); }
-__ai uint16x8_t vmull_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 49); }
-__ai uint32x4_t vmull_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 50); }
-__ai uint64x2_t vmull_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 51); }
-__ai poly16x8_t vmull_p8(poly8x8_t __a, poly8x8_t __b) {
-  return (poly16x8_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 37); }
-
-__ai int8x8_t vabd_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vabd_v(__a, __b, 0); }
-__ai int16x4_t vabd_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vabd_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vabd_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vabd_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vabd_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai float32x2_t vabd_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai int8x16_t vabdq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vabdq_v(__a, __b, 32); }
-__ai int16x8_t vabdq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vabdq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai uint8x16_t vabdq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vabdq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vabdq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai float32x4_t vabdq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-
-__ai int16x8_t vabdl_s8(int8x8_t __a, int8x8_t __b) {
-  return (int16x8_t)vmovl_u8((uint8x8_t)vabd_s8(__a, __b)); }
-__ai int32x4_t vabdl_s16(int16x4_t __a, int16x4_t __b) {
-  return (int32x4_t)vmovl_u16((uint16x4_t)vabd_s16(__a, __b)); }
-__ai int64x2_t vabdl_s32(int32x2_t __a, int32x2_t __b) {
-  return (int64x2_t)vmovl_u32((uint32x2_t)vabd_s32(__a, __b)); }
-__ai uint16x8_t vabdl_u8(uint8x8_t __a, uint8x8_t __b) {
-  return vmovl_u8(vabd_u8(__a, __b)); }
-__ai uint32x4_t vabdl_u16(uint16x4_t __a, uint16x4_t __b) {
-  return vmovl_u16(vabd_u16(__a, __b)); }
-__ai uint64x2_t vabdl_u32(uint32x2_t __a, uint32x2_t __b) {
-  return vmovl_u32(vabd_u32(__a, __b)); }
-
-__ai int8x8_t vaba_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c) {
-  return __a + vabd_s8(__b, __c); }
-__ai int16x4_t vaba_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return __a + vabd_s16(__b, __c); }
-__ai int32x2_t vaba_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return __a + vabd_s32(__b, __c); }
-__ai uint8x8_t vaba_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
-  return __a + vabd_u8(__b, __c); }
-__ai uint16x4_t vaba_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) {
-  return __a + vabd_u16(__b, __c); }
-__ai uint32x2_t vaba_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) {
-  return __a + vabd_u32(__b, __c); }
-__ai int8x16_t vabaq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c) {
-  return __a + vabdq_s8(__b, __c); }
-__ai int16x8_t vabaq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c) {
-  return __a + vabdq_s16(__b, __c); }
-__ai int32x4_t vabaq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c) {
-  return __a + vabdq_s32(__b, __c); }
-__ai uint8x16_t vabaq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
-  return __a + vabdq_u8(__b, __c); }
-__ai uint16x8_t vabaq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return __a + vabdq_u16(__b, __c); }
-__ai uint32x4_t vabaq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return __a + vabdq_u32(__b, __c); }
-
-__ai int16x8_t vabal_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c) {
-  return __a + vabdl_s8(__b, __c); }
-__ai int32x4_t vabal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return __a + vabdl_s16(__b, __c); }
-__ai int64x2_t vabal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return __a + vabdl_s32(__b, __c); }
-__ai uint16x8_t vabal_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) {
-  return __a + vabdl_u8(__b, __c); }
-__ai uint32x4_t vabal_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) {
-  return __a + vabdl_u16(__b, __c); }
-__ai uint64x2_t vabal_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c) {
-  return __a + vabdl_u32(__b, __c); }
-
-
-__ai int8x8_t vabs_s8(int8x8_t __a) {
-  return (int8x8_t)__builtin_neon_vabs_v(__a, 0); }
-__ai int16x4_t vabs_s16(int16x4_t __a) {
-  return (int16x4_t)__builtin_neon_vabs_v((int8x8_t)__a, 1); }
-__ai int32x2_t vabs_s32(int32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vabs_v((int8x8_t)__a, 2); }
-__ai float32x2_t vabs_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vabs_v((int8x8_t)__a, 8); }
-__ai int8x16_t vabsq_s8(int8x16_t __a) {
-  return (int8x16_t)__builtin_neon_vabsq_v(__a, 32); }
-__ai int16x8_t vabsq_s16(int16x8_t __a) {
-  return (int16x8_t)__builtin_neon_vabsq_v((int8x16_t)__a, 33); }
-__ai int32x4_t vabsq_s32(int32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vabsq_v((int8x16_t)__a, 34); }
-__ai float32x4_t vabsq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vabsq_v((int8x16_t)__a, 40); }
-
-__ai int8x8_t vadd_s8(int8x8_t __a, int8x8_t __b) {
-  return __a + __b; }
-__ai int16x4_t vadd_s16(int16x4_t __a, int16x4_t __b) {
-  return __a + __b; }
-__ai int32x2_t vadd_s32(int32x2_t __a, int32x2_t __b) {
-  return __a + __b; }
-__ai int64x1_t vadd_s64(int64x1_t __a, int64x1_t __b) {
-  return __a + __b; }
-__ai float32x2_t vadd_f32(float32x2_t __a, float32x2_t __b) {
-  return __a + __b; }
-__ai uint8x8_t vadd_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __a + __b; }
-__ai uint16x4_t vadd_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __a + __b; }
-__ai uint32x2_t vadd_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __a + __b; }
-__ai uint64x1_t vadd_u64(uint64x1_t __a, uint64x1_t __b) {
-  return __a + __b; }
-__ai int8x16_t vaddq_s8(int8x16_t __a, int8x16_t __b) {
-  return __a + __b; }
-__ai int16x8_t vaddq_s16(int16x8_t __a, int16x8_t __b) {
-  return __a + __b; }
-__ai int32x4_t vaddq_s32(int32x4_t __a, int32x4_t __b) {
-  return __a + __b; }
-__ai int64x2_t vaddq_s64(int64x2_t __a, int64x2_t __b) {
-  return __a + __b; }
-__ai float32x4_t vaddq_f32(float32x4_t __a, float32x4_t __b) {
-  return __a + __b; }
-__ai uint8x16_t vaddq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __a + __b; }
-__ai uint16x8_t vaddq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __a + __b; }
-__ai uint32x4_t vaddq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __a + __b; }
-__ai uint64x2_t vaddq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __a + __b; }
-
-__ai int8x8_t vaddhn_s16(int16x8_t __a, int16x8_t __b) {
-  return (int8x8_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 0); }
-__ai int16x4_t vaddhn_s32(int32x4_t __a, int32x4_t __b) {
-  return (int16x4_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 1); }
-__ai int32x2_t vaddhn_s64(int64x2_t __a, int64x2_t __b) {
-  return (int32x2_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 2); }
-__ai uint8x8_t vaddhn_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 16); }
-__ai uint16x4_t vaddhn_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 17); }
-__ai uint32x2_t vaddhn_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 18); }
-
-__ai int16x8_t vaddl_s8(int8x8_t __a, int8x8_t __b) {
-  return vmovl_s8(__a) + vmovl_s8(__b); }
-__ai int32x4_t vaddl_s16(int16x4_t __a, int16x4_t __b) {
-  return vmovl_s16(__a) + vmovl_s16(__b); }
-__ai int64x2_t vaddl_s32(int32x2_t __a, int32x2_t __b) {
-  return vmovl_s32(__a) + vmovl_s32(__b); }
-__ai uint16x8_t vaddl_u8(uint8x8_t __a, uint8x8_t __b) {
-  return vmovl_u8(__a) + vmovl_u8(__b); }
-__ai uint32x4_t vaddl_u16(uint16x4_t __a, uint16x4_t __b) {
-  return vmovl_u16(__a) + vmovl_u16(__b); }
-__ai uint64x2_t vaddl_u32(uint32x2_t __a, uint32x2_t __b) {
-  return vmovl_u32(__a) + vmovl_u32(__b); }
-
-__ai int16x8_t vaddw_s8(int16x8_t __a, int8x8_t __b) {
-  return __a + vmovl_s8(__b); }
-__ai int32x4_t vaddw_s16(int32x4_t __a, int16x4_t __b) {
-  return __a + vmovl_s16(__b); }
-__ai int64x2_t vaddw_s32(int64x2_t __a, int32x2_t __b) {
-  return __a + vmovl_s32(__b); }
-__ai uint16x8_t vaddw_u8(uint16x8_t __a, uint8x8_t __b) {
-  return __a + vmovl_u8(__b); }
-__ai uint32x4_t vaddw_u16(uint32x4_t __a, uint16x4_t __b) {
-  return __a + vmovl_u16(__b); }
-__ai uint64x2_t vaddw_u32(uint64x2_t __a, uint32x2_t __b) {
-  return __a + vmovl_u32(__b); }
-
-__ai int8x8_t vand_s8(int8x8_t __a, int8x8_t __b) {
-  return __a & __b; }
-__ai int16x4_t vand_s16(int16x4_t __a, int16x4_t __b) {
-  return __a & __b; }
-__ai int32x2_t vand_s32(int32x2_t __a, int32x2_t __b) {
-  return __a & __b; }
-__ai int64x1_t vand_s64(int64x1_t __a, int64x1_t __b) {
-  return __a & __b; }
-__ai uint8x8_t vand_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __a & __b; }
-__ai uint16x4_t vand_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __a & __b; }
-__ai uint32x2_t vand_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __a & __b; }
-__ai uint64x1_t vand_u64(uint64x1_t __a, uint64x1_t __b) {
-  return __a & __b; }
-__ai int8x16_t vandq_s8(int8x16_t __a, int8x16_t __b) {
-  return __a & __b; }
-__ai int16x8_t vandq_s16(int16x8_t __a, int16x8_t __b) {
-  return __a & __b; }
-__ai int32x4_t vandq_s32(int32x4_t __a, int32x4_t __b) {
-  return __a & __b; }
-__ai int64x2_t vandq_s64(int64x2_t __a, int64x2_t __b) {
-  return __a & __b; }
-__ai uint8x16_t vandq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __a & __b; }
-__ai uint16x8_t vandq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __a & __b; }
-__ai uint32x4_t vandq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __a & __b; }
-__ai uint64x2_t vandq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __a & __b; }
-
-__ai int8x8_t vbic_s8(int8x8_t __a, int8x8_t __b) {
-  return __a & ~__b; }
-__ai int16x4_t vbic_s16(int16x4_t __a, int16x4_t __b) {
-  return __a & ~__b; }
-__ai int32x2_t vbic_s32(int32x2_t __a, int32x2_t __b) {
-  return __a & ~__b; }
-__ai int64x1_t vbic_s64(int64x1_t __a, int64x1_t __b) {
-  return __a & ~__b; }
-__ai uint8x8_t vbic_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __a & ~__b; }
-__ai uint16x4_t vbic_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __a & ~__b; }
-__ai uint32x2_t vbic_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __a & ~__b; }
-__ai uint64x1_t vbic_u64(uint64x1_t __a, uint64x1_t __b) {
-  return __a & ~__b; }
-__ai int8x16_t vbicq_s8(int8x16_t __a, int8x16_t __b) {
-  return __a & ~__b; }
-__ai int16x8_t vbicq_s16(int16x8_t __a, int16x8_t __b) {
-  return __a & ~__b; }
-__ai int32x4_t vbicq_s32(int32x4_t __a, int32x4_t __b) {
-  return __a & ~__b; }
-__ai int64x2_t vbicq_s64(int64x2_t __a, int64x2_t __b) {
-  return __a & ~__b; }
-__ai uint8x16_t vbicq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __a & ~__b; }
-__ai uint16x8_t vbicq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __a & ~__b; }
-__ai uint32x4_t vbicq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __a & ~__b; }
-__ai uint64x2_t vbicq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __a & ~__b; }
-
-__ai int8x8_t vbsl_s8(uint8x8_t __a, int8x8_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vbsl_v((int8x8_t)__a, __b, __c, 0); }
-__ai int16x4_t vbsl_s16(uint16x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return (int16x4_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 1); }
-__ai int32x2_t vbsl_s32(uint32x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return (int32x2_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 2); }
-__ai int64x1_t vbsl_s64(uint64x1_t __a, int64x1_t __b, int64x1_t __c) {
-  return (int64x1_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 3); }
-__ai uint8x8_t vbsl_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 16); }
-__ai uint16x4_t vbsl_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) {
-  return (uint16x4_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 17); }
-__ai uint32x2_t vbsl_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) {
-  return (uint32x2_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 18); }
-__ai uint64x1_t vbsl_u64(uint64x1_t __a, uint64x1_t __b, uint64x1_t __c) {
-  return (uint64x1_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 19); }
-__ai float32x2_t vbsl_f32(uint32x2_t __a, float32x2_t __b, float32x2_t __c) {
-  return (float32x2_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 8); }
-__ai poly8x8_t vbsl_p8(uint8x8_t __a, poly8x8_t __b, poly8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 4); }
-__ai poly16x4_t vbsl_p16(uint16x4_t __a, poly16x4_t __b, poly16x4_t __c) {
-  return (poly16x4_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 5); }
-__ai int8x16_t vbslq_s8(uint8x16_t __a, int8x16_t __b, int8x16_t __c) {
-  return (int8x16_t)__builtin_neon_vbslq_v((int8x16_t)__a, __b, __c, 32); }
-__ai int16x8_t vbslq_s16(uint16x8_t __a, int16x8_t __b, int16x8_t __c) {
-  return (int16x8_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 33); }
-__ai int32x4_t vbslq_s32(uint32x4_t __a, int32x4_t __b, int32x4_t __c) {
-  return (int32x4_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 34); }
-__ai int64x2_t vbslq_s64(uint64x2_t __a, int64x2_t __b, int64x2_t __c) {
-  return (int64x2_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 35); }
-__ai uint8x16_t vbslq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
-  return (uint8x16_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 48); }
-__ai uint16x8_t vbslq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return (uint16x8_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 49); }
-__ai uint32x4_t vbslq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return (uint32x4_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
-__ai uint64x2_t vbslq_u64(uint64x2_t __a, uint64x2_t __b, uint64x2_t __c) {
-  return (uint64x2_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 51); }
-__ai float32x4_t vbslq_f32(uint32x4_t __a, float32x4_t __b, float32x4_t __c) {
-  return (float32x4_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 40); }
-__ai poly8x16_t vbslq_p8(uint8x16_t __a, poly8x16_t __b, poly8x16_t __c) {
-  return (poly8x16_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 36); }
-__ai poly16x8_t vbslq_p16(uint16x8_t __a, poly16x8_t __b, poly16x8_t __c) {
-  return (poly16x8_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 37); }
-
-__ai uint32x2_t vcage_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vcage_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint32x4_t vcageq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vcageq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-__ai uint32x2_t vcagt_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vcagt_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint32x4_t vcagtq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vcagtq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-__ai uint32x2_t vcale_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vcale_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint32x4_t vcaleq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vcaleq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-__ai uint32x2_t vcalt_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vcalt_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint32x4_t vcaltq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vcaltq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-__ai uint8x8_t vceq_s8(int8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)(__a == __b); }
-__ai uint16x4_t vceq_s16(int16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)(__a == __b); }
-__ai uint32x2_t vceq_s32(int32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)(__a == __b); }
-__ai uint32x2_t vceq_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)(__a == __b); }
-__ai uint8x8_t vceq_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)(__a == __b); }
-__ai uint16x4_t vceq_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)(__a == __b); }
-__ai uint32x2_t vceq_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)(__a == __b); }
-__ai uint8x8_t vceq_p8(poly8x8_t __a, poly8x8_t __b) {
-  return (uint8x8_t)(__a == __b); }
-__ai uint8x16_t vceqq_s8(int8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)(__a == __b); }
-__ai uint16x8_t vceqq_s16(int16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)(__a == __b); }
-__ai uint32x4_t vceqq_s32(int32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)(__a == __b); }
-__ai uint32x4_t vceqq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)(__a == __b); }
-__ai uint8x16_t vceqq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)(__a == __b); }
-__ai uint16x8_t vceqq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)(__a == __b); }
-__ai uint32x4_t vceqq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)(__a == __b); }
-__ai uint8x16_t vceqq_p8(poly8x16_t __a, poly8x16_t __b) {
-  return (uint8x16_t)(__a == __b); }
-
-__ai uint8x8_t vcge_s8(int8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)(__a >= __b); }
-__ai uint16x4_t vcge_s16(int16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)(__a >= __b); }
-__ai uint32x2_t vcge_s32(int32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)(__a >= __b); }
-__ai uint32x2_t vcge_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)(__a >= __b); }
-__ai uint8x8_t vcge_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)(__a >= __b); }
-__ai uint16x4_t vcge_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)(__a >= __b); }
-__ai uint32x2_t vcge_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)(__a >= __b); }
-__ai uint8x16_t vcgeq_s8(int8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)(__a >= __b); }
-__ai uint16x8_t vcgeq_s16(int16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)(__a >= __b); }
-__ai uint32x4_t vcgeq_s32(int32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)(__a >= __b); }
-__ai uint32x4_t vcgeq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)(__a >= __b); }
-__ai uint8x16_t vcgeq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)(__a >= __b); }
-__ai uint16x8_t vcgeq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)(__a >= __b); }
-__ai uint32x4_t vcgeq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)(__a >= __b); }
-
-__ai uint8x8_t vcgt_s8(int8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)(__a > __b); }
-__ai uint16x4_t vcgt_s16(int16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)(__a > __b); }
-__ai uint32x2_t vcgt_s32(int32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)(__a > __b); }
-__ai uint32x2_t vcgt_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)(__a > __b); }
-__ai uint8x8_t vcgt_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)(__a > __b); }
-__ai uint16x4_t vcgt_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)(__a > __b); }
-__ai uint32x2_t vcgt_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)(__a > __b); }
-__ai uint8x16_t vcgtq_s8(int8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)(__a > __b); }
-__ai uint16x8_t vcgtq_s16(int16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)(__a > __b); }
-__ai uint32x4_t vcgtq_s32(int32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)(__a > __b); }
-__ai uint32x4_t vcgtq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)(__a > __b); }
-__ai uint8x16_t vcgtq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)(__a > __b); }
-__ai uint16x8_t vcgtq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)(__a > __b); }
-__ai uint32x4_t vcgtq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)(__a > __b); }
-
-__ai uint8x8_t vcle_s8(int8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)(__a <= __b); }
-__ai uint16x4_t vcle_s16(int16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)(__a <= __b); }
-__ai uint32x2_t vcle_s32(int32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)(__a <= __b); }
-__ai uint32x2_t vcle_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)(__a <= __b); }
-__ai uint8x8_t vcle_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)(__a <= __b); }
-__ai uint16x4_t vcle_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)(__a <= __b); }
-__ai uint32x2_t vcle_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)(__a <= __b); }
-__ai uint8x16_t vcleq_s8(int8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)(__a <= __b); }
-__ai uint16x8_t vcleq_s16(int16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)(__a <= __b); }
-__ai uint32x4_t vcleq_s32(int32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)(__a <= __b); }
-__ai uint32x4_t vcleq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)(__a <= __b); }
-__ai uint8x16_t vcleq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)(__a <= __b); }
-__ai uint16x8_t vcleq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)(__a <= __b); }
-__ai uint32x4_t vcleq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)(__a <= __b); }
-
-__ai int8x8_t vcls_s8(int8x8_t __a) {
-  return (int8x8_t)__builtin_neon_vcls_v(__a, 0); }
-__ai int16x4_t vcls_s16(int16x4_t __a) {
-  return (int16x4_t)__builtin_neon_vcls_v((int8x8_t)__a, 1); }
-__ai int32x2_t vcls_s32(int32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vcls_v((int8x8_t)__a, 2); }
-__ai int8x16_t vclsq_s8(int8x16_t __a) {
-  return (int8x16_t)__builtin_neon_vclsq_v(__a, 32); }
-__ai int16x8_t vclsq_s16(int16x8_t __a) {
-  return (int16x8_t)__builtin_neon_vclsq_v((int8x16_t)__a, 33); }
-__ai int32x4_t vclsq_s32(int32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vclsq_v((int8x16_t)__a, 34); }
-
-__ai uint8x8_t vclt_s8(int8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)(__a < __b); }
-__ai uint16x4_t vclt_s16(int16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)(__a < __b); }
-__ai uint32x2_t vclt_s32(int32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)(__a < __b); }
-__ai uint32x2_t vclt_f32(float32x2_t __a, float32x2_t __b) {
-  return (uint32x2_t)(__a < __b); }
-__ai uint8x8_t vclt_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)(__a < __b); }
-__ai uint16x4_t vclt_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)(__a < __b); }
-__ai uint32x2_t vclt_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)(__a < __b); }
-__ai uint8x16_t vcltq_s8(int8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)(__a < __b); }
-__ai uint16x8_t vcltq_s16(int16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)(__a < __b); }
-__ai uint32x4_t vcltq_s32(int32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)(__a < __b); }
-__ai uint32x4_t vcltq_f32(float32x4_t __a, float32x4_t __b) {
-  return (uint32x4_t)(__a < __b); }
-__ai uint8x16_t vcltq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)(__a < __b); }
-__ai uint16x8_t vcltq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)(__a < __b); }
-__ai uint32x4_t vcltq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)(__a < __b); }
-
-__ai int8x8_t vclz_s8(int8x8_t __a) {
-  return (int8x8_t)__builtin_neon_vclz_v(__a, 0); }
-__ai int16x4_t vclz_s16(int16x4_t __a) {
-  return (int16x4_t)__builtin_neon_vclz_v((int8x8_t)__a, 1); }
-__ai int32x2_t vclz_s32(int32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vclz_v((int8x8_t)__a, 2); }
-__ai uint8x8_t vclz_u8(uint8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vclz_v((int8x8_t)__a, 16); }
-__ai uint16x4_t vclz_u16(uint16x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vclz_v((int8x8_t)__a, 17); }
-__ai uint32x2_t vclz_u32(uint32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vclz_v((int8x8_t)__a, 18); }
-__ai int8x16_t vclzq_s8(int8x16_t __a) {
-  return (int8x16_t)__builtin_neon_vclzq_v(__a, 32); }
-__ai int16x8_t vclzq_s16(int16x8_t __a) {
-  return (int16x8_t)__builtin_neon_vclzq_v((int8x16_t)__a, 33); }
-__ai int32x4_t vclzq_s32(int32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vclzq_v((int8x16_t)__a, 34); }
-__ai uint8x16_t vclzq_u8(uint8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vclzq_v((int8x16_t)__a, 48); }
-__ai uint16x8_t vclzq_u16(uint16x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vclzq_v((int8x16_t)__a, 49); }
-__ai uint32x4_t vclzq_u32(uint32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vclzq_v((int8x16_t)__a, 50); }
-
-__ai uint8x8_t vcnt_u8(uint8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vcnt_v((int8x8_t)__a, 16); }
-__ai int8x8_t vcnt_s8(int8x8_t __a) {
-  return (int8x8_t)__builtin_neon_vcnt_v(__a, 0); }
-__ai poly8x8_t vcnt_p8(poly8x8_t __a) {
-  return (poly8x8_t)__builtin_neon_vcnt_v((int8x8_t)__a, 4); }
-__ai uint8x16_t vcntq_u8(uint8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vcntq_v((int8x16_t)__a, 48); }
-__ai int8x16_t vcntq_s8(int8x16_t __a) {
-  return (int8x16_t)__builtin_neon_vcntq_v(__a, 32); }
-__ai poly8x16_t vcntq_p8(poly8x16_t __a) {
-  return (poly8x16_t)__builtin_neon_vcntq_v((int8x16_t)__a, 36); }
-
-__ai int8x16_t vcombine_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x16_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai int16x8_t vcombine_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x8_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai int32x4_t vcombine_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x4_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai int64x2_t vcombine_s64(int64x1_t __a, int64x1_t __b) {
-  return (int64x2_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai float16x8_t vcombine_f16(float16x4_t __a, float16x4_t __b) {
-  return (float16x8_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai float32x4_t vcombine_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x4_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai uint8x16_t vcombine_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x16_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai uint16x8_t vcombine_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x8_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai uint32x4_t vcombine_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x4_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai uint64x2_t vcombine_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x2_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai poly8x16_t vcombine_p8(poly8x8_t __a, poly8x8_t __b) {
-  return (poly8x16_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai poly16x8_t vcombine_p16(poly16x4_t __a, poly16x4_t __b) {
-  return (poly16x8_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-
-__ai int8x8_t vcreate_s8(uint64_t __a) {
-  return (int8x8_t)__a; }
-__ai int16x4_t vcreate_s16(uint64_t __a) {
-  return (int16x4_t)__a; }
-__ai int32x2_t vcreate_s32(uint64_t __a) {
-  return (int32x2_t)__a; }
-__ai float16x4_t vcreate_f16(uint64_t __a) {
-  return (float16x4_t)__a; }
-__ai float32x2_t vcreate_f32(uint64_t __a) {
-  return (float32x2_t)__a; }
-__ai uint8x8_t vcreate_u8(uint64_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint16x4_t vcreate_u16(uint64_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint32x2_t vcreate_u32(uint64_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint64x1_t vcreate_u64(uint64_t __a) {
-  return (uint64x1_t)__a; }
-__ai poly8x8_t vcreate_p8(uint64_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly16x4_t vcreate_p16(uint64_t __a) {
-  return (poly16x4_t)__a; }
-__ai int64x1_t vcreate_s64(uint64_t __a) {
-  return (int64x1_t)__a; }
-
-__ai float16x4_t vcvt_f16_f32(float32x4_t __a) {
-  return (float16x4_t)__builtin_neon_vcvt_f16_v((int8x16_t)__a, 7); }
-
-__ai float32x2_t vcvt_f32_s32(int32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vcvt_f32_v((int8x8_t)__a, 2); }
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vcvt_f32_v((int8x8_t)__a, 18); }
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vcvtq_f32_v((int8x16_t)__a, 34); }
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vcvtq_f32_v((int8x16_t)__a, 50); }
-
-__ai float32x4_t vcvt_f32_f16(float16x4_t __a) {
-  return (float32x4_t)__builtin_neon_vcvt_f32_f16((int8x8_t)__a, 7); }
-
-#define vcvt_n_f32_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (float32x2_t)__builtin_neon_vcvt_n_f32_v((int8x8_t)__a, __b, 2); })
-#define vcvt_n_f32_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  (float32x2_t)__builtin_neon_vcvt_n_f32_v((int8x8_t)__a, __b, 18); })
-#define vcvtq_n_f32_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (float32x4_t)__builtin_neon_vcvtq_n_f32_v((int8x16_t)__a, __b, 34); })
-#define vcvtq_n_f32_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (float32x4_t)__builtin_neon_vcvtq_n_f32_v((int8x16_t)__a, __b, 50); })
-
-#define vcvt_n_s32_f32(a, __b) __extension__ ({ \
-  float32x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vcvt_n_s32_v((int8x8_t)__a, __b, 2); })
-#define vcvtq_n_s32_f32(a, __b) __extension__ ({ \
-  float32x4_t __a = (a); \
-  (int32x4_t)__builtin_neon_vcvtq_n_s32_v((int8x16_t)__a, __b, 34); })
-
-#define vcvt_n_u32_f32(a, __b) __extension__ ({ \
-  float32x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vcvt_n_u32_v((int8x8_t)__a, __b, 18); })
-#define vcvtq_n_u32_f32(a, __b) __extension__ ({ \
-  float32x4_t __a = (a); \
-  (uint32x4_t)__builtin_neon_vcvtq_n_u32_v((int8x16_t)__a, __b, 50); })
-
-__ai int32x2_t vcvt_s32_f32(float32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vcvt_s32_v((int8x8_t)__a, 2); }
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vcvtq_s32_v((int8x16_t)__a, 34); }
-
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcvt_u32_v((int8x8_t)__a, 18); }
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcvtq_u32_v((int8x16_t)__a, 50); }
-
-#define vdup_lane_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdup_lane_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_lane_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdup_lane_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdup_lane_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_lane_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdup_lane_p8(a, __b) __extension__ ({ \
-  poly8x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdup_lane_p16(a, __b) __extension__ ({ \
-  poly16x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_lane_f32(a, __b) __extension__ ({ \
-  float32x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdupq_lane_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_lane_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_lane_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdupq_lane_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_lane_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_lane_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdupq_lane_p8(a, __b) __extension__ ({ \
-  poly8x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_lane_p16(a, __b) __extension__ ({ \
-  poly16x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_lane_f32(a, __b) __extension__ ({ \
-  float32x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_lane_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b); })
-#define vdup_lane_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b); })
-#define vdupq_lane_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdupq_lane_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-
-__ai uint8x8_t vdup_n_u8(uint8_t __a) {
-  return (uint8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai uint16x4_t vdup_n_u16(uint16_t __a) {
-  return (uint16x4_t){ __a, __a, __a, __a }; }
-__ai uint32x2_t vdup_n_u32(uint32_t __a) {
-  return (uint32x2_t){ __a, __a }; }
-__ai int8x8_t vdup_n_s8(int8_t __a) {
-  return (int8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai int16x4_t vdup_n_s16(int16_t __a) {
-  return (int16x4_t){ __a, __a, __a, __a }; }
-__ai int32x2_t vdup_n_s32(int32_t __a) {
-  return (int32x2_t){ __a, __a }; }
-__ai poly8x8_t vdup_n_p8(poly8_t __a) {
-  return (poly8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai poly16x4_t vdup_n_p16(poly16_t __a) {
-  return (poly16x4_t){ __a, __a, __a, __a }; }
-__ai float32x2_t vdup_n_f32(float32_t __a) {
-  return (float32x2_t){ __a, __a }; }
-__ai uint8x16_t vdupq_n_u8(uint8_t __a) {
-  return (uint8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai uint16x8_t vdupq_n_u16(uint16_t __a) {
-  return (uint16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai uint32x4_t vdupq_n_u32(uint32_t __a) {
-  return (uint32x4_t){ __a, __a, __a, __a }; }
-__ai int8x16_t vdupq_n_s8(int8_t __a) {
-  return (int8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai int16x8_t vdupq_n_s16(int16_t __a) {
-  return (int16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai int32x4_t vdupq_n_s32(int32_t __a) {
-  return (int32x4_t){ __a, __a, __a, __a }; }
-__ai poly8x16_t vdupq_n_p8(poly8_t __a) {
-  return (poly8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai poly16x8_t vdupq_n_p16(poly16_t __a) {
-  return (poly16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai float32x4_t vdupq_n_f32(float32_t __a) {
-  return (float32x4_t){ __a, __a, __a, __a }; }
-__ai int64x1_t vdup_n_s64(int64_t __a) {
-  return (int64x1_t){ __a }; }
-__ai uint64x1_t vdup_n_u64(uint64_t __a) {
-  return (uint64x1_t){ __a }; }
-__ai int64x2_t vdupq_n_s64(int64_t __a) {
-  return (int64x2_t){ __a, __a }; }
-__ai uint64x2_t vdupq_n_u64(uint64_t __a) {
-  return (uint64x2_t){ __a, __a }; }
-
-__ai int8x8_t veor_s8(int8x8_t __a, int8x8_t __b) {
-  return __a ^ __b; }
-__ai int16x4_t veor_s16(int16x4_t __a, int16x4_t __b) {
-  return __a ^ __b; }
-__ai int32x2_t veor_s32(int32x2_t __a, int32x2_t __b) {
-  return __a ^ __b; }
-__ai int64x1_t veor_s64(int64x1_t __a, int64x1_t __b) {
-  return __a ^ __b; }
-__ai uint8x8_t veor_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __a ^ __b; }
-__ai uint16x4_t veor_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __a ^ __b; }
-__ai uint32x2_t veor_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __a ^ __b; }
-__ai uint64x1_t veor_u64(uint64x1_t __a, uint64x1_t __b) {
-  return __a ^ __b; }
-__ai int8x16_t veorq_s8(int8x16_t __a, int8x16_t __b) {
-  return __a ^ __b; }
-__ai int16x8_t veorq_s16(int16x8_t __a, int16x8_t __b) {
-  return __a ^ __b; }
-__ai int32x4_t veorq_s32(int32x4_t __a, int32x4_t __b) {
-  return __a ^ __b; }
-__ai int64x2_t veorq_s64(int64x2_t __a, int64x2_t __b) {
-  return __a ^ __b; }
-__ai uint8x16_t veorq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __a ^ __b; }
-__ai uint16x8_t veorq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __a ^ __b; }
-__ai uint32x4_t veorq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __a ^ __b; }
-__ai uint64x2_t veorq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __a ^ __b; }
-
-#define vext_s8(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int8x8_t __b = (b); \
-  (int8x8_t)__builtin_neon_vext_v(__a, __b, __c, 0); })
-#define vext_u8(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint8x8_t __b = (b); \
-  (uint8x8_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
-#define vext_p8(a, b, __c) __extension__ ({ \
-  poly8x8_t __a = (a); poly8x8_t __b = (b); \
-  (poly8x8_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 4); })
-#define vext_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  (int16x4_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
-#define vext_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); \
-  (uint16x4_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
-#define vext_p16(a, b, __c) __extension__ ({ \
-  poly16x4_t __a = (a); poly16x4_t __b = (b); \
-  (poly16x4_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 5); })
-#define vext_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  (int32x2_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
-#define vext_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); \
-  (uint32x2_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
-#define vext_s64(a, b, __c) __extension__ ({ \
-  int64x1_t __a = (a); int64x1_t __b = (b); \
-  (int64x1_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
-#define vext_u64(a, b, __c) __extension__ ({ \
-  uint64x1_t __a = (a); uint64x1_t __b = (b); \
-  (uint64x1_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
-#define vext_f32(a, b, __c) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); \
-  (float32x2_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 8); })
-#define vextq_s8(a, b, __c) __extension__ ({ \
-  int8x16_t __a = (a); int8x16_t __b = (b); \
-  (int8x16_t)__builtin_neon_vextq_v(__a, __b, __c, 32); })
-#define vextq_u8(a, b, __c) __extension__ ({ \
-  uint8x16_t __a = (a); uint8x16_t __b = (b); \
-  (uint8x16_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
-#define vextq_p8(a, b, __c) __extension__ ({ \
-  poly8x16_t __a = (a); poly8x16_t __b = (b); \
-  (poly8x16_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 36); })
-#define vextq_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  (int16x8_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
-#define vextq_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint16x8_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
-#define vextq_p16(a, b, __c) __extension__ ({ \
-  poly16x8_t __a = (a); poly16x8_t __b = (b); \
-  (poly16x8_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 37); })
-#define vextq_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  (int32x4_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
-#define vextq_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint32x4_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
-#define vextq_s64(a, b, __c) __extension__ ({ \
-  int64x2_t __a = (a); int64x2_t __b = (b); \
-  (int64x2_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
-#define vextq_u64(a, b, __c) __extension__ ({ \
-  uint64x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint64x2_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
-#define vextq_f32(a, b, __c) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); \
-  (float32x4_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 40); })
-
-__ai float32x2_t vfma_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c) {
-  return (float32x2_t)__builtin_neon_vfma_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 8); }
-__ai float32x4_t vfmaq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
-  return (float32x4_t)__builtin_neon_vfmaq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 40); }
-
-__ai int8x8_t vget_high_s8(int8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 8, 9, 10, 11, 12, 13, 14, 15); }
-__ai int16x4_t vget_high_s16(int16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 4, 5, 6, 7); }
-__ai int32x2_t vget_high_s32(int32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 2, 3); }
-__ai int64x1_t vget_high_s64(int64x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 1); }
-__ai float16x4_t vget_high_f16(float16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 4, 5, 6, 7); }
-__ai float32x2_t vget_high_f32(float32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 2, 3); }
-__ai uint8x8_t vget_high_u8(uint8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 8, 9, 10, 11, 12, 13, 14, 15); }
-__ai uint16x4_t vget_high_u16(uint16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 4, 5, 6, 7); }
-__ai uint32x2_t vget_high_u32(uint32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 2, 3); }
-__ai uint64x1_t vget_high_u64(uint64x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 1); }
-__ai poly8x8_t vget_high_p8(poly8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 8, 9, 10, 11, 12, 13, 14, 15); }
-__ai poly16x4_t vget_high_p16(poly16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 4, 5, 6, 7); }
-
-#define vget_lane_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  (uint8_t)__builtin_neon_vget_lane_i8((int8x8_t)__a, __b); })
-#define vget_lane_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  (uint16_t)__builtin_neon_vget_lane_i16((int16x4_t)__a, __b); })
-#define vget_lane_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  (uint32_t)__builtin_neon_vget_lane_i32((int32x2_t)__a, __b); })
-#define vget_lane_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  (int8_t)__builtin_neon_vget_lane_i8(__a, __b); })
-#define vget_lane_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  (int16_t)__builtin_neon_vget_lane_i16(__a, __b); })
-#define vget_lane_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (int32_t)__builtin_neon_vget_lane_i32(__a, __b); })
-#define vget_lane_p8(a, __b) __extension__ ({ \
-  poly8x8_t __a = (a); \
-  (poly8_t)__builtin_neon_vget_lane_i8((int8x8_t)__a, __b); })
-#define vget_lane_p16(a, __b) __extension__ ({ \
-  poly16x4_t __a = (a); \
-  (poly16_t)__builtin_neon_vget_lane_i16((int16x4_t)__a, __b); })
-#define vget_lane_f32(a, __b) __extension__ ({ \
-  float32x2_t __a = (a); \
-  (float32_t)__builtin_neon_vget_lane_f32(__a, __b); })
-#define vgetq_lane_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  (uint8_t)__builtin_neon_vgetq_lane_i8((int8x16_t)__a, __b); })
-#define vgetq_lane_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint16_t)__builtin_neon_vgetq_lane_i16((int16x8_t)__a, __b); })
-#define vgetq_lane_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint32_t)__builtin_neon_vgetq_lane_i32((int32x4_t)__a, __b); })
-#define vgetq_lane_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  (int8_t)__builtin_neon_vgetq_lane_i8(__a, __b); })
-#define vgetq_lane_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int16_t)__builtin_neon_vgetq_lane_i16(__a, __b); })
-#define vgetq_lane_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int32_t)__builtin_neon_vgetq_lane_i32(__a, __b); })
-#define vgetq_lane_p8(a, __b) __extension__ ({ \
-  poly8x16_t __a = (a); \
-  (poly8_t)__builtin_neon_vgetq_lane_i8((int8x16_t)__a, __b); })
-#define vgetq_lane_p16(a, __b) __extension__ ({ \
-  poly16x8_t __a = (a); \
-  (poly16_t)__builtin_neon_vgetq_lane_i16((int16x8_t)__a, __b); })
-#define vgetq_lane_f32(a, __b) __extension__ ({ \
-  float32x4_t __a = (a); \
-  (float32_t)__builtin_neon_vgetq_lane_f32(__a, __b); })
-#define vget_lane_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  (int64_t)__builtin_neon_vget_lane_i64(__a, __b); })
-#define vget_lane_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  (uint64_t)__builtin_neon_vget_lane_i64((int64x1_t)__a, __b); })
-#define vgetq_lane_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int64_t)__builtin_neon_vgetq_lane_i64(__a, __b); })
-#define vgetq_lane_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint64_t)__builtin_neon_vgetq_lane_i64((int64x2_t)__a, __b); })
-
-__ai int8x8_t vget_low_s8(int8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7); }
-__ai int16x4_t vget_low_s16(int16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); }
-__ai int32x2_t vget_low_s32(int32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1); }
-__ai int64x1_t vget_low_s64(int64x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 0); }
-__ai float16x4_t vget_low_f16(float16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); }
-__ai float32x2_t vget_low_f32(float32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1); }
-__ai uint8x8_t vget_low_u8(uint8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7); }
-__ai uint16x4_t vget_low_u16(uint16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); }
-__ai uint32x2_t vget_low_u32(uint32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1); }
-__ai uint64x1_t vget_low_u64(uint64x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 0); }
-__ai poly8x8_t vget_low_p8(poly8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7); }
-__ai poly16x4_t vget_low_p16(poly16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); }
-
-__ai int8x8_t vhadd_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vhadd_v(__a, __b, 0); }
-__ai int16x4_t vhadd_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vhadd_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vhadd_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vhadd_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vhadd_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai int8x16_t vhaddq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vhaddq_v(__a, __b, 32); }
-__ai int16x8_t vhaddq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vhaddq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai uint8x16_t vhaddq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vhaddq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vhaddq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-__ai int8x8_t vhsub_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vhsub_v(__a, __b, 0); }
-__ai int16x4_t vhsub_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vhsub_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vhsub_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vhsub_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vhsub_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai int8x16_t vhsubq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vhsubq_v(__a, __b, 32); }
-__ai int16x8_t vhsubq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vhsubq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai uint8x16_t vhsubq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vhsubq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vhsubq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-#define vld1q_u8(__a) __extension__ ({ \
-  (uint8x16_t)__builtin_neon_vld1q_v(__a, 48); })
-#define vld1q_u16(__a) __extension__ ({ \
-  (uint16x8_t)__builtin_neon_vld1q_v(__a, 49); })
-#define vld1q_u32(__a) __extension__ ({ \
-  (uint32x4_t)__builtin_neon_vld1q_v(__a, 50); })
-#define vld1q_u64(__a) __extension__ ({ \
-  (uint64x2_t)__builtin_neon_vld1q_v(__a, 51); })
-#define vld1q_s8(__a) __extension__ ({ \
-  (int8x16_t)__builtin_neon_vld1q_v(__a, 32); })
-#define vld1q_s16(__a) __extension__ ({ \
-  (int16x8_t)__builtin_neon_vld1q_v(__a, 33); })
-#define vld1q_s32(__a) __extension__ ({ \
-  (int32x4_t)__builtin_neon_vld1q_v(__a, 34); })
-#define vld1q_s64(__a) __extension__ ({ \
-  (int64x2_t)__builtin_neon_vld1q_v(__a, 35); })
-#define vld1q_f16(__a) __extension__ ({ \
-  (float16x8_t)__builtin_neon_vld1q_v(__a, 39); })
-#define vld1q_f32(__a) __extension__ ({ \
-  (float32x4_t)__builtin_neon_vld1q_v(__a, 40); })
-#define vld1q_p8(__a) __extension__ ({ \
-  (poly8x16_t)__builtin_neon_vld1q_v(__a, 36); })
-#define vld1q_p16(__a) __extension__ ({ \
-  (poly16x8_t)__builtin_neon_vld1q_v(__a, 37); })
-#define vld1_u8(__a) __extension__ ({ \
-  (uint8x8_t)__builtin_neon_vld1_v(__a, 16); })
-#define vld1_u16(__a) __extension__ ({ \
-  (uint16x4_t)__builtin_neon_vld1_v(__a, 17); })
-#define vld1_u32(__a) __extension__ ({ \
-  (uint32x2_t)__builtin_neon_vld1_v(__a, 18); })
-#define vld1_u64(__a) __extension__ ({ \
-  (uint64x1_t)__builtin_neon_vld1_v(__a, 19); })
-#define vld1_s8(__a) __extension__ ({ \
-  (int8x8_t)__builtin_neon_vld1_v(__a, 0); })
-#define vld1_s16(__a) __extension__ ({ \
-  (int16x4_t)__builtin_neon_vld1_v(__a, 1); })
-#define vld1_s32(__a) __extension__ ({ \
-  (int32x2_t)__builtin_neon_vld1_v(__a, 2); })
-#define vld1_s64(__a) __extension__ ({ \
-  (int64x1_t)__builtin_neon_vld1_v(__a, 3); })
-#define vld1_f16(__a) __extension__ ({ \
-  (float16x4_t)__builtin_neon_vld1_v(__a, 7); })
-#define vld1_f32(__a) __extension__ ({ \
-  (float32x2_t)__builtin_neon_vld1_v(__a, 8); })
-#define vld1_p8(__a) __extension__ ({ \
-  (poly8x8_t)__builtin_neon_vld1_v(__a, 4); })
-#define vld1_p16(__a) __extension__ ({ \
-  (poly16x4_t)__builtin_neon_vld1_v(__a, 5); })
-
-#define vld1q_dup_u8(__a) __extension__ ({ \
-  (uint8x16_t)__builtin_neon_vld1q_dup_v(__a, 48); })
-#define vld1q_dup_u16(__a) __extension__ ({ \
-  (uint16x8_t)__builtin_neon_vld1q_dup_v(__a, 49); })
-#define vld1q_dup_u32(__a) __extension__ ({ \
-  (uint32x4_t)__builtin_neon_vld1q_dup_v(__a, 50); })
-#define vld1q_dup_u64(__a) __extension__ ({ \
-  (uint64x2_t)__builtin_neon_vld1q_dup_v(__a, 51); })
-#define vld1q_dup_s8(__a) __extension__ ({ \
-  (int8x16_t)__builtin_neon_vld1q_dup_v(__a, 32); })
-#define vld1q_dup_s16(__a) __extension__ ({ \
-  (int16x8_t)__builtin_neon_vld1q_dup_v(__a, 33); })
-#define vld1q_dup_s32(__a) __extension__ ({ \
-  (int32x4_t)__builtin_neon_vld1q_dup_v(__a, 34); })
-#define vld1q_dup_s64(__a) __extension__ ({ \
-  (int64x2_t)__builtin_neon_vld1q_dup_v(__a, 35); })
-#define vld1q_dup_f16(__a) __extension__ ({ \
-  (float16x8_t)__builtin_neon_vld1q_dup_v(__a, 39); })
-#define vld1q_dup_f32(__a) __extension__ ({ \
-  (float32x4_t)__builtin_neon_vld1q_dup_v(__a, 40); })
-#define vld1q_dup_p8(__a) __extension__ ({ \
-  (poly8x16_t)__builtin_neon_vld1q_dup_v(__a, 36); })
-#define vld1q_dup_p16(__a) __extension__ ({ \
-  (poly16x8_t)__builtin_neon_vld1q_dup_v(__a, 37); })
-#define vld1_dup_u8(__a) __extension__ ({ \
-  (uint8x8_t)__builtin_neon_vld1_dup_v(__a, 16); })
-#define vld1_dup_u16(__a) __extension__ ({ \
-  (uint16x4_t)__builtin_neon_vld1_dup_v(__a, 17); })
-#define vld1_dup_u32(__a) __extension__ ({ \
-  (uint32x2_t)__builtin_neon_vld1_dup_v(__a, 18); })
-#define vld1_dup_u64(__a) __extension__ ({ \
-  (uint64x1_t)__builtin_neon_vld1_dup_v(__a, 19); })
-#define vld1_dup_s8(__a) __extension__ ({ \
-  (int8x8_t)__builtin_neon_vld1_dup_v(__a, 0); })
-#define vld1_dup_s16(__a) __extension__ ({ \
-  (int16x4_t)__builtin_neon_vld1_dup_v(__a, 1); })
-#define vld1_dup_s32(__a) __extension__ ({ \
-  (int32x2_t)__builtin_neon_vld1_dup_v(__a, 2); })
-#define vld1_dup_s64(__a) __extension__ ({ \
-  (int64x1_t)__builtin_neon_vld1_dup_v(__a, 3); })
-#define vld1_dup_f16(__a) __extension__ ({ \
-  (float16x4_t)__builtin_neon_vld1_dup_v(__a, 7); })
-#define vld1_dup_f32(__a) __extension__ ({ \
-  (float32x2_t)__builtin_neon_vld1_dup_v(__a, 8); })
-#define vld1_dup_p8(__a) __extension__ ({ \
-  (poly8x8_t)__builtin_neon_vld1_dup_v(__a, 4); })
-#define vld1_dup_p16(__a) __extension__ ({ \
-  (poly16x4_t)__builtin_neon_vld1_dup_v(__a, 5); })
-
-#define vld1q_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x16_t __b = (b); \
-  (uint8x16_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 48); })
-#define vld1q_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x8_t __b = (b); \
-  (uint16x8_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 49); })
-#define vld1q_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x4_t __b = (b); \
-  (uint32x4_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 50); })
-#define vld1q_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x2_t __b = (b); \
-  (uint64x2_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 51); })
-#define vld1q_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x16_t __b = (b); \
-  (int8x16_t)__builtin_neon_vld1q_lane_v(__a, __b, __c, 32); })
-#define vld1q_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x8_t __b = (b); \
-  (int16x8_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 33); })
-#define vld1q_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x4_t __b = (b); \
-  (int32x4_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 34); })
-#define vld1q_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x2_t __b = (b); \
-  (int64x2_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 35); })
-#define vld1q_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x8_t __b = (b); \
-  (float16x8_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 39); })
-#define vld1q_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x4_t __b = (b); \
-  (float32x4_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 40); })
-#define vld1q_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x16_t __b = (b); \
-  (poly8x16_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 36); })
-#define vld1q_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x8_t __b = (b); \
-  (poly16x8_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 37); })
-#define vld1_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x8_t __b = (b); \
-  (uint8x8_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 16); })
-#define vld1_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x4_t __b = (b); \
-  (uint16x4_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 17); })
-#define vld1_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x2_t __b = (b); \
-  (uint32x2_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 18); })
-#define vld1_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x1_t __b = (b); \
-  (uint64x1_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 19); })
-#define vld1_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x8_t __b = (b); \
-  (int8x8_t)__builtin_neon_vld1_lane_v(__a, __b, __c, 0); })
-#define vld1_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x4_t __b = (b); \
-  (int16x4_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 1); })
-#define vld1_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x2_t __b = (b); \
-  (int32x2_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 2); })
-#define vld1_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x1_t __b = (b); \
-  (int64x1_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 3); })
-#define vld1_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x4_t __b = (b); \
-  (float16x4_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 7); })
-#define vld1_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x2_t __b = (b); \
-  (float32x2_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 8); })
-#define vld1_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x8_t __b = (b); \
-  (poly8x8_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 4); })
-#define vld1_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x4_t __b = (b); \
-  (poly16x4_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 5); })
-
-#define vld2q_u8(__a) __extension__ ({ \
-  uint8x16x2_t r; __builtin_neon_vld2q_v(&r, __a, 48); r; })
-#define vld2q_u16(__a) __extension__ ({ \
-  uint16x8x2_t r; __builtin_neon_vld2q_v(&r, __a, 49); r; })
-#define vld2q_u32(__a) __extension__ ({ \
-  uint32x4x2_t r; __builtin_neon_vld2q_v(&r, __a, 50); r; })
-#define vld2q_s8(__a) __extension__ ({ \
-  int8x16x2_t r; __builtin_neon_vld2q_v(&r, __a, 32); r; })
-#define vld2q_s16(__a) __extension__ ({ \
-  int16x8x2_t r; __builtin_neon_vld2q_v(&r, __a, 33); r; })
-#define vld2q_s32(__a) __extension__ ({ \
-  int32x4x2_t r; __builtin_neon_vld2q_v(&r, __a, 34); r; })
-#define vld2q_f16(__a) __extension__ ({ \
-  float16x8x2_t r; __builtin_neon_vld2q_v(&r, __a, 39); r; })
-#define vld2q_f32(__a) __extension__ ({ \
-  float32x4x2_t r; __builtin_neon_vld2q_v(&r, __a, 40); r; })
-#define vld2q_p8(__a) __extension__ ({ \
-  poly8x16x2_t r; __builtin_neon_vld2q_v(&r, __a, 36); r; })
-#define vld2q_p16(__a) __extension__ ({ \
-  poly16x8x2_t r; __builtin_neon_vld2q_v(&r, __a, 37); r; })
-#define vld2_u8(__a) __extension__ ({ \
-  uint8x8x2_t r; __builtin_neon_vld2_v(&r, __a, 16); r; })
-#define vld2_u16(__a) __extension__ ({ \
-  uint16x4x2_t r; __builtin_neon_vld2_v(&r, __a, 17); r; })
-#define vld2_u32(__a) __extension__ ({ \
-  uint32x2x2_t r; __builtin_neon_vld2_v(&r, __a, 18); r; })
-#define vld2_u64(__a) __extension__ ({ \
-  uint64x1x2_t r; __builtin_neon_vld2_v(&r, __a, 19); r; })
-#define vld2_s8(__a) __extension__ ({ \
-  int8x8x2_t r; __builtin_neon_vld2_v(&r, __a, 0); r; })
-#define vld2_s16(__a) __extension__ ({ \
-  int16x4x2_t r; __builtin_neon_vld2_v(&r, __a, 1); r; })
-#define vld2_s32(__a) __extension__ ({ \
-  int32x2x2_t r; __builtin_neon_vld2_v(&r, __a, 2); r; })
-#define vld2_s64(__a) __extension__ ({ \
-  int64x1x2_t r; __builtin_neon_vld2_v(&r, __a, 3); r; })
-#define vld2_f16(__a) __extension__ ({ \
-  float16x4x2_t r; __builtin_neon_vld2_v(&r, __a, 7); r; })
-#define vld2_f32(__a) __extension__ ({ \
-  float32x2x2_t r; __builtin_neon_vld2_v(&r, __a, 8); r; })
-#define vld2_p8(__a) __extension__ ({ \
-  poly8x8x2_t r; __builtin_neon_vld2_v(&r, __a, 4); r; })
-#define vld2_p16(__a) __extension__ ({ \
-  poly16x4x2_t r; __builtin_neon_vld2_v(&r, __a, 5); r; })
-
-#define vld2_dup_u8(__a) __extension__ ({ \
-  uint8x8x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 16); r; })
-#define vld2_dup_u16(__a) __extension__ ({ \
-  uint16x4x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 17); r; })
-#define vld2_dup_u32(__a) __extension__ ({ \
-  uint32x2x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 18); r; })
-#define vld2_dup_u64(__a) __extension__ ({ \
-  uint64x1x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 19); r; })
-#define vld2_dup_s8(__a) __extension__ ({ \
-  int8x8x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 0); r; })
-#define vld2_dup_s16(__a) __extension__ ({ \
-  int16x4x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 1); r; })
-#define vld2_dup_s32(__a) __extension__ ({ \
-  int32x2x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 2); r; })
-#define vld2_dup_s64(__a) __extension__ ({ \
-  int64x1x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 3); r; })
-#define vld2_dup_f16(__a) __extension__ ({ \
-  float16x4x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 7); r; })
-#define vld2_dup_f32(__a) __extension__ ({ \
-  float32x2x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 8); r; })
-#define vld2_dup_p8(__a) __extension__ ({ \
-  poly8x8x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 4); r; })
-#define vld2_dup_p16(__a) __extension__ ({ \
-  poly16x4x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 5); r; })
-
-#define vld2q_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x8x2_t __b = (b); \
-  uint16x8x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 49); r; })
-#define vld2q_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x4x2_t __b = (b); \
-  uint32x4x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 50); r; })
-#define vld2q_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x8x2_t __b = (b); \
-  int16x8x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 33); r; })
-#define vld2q_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x4x2_t __b = (b); \
-  int32x4x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 34); r; })
-#define vld2q_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x8x2_t __b = (b); \
-  float16x8x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 39); r; })
-#define vld2q_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x4x2_t __b = (b); \
-  float32x4x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 40); r; })
-#define vld2q_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x8x2_t __b = (b); \
-  poly16x8x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 37); r; })
-#define vld2_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x8x2_t __b = (b); \
-  uint8x8x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 16); r; })
-#define vld2_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x4x2_t __b = (b); \
-  uint16x4x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 17); r; })
-#define vld2_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x2x2_t __b = (b); \
-  uint32x2x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 18); r; })
-#define vld2_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x8x2_t __b = (b); \
-  int8x8x2_t r; __builtin_neon_vld2_lane_v(&r, __a, __b.val[0], __b.val[1], __c, 0); r; })
-#define vld2_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x4x2_t __b = (b); \
-  int16x4x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 1); r; })
-#define vld2_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x2x2_t __b = (b); \
-  int32x2x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 2); r; })
-#define vld2_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x4x2_t __b = (b); \
-  float16x4x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 7); r; })
-#define vld2_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x2x2_t __b = (b); \
-  float32x2x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 8); r; })
-#define vld2_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x8x2_t __b = (b); \
-  poly8x8x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 4); r; })
-#define vld2_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x4x2_t __b = (b); \
-  poly16x4x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 5); r; })
-
-#define vld3q_u8(__a) __extension__ ({ \
-  uint8x16x3_t r; __builtin_neon_vld3q_v(&r, __a, 48); r; })
-#define vld3q_u16(__a) __extension__ ({ \
-  uint16x8x3_t r; __builtin_neon_vld3q_v(&r, __a, 49); r; })
-#define vld3q_u32(__a) __extension__ ({ \
-  uint32x4x3_t r; __builtin_neon_vld3q_v(&r, __a, 50); r; })
-#define vld3q_s8(__a) __extension__ ({ \
-  int8x16x3_t r; __builtin_neon_vld3q_v(&r, __a, 32); r; })
-#define vld3q_s16(__a) __extension__ ({ \
-  int16x8x3_t r; __builtin_neon_vld3q_v(&r, __a, 33); r; })
-#define vld3q_s32(__a) __extension__ ({ \
-  int32x4x3_t r; __builtin_neon_vld3q_v(&r, __a, 34); r; })
-#define vld3q_f16(__a) __extension__ ({ \
-  float16x8x3_t r; __builtin_neon_vld3q_v(&r, __a, 39); r; })
-#define vld3q_f32(__a) __extension__ ({ \
-  float32x4x3_t r; __builtin_neon_vld3q_v(&r, __a, 40); r; })
-#define vld3q_p8(__a) __extension__ ({ \
-  poly8x16x3_t r; __builtin_neon_vld3q_v(&r, __a, 36); r; })
-#define vld3q_p16(__a) __extension__ ({ \
-  poly16x8x3_t r; __builtin_neon_vld3q_v(&r, __a, 37); r; })
-#define vld3_u8(__a) __extension__ ({ \
-  uint8x8x3_t r; __builtin_neon_vld3_v(&r, __a, 16); r; })
-#define vld3_u16(__a) __extension__ ({ \
-  uint16x4x3_t r; __builtin_neon_vld3_v(&r, __a, 17); r; })
-#define vld3_u32(__a) __extension__ ({ \
-  uint32x2x3_t r; __builtin_neon_vld3_v(&r, __a, 18); r; })
-#define vld3_u64(__a) __extension__ ({ \
-  uint64x1x3_t r; __builtin_neon_vld3_v(&r, __a, 19); r; })
-#define vld3_s8(__a) __extension__ ({ \
-  int8x8x3_t r; __builtin_neon_vld3_v(&r, __a, 0); r; })
-#define vld3_s16(__a) __extension__ ({ \
-  int16x4x3_t r; __builtin_neon_vld3_v(&r, __a, 1); r; })
-#define vld3_s32(__a) __extension__ ({ \
-  int32x2x3_t r; __builtin_neon_vld3_v(&r, __a, 2); r; })
-#define vld3_s64(__a) __extension__ ({ \
-  int64x1x3_t r; __builtin_neon_vld3_v(&r, __a, 3); r; })
-#define vld3_f16(__a) __extension__ ({ \
-  float16x4x3_t r; __builtin_neon_vld3_v(&r, __a, 7); r; })
-#define vld3_f32(__a) __extension__ ({ \
-  float32x2x3_t r; __builtin_neon_vld3_v(&r, __a, 8); r; })
-#define vld3_p8(__a) __extension__ ({ \
-  poly8x8x3_t r; __builtin_neon_vld3_v(&r, __a, 4); r; })
-#define vld3_p16(__a) __extension__ ({ \
-  poly16x4x3_t r; __builtin_neon_vld3_v(&r, __a, 5); r; })
-
-#define vld3_dup_u8(__a) __extension__ ({ \
-  uint8x8x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 16); r; })
-#define vld3_dup_u16(__a) __extension__ ({ \
-  uint16x4x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 17); r; })
-#define vld3_dup_u32(__a) __extension__ ({ \
-  uint32x2x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 18); r; })
-#define vld3_dup_u64(__a) __extension__ ({ \
-  uint64x1x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 19); r; })
-#define vld3_dup_s8(__a) __extension__ ({ \
-  int8x8x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 0); r; })
-#define vld3_dup_s16(__a) __extension__ ({ \
-  int16x4x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 1); r; })
-#define vld3_dup_s32(__a) __extension__ ({ \
-  int32x2x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 2); r; })
-#define vld3_dup_s64(__a) __extension__ ({ \
-  int64x1x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 3); r; })
-#define vld3_dup_f16(__a) __extension__ ({ \
-  float16x4x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 7); r; })
-#define vld3_dup_f32(__a) __extension__ ({ \
-  float32x2x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 8); r; })
-#define vld3_dup_p8(__a) __extension__ ({ \
-  poly8x8x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 4); r; })
-#define vld3_dup_p16(__a) __extension__ ({ \
-  poly16x4x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 5); r; })
-
-#define vld3q_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x8x3_t __b = (b); \
-  uint16x8x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 49); r; })
-#define vld3q_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x4x3_t __b = (b); \
-  uint32x4x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 50); r; })
-#define vld3q_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x8x3_t __b = (b); \
-  int16x8x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 33); r; })
-#define vld3q_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x4x3_t __b = (b); \
-  int32x4x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 34); r; })
-#define vld3q_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x8x3_t __b = (b); \
-  float16x8x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 39); r; })
-#define vld3q_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x4x3_t __b = (b); \
-  float32x4x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 40); r; })
-#define vld3q_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x8x3_t __b = (b); \
-  poly16x8x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 37); r; })
-#define vld3_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x8x3_t __b = (b); \
-  uint8x8x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 16); r; })
-#define vld3_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x4x3_t __b = (b); \
-  uint16x4x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 17); r; })
-#define vld3_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x2x3_t __b = (b); \
-  uint32x2x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 18); r; })
-#define vld3_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x8x3_t __b = (b); \
-  int8x8x3_t r; __builtin_neon_vld3_lane_v(&r, __a, __b.val[0], __b.val[1], __b.val[2], __c, 0); r; })
-#define vld3_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x4x3_t __b = (b); \
-  int16x4x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 1); r; })
-#define vld3_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x2x3_t __b = (b); \
-  int32x2x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 2); r; })
-#define vld3_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x4x3_t __b = (b); \
-  float16x4x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 7); r; })
-#define vld3_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x2x3_t __b = (b); \
-  float32x2x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 8); r; })
-#define vld3_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x8x3_t __b = (b); \
-  poly8x8x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 4); r; })
-#define vld3_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x4x3_t __b = (b); \
-  poly16x4x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 5); r; })
-
-#define vld4q_u8(__a) __extension__ ({ \
-  uint8x16x4_t r; __builtin_neon_vld4q_v(&r, __a, 48); r; })
-#define vld4q_u16(__a) __extension__ ({ \
-  uint16x8x4_t r; __builtin_neon_vld4q_v(&r, __a, 49); r; })
-#define vld4q_u32(__a) __extension__ ({ \
-  uint32x4x4_t r; __builtin_neon_vld4q_v(&r, __a, 50); r; })
-#define vld4q_s8(__a) __extension__ ({ \
-  int8x16x4_t r; __builtin_neon_vld4q_v(&r, __a, 32); r; })
-#define vld4q_s16(__a) __extension__ ({ \
-  int16x8x4_t r; __builtin_neon_vld4q_v(&r, __a, 33); r; })
-#define vld4q_s32(__a) __extension__ ({ \
-  int32x4x4_t r; __builtin_neon_vld4q_v(&r, __a, 34); r; })
-#define vld4q_f16(__a) __extension__ ({ \
-  float16x8x4_t r; __builtin_neon_vld4q_v(&r, __a, 39); r; })
-#define vld4q_f32(__a) __extension__ ({ \
-  float32x4x4_t r; __builtin_neon_vld4q_v(&r, __a, 40); r; })
-#define vld4q_p8(__a) __extension__ ({ \
-  poly8x16x4_t r; __builtin_neon_vld4q_v(&r, __a, 36); r; })
-#define vld4q_p16(__a) __extension__ ({ \
-  poly16x8x4_t r; __builtin_neon_vld4q_v(&r, __a, 37); r; })
-#define vld4_u8(__a) __extension__ ({ \
-  uint8x8x4_t r; __builtin_neon_vld4_v(&r, __a, 16); r; })
-#define vld4_u16(__a) __extension__ ({ \
-  uint16x4x4_t r; __builtin_neon_vld4_v(&r, __a, 17); r; })
-#define vld4_u32(__a) __extension__ ({ \
-  uint32x2x4_t r; __builtin_neon_vld4_v(&r, __a, 18); r; })
-#define vld4_u64(__a) __extension__ ({ \
-  uint64x1x4_t r; __builtin_neon_vld4_v(&r, __a, 19); r; })
-#define vld4_s8(__a) __extension__ ({ \
-  int8x8x4_t r; __builtin_neon_vld4_v(&r, __a, 0); r; })
-#define vld4_s16(__a) __extension__ ({ \
-  int16x4x4_t r; __builtin_neon_vld4_v(&r, __a, 1); r; })
-#define vld4_s32(__a) __extension__ ({ \
-  int32x2x4_t r; __builtin_neon_vld4_v(&r, __a, 2); r; })
-#define vld4_s64(__a) __extension__ ({ \
-  int64x1x4_t r; __builtin_neon_vld4_v(&r, __a, 3); r; })
-#define vld4_f16(__a) __extension__ ({ \
-  float16x4x4_t r; __builtin_neon_vld4_v(&r, __a, 7); r; })
-#define vld4_f32(__a) __extension__ ({ \
-  float32x2x4_t r; __builtin_neon_vld4_v(&r, __a, 8); r; })
-#define vld4_p8(__a) __extension__ ({ \
-  poly8x8x4_t r; __builtin_neon_vld4_v(&r, __a, 4); r; })
-#define vld4_p16(__a) __extension__ ({ \
-  poly16x4x4_t r; __builtin_neon_vld4_v(&r, __a, 5); r; })
-
-#define vld4_dup_u8(__a) __extension__ ({ \
-  uint8x8x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 16); r; })
-#define vld4_dup_u16(__a) __extension__ ({ \
-  uint16x4x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 17); r; })
-#define vld4_dup_u32(__a) __extension__ ({ \
-  uint32x2x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 18); r; })
-#define vld4_dup_u64(__a) __extension__ ({ \
-  uint64x1x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 19); r; })
-#define vld4_dup_s8(__a) __extension__ ({ \
-  int8x8x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 0); r; })
-#define vld4_dup_s16(__a) __extension__ ({ \
-  int16x4x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 1); r; })
-#define vld4_dup_s32(__a) __extension__ ({ \
-  int32x2x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 2); r; })
-#define vld4_dup_s64(__a) __extension__ ({ \
-  int64x1x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 3); r; })
-#define vld4_dup_f16(__a) __extension__ ({ \
-  float16x4x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 7); r; })
-#define vld4_dup_f32(__a) __extension__ ({ \
-  float32x2x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 8); r; })
-#define vld4_dup_p8(__a) __extension__ ({ \
-  poly8x8x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 4); r; })
-#define vld4_dup_p16(__a) __extension__ ({ \
-  poly16x4x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 5); r; })
-
-#define vld4q_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x8x4_t __b = (b); \
-  uint16x8x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 49); r; })
-#define vld4q_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x4x4_t __b = (b); \
-  uint32x4x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 50); r; })
-#define vld4q_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x8x4_t __b = (b); \
-  int16x8x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 33); r; })
-#define vld4q_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x4x4_t __b = (b); \
-  int32x4x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 34); r; })
-#define vld4q_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x8x4_t __b = (b); \
-  float16x8x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 39); r; })
-#define vld4q_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x4x4_t __b = (b); \
-  float32x4x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 40); r; })
-#define vld4q_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x8x4_t __b = (b); \
-  poly16x8x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 37); r; })
-#define vld4_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x8x4_t __b = (b); \
-  uint8x8x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 16); r; })
-#define vld4_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x4x4_t __b = (b); \
-  uint16x4x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 17); r; })
-#define vld4_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x2x4_t __b = (b); \
-  uint32x2x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 18); r; })
-#define vld4_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x8x4_t __b = (b); \
-  int8x8x4_t r; __builtin_neon_vld4_lane_v(&r, __a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 0); r; })
-#define vld4_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x4x4_t __b = (b); \
-  int16x4x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 1); r; })
-#define vld4_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x2x4_t __b = (b); \
-  int32x2x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 2); r; })
-#define vld4_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x4x4_t __b = (b); \
-  float16x4x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 7); r; })
-#define vld4_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x2x4_t __b = (b); \
-  float32x2x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 8); r; })
-#define vld4_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x8x4_t __b = (b); \
-  poly8x8x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 4); r; })
-#define vld4_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x4x4_t __b = (b); \
-  poly16x4x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 5); r; })
-
-__ai int8x8_t vmax_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vmax_v(__a, __b, 0); }
-__ai int16x4_t vmax_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vmax_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vmax_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vmax_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vmax_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai float32x2_t vmax_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai int8x16_t vmaxq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vmaxq_v(__a, __b, 32); }
-__ai int16x8_t vmaxq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vmaxq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai uint8x16_t vmaxq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vmaxq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vmaxq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai float32x4_t vmaxq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-
-__ai int8x8_t vmin_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vmin_v(__a, __b, 0); }
-__ai int16x4_t vmin_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vmin_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vmin_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vmin_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vmin_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai float32x2_t vmin_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai int8x16_t vminq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vminq_v(__a, __b, 32); }
-__ai int16x8_t vminq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vminq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai uint8x16_t vminq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vminq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vminq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai float32x4_t vminq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-
-__ai int8x8_t vmla_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c) {
-  return __a + (__b * __c); }
-__ai int16x4_t vmla_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return __a + (__b * __c); }
-__ai int32x2_t vmla_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return __a + (__b * __c); }
-__ai float32x2_t vmla_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c) {
-  return __a + (__b * __c); }
-__ai uint8x8_t vmla_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
-  return __a + (__b * __c); }
-__ai uint16x4_t vmla_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) {
-  return __a + (__b * __c); }
-__ai uint32x2_t vmla_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) {
-  return __a + (__b * __c); }
-__ai int8x16_t vmlaq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c) {
-  return __a + (__b * __c); }
-__ai int16x8_t vmlaq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c) {
-  return __a + (__b * __c); }
-__ai int32x4_t vmlaq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c) {
-  return __a + (__b * __c); }
-__ai float32x4_t vmlaq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
-  return __a + (__b * __c); }
-__ai uint8x16_t vmlaq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
-  return __a + (__b * __c); }
-__ai uint16x8_t vmlaq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return __a + (__b * __c); }
-__ai uint32x4_t vmlaq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return __a + (__b * __c); }
-
-__ai int16x8_t vmlal_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c) {
-  return __a + vmull_s8(__b, __c); }
-__ai int32x4_t vmlal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return __a + vmull_s16(__b, __c); }
-__ai int64x2_t vmlal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return __a + vmull_s32(__b, __c); }
-__ai uint16x8_t vmlal_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) {
-  return __a + vmull_u8(__b, __c); }
-__ai uint32x4_t vmlal_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) {
-  return __a + vmull_u16(__b, __c); }
-__ai uint64x2_t vmlal_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c) {
-  return __a + vmull_u32(__b, __c); }
-
-#define vmlal_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
-  __a + vmull_s16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlal_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
-  __a + vmull_s32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlal_lane_u16(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint16x4_t __b = (b); uint16x4_t __c = (c); \
-  __a + vmull_u16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlal_lane_u32(a, b, c, __d) __extension__ ({ \
-  uint64x2_t __a = (a); uint32x2_t __b = (b); uint32x2_t __c = (c); \
-  __a + vmull_u32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vmlal_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c) {
-  return __a + vmull_s16(__b, (int16x4_t){ __c, __c, __c, __c }); }
-__ai int64x2_t vmlal_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c) {
-  return __a + vmull_s32(__b, (int32x2_t){ __c, __c }); }
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __a, uint16x4_t __b, uint16_t __c) {
-  return __a + vmull_u16(__b, (uint16x4_t){ __c, __c, __c, __c }); }
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __a, uint32x2_t __b, uint32_t __c) {
-  return __a + vmull_u32(__b, (uint32x2_t){ __c, __c }); }
-
-#define vmla_lane_s16(a, b, c, __d) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmla_lane_s32(a, b, c, __d) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmla_lane_u16(a, b, c, __d) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); uint16x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmla_lane_u32(a, b, c, __d) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); uint32x2_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmla_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); float32x2_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlaq_lane_s16(a, b, c, __d) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
-#define vmlaq_lane_s32(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlaq_lane_u16(a, b, c, __d) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); uint16x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
-#define vmlaq_lane_u32(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); uint32x2_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlaq_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); float32x2_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-
-__ai int16x4_t vmla_n_s16(int16x4_t __a, int16x4_t __b, int16_t __c) {
-  return __a + (__b * (int16x4_t){ __c, __c, __c, __c }); }
-__ai int32x2_t vmla_n_s32(int32x2_t __a, int32x2_t __b, int32_t __c) {
-  return __a + (__b * (int32x2_t){ __c, __c }); }
-__ai uint16x4_t vmla_n_u16(uint16x4_t __a, uint16x4_t __b, uint16_t __c) {
-  return __a + (__b * (uint16x4_t){ __c, __c, __c, __c }); }
-__ai uint32x2_t vmla_n_u32(uint32x2_t __a, uint32x2_t __b, uint32_t __c) {
-  return __a + (__b * (uint32x2_t){ __c, __c }); }
-__ai float32x2_t vmla_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c) {
-  return __a + (__b * (float32x2_t){ __c, __c }); }
-__ai int16x8_t vmlaq_n_s16(int16x8_t __a, int16x8_t __b, int16_t __c) {
-  return __a + (__b * (int16x8_t){ __c, __c, __c, __c, __c, __c, __c, __c }); }
-__ai int32x4_t vmlaq_n_s32(int32x4_t __a, int32x4_t __b, int32_t __c) {
-  return __a + (__b * (int32x4_t){ __c, __c, __c, __c }); }
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __a, uint16x8_t __b, uint16_t __c) {
-  return __a + (__b * (uint16x8_t){ __c, __c, __c, __c, __c, __c, __c, __c }); }
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __a, uint32x4_t __b, uint32_t __c) {
-  return __a + (__b * (uint32x4_t){ __c, __c, __c, __c }); }
-__ai float32x4_t vmlaq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c) {
-  return __a + (__b * (float32x4_t){ __c, __c, __c, __c }); }
-
-__ai int8x8_t vmls_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c) {
-  return __a - (__b * __c); }
-__ai int16x4_t vmls_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return __a - (__b * __c); }
-__ai int32x2_t vmls_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return __a - (__b * __c); }
-__ai float32x2_t vmls_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c) {
-  return __a - (__b * __c); }
-__ai uint8x8_t vmls_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
-  return __a - (__b * __c); }
-__ai uint16x4_t vmls_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) {
-  return __a - (__b * __c); }
-__ai uint32x2_t vmls_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) {
-  return __a - (__b * __c); }
-__ai int8x16_t vmlsq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c) {
-  return __a - (__b * __c); }
-__ai int16x8_t vmlsq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c) {
-  return __a - (__b * __c); }
-__ai int32x4_t vmlsq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c) {
-  return __a - (__b * __c); }
-__ai float32x4_t vmlsq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
-  return __a - (__b * __c); }
-__ai uint8x16_t vmlsq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
-  return __a - (__b * __c); }
-__ai uint16x8_t vmlsq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return __a - (__b * __c); }
-__ai uint32x4_t vmlsq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return __a - (__b * __c); }
-
-__ai int16x8_t vmlsl_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c) {
-  return __a - vmull_s8(__b, __c); }
-__ai int32x4_t vmlsl_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return __a - vmull_s16(__b, __c); }
-__ai int64x2_t vmlsl_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return __a - vmull_s32(__b, __c); }
-__ai uint16x8_t vmlsl_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) {
-  return __a - vmull_u8(__b, __c); }
-__ai uint32x4_t vmlsl_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) {
-  return __a - vmull_u16(__b, __c); }
-__ai uint64x2_t vmlsl_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c) {
-  return __a - vmull_u32(__b, __c); }
-
-#define vmlsl_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
-  __a - vmull_s16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsl_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
-  __a - vmull_s32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlsl_lane_u16(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint16x4_t __b = (b); uint16x4_t __c = (c); \
-  __a - vmull_u16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsl_lane_u32(a, b, c, __d) __extension__ ({ \
-  uint64x2_t __a = (a); uint32x2_t __b = (b); uint32x2_t __c = (c); \
-  __a - vmull_u32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vmlsl_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c) {
-  return __a - vmull_s16(__b, (int16x4_t){ __c, __c, __c, __c }); }
-__ai int64x2_t vmlsl_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c) {
-  return __a - vmull_s32(__b, (int32x2_t){ __c, __c }); }
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __a, uint16x4_t __b, uint16_t __c) {
-  return __a - vmull_u16(__b, (uint16x4_t){ __c, __c, __c, __c }); }
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __a, uint32x2_t __b, uint32_t __c) {
-  return __a - vmull_u32(__b, (uint32x2_t){ __c, __c }); }
-
-#define vmls_lane_s16(a, b, c, __d) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmls_lane_s32(a, b, c, __d) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmls_lane_u16(a, b, c, __d) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); uint16x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmls_lane_u32(a, b, c, __d) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); uint32x2_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmls_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); float32x2_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlsq_lane_s16(a, b, c, __d) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
-#define vmlsq_lane_s32(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsq_lane_u16(a, b, c, __d) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); uint16x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
-#define vmlsq_lane_u32(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); uint32x2_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsq_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); float32x2_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-
-__ai int16x4_t vmls_n_s16(int16x4_t __a, int16x4_t __b, int16_t __c) {
-  return __a - (__b * (int16x4_t){ __c, __c, __c, __c }); }
-__ai int32x2_t vmls_n_s32(int32x2_t __a, int32x2_t __b, int32_t __c) {
-  return __a - (__b * (int32x2_t){ __c, __c }); }
-__ai uint16x4_t vmls_n_u16(uint16x4_t __a, uint16x4_t __b, uint16_t __c) {
-  return __a - (__b * (uint16x4_t){ __c, __c, __c, __c }); }
-__ai uint32x2_t vmls_n_u32(uint32x2_t __a, uint32x2_t __b, uint32_t __c) {
-  return __a - (__b * (uint32x2_t){ __c, __c }); }
-__ai float32x2_t vmls_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c) {
-  return __a - (__b * (float32x2_t){ __c, __c }); }
-__ai int16x8_t vmlsq_n_s16(int16x8_t __a, int16x8_t __b, int16_t __c) {
-  return __a - (__b * (int16x8_t){ __c, __c, __c, __c, __c, __c, __c, __c }); }
-__ai int32x4_t vmlsq_n_s32(int32x4_t __a, int32x4_t __b, int32_t __c) {
-  return __a - (__b * (int32x4_t){ __c, __c, __c, __c }); }
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __a, uint16x8_t __b, uint16_t __c) {
-  return __a - (__b * (uint16x8_t){ __c, __c, __c, __c, __c, __c, __c, __c }); }
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __a, uint32x4_t __b, uint32_t __c) {
-  return __a - (__b * (uint32x4_t){ __c, __c, __c, __c }); }
-__ai float32x4_t vmlsq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c) {
-  return __a - (__b * (float32x4_t){ __c, __c, __c, __c }); }
-
-__ai int8x8_t vmovn_s16(int16x8_t __a) {
-  return (int8x8_t)__builtin_neon_vmovn_v((int8x16_t)__a, 0); }
-__ai int16x4_t vmovn_s32(int32x4_t __a) {
-  return (int16x4_t)__builtin_neon_vmovn_v((int8x16_t)__a, 1); }
-__ai int32x2_t vmovn_s64(int64x2_t __a) {
-  return (int32x2_t)__builtin_neon_vmovn_v((int8x16_t)__a, 2); }
-__ai uint8x8_t vmovn_u16(uint16x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vmovn_v((int8x16_t)__a, 16); }
-__ai uint16x4_t vmovn_u32(uint32x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vmovn_v((int8x16_t)__a, 17); }
-__ai uint32x2_t vmovn_u64(uint64x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vmovn_v((int8x16_t)__a, 18); }
-
-__ai uint8x8_t vmov_n_u8(uint8_t __a) {
-  return (uint8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai uint16x4_t vmov_n_u16(uint16_t __a) {
-  return (uint16x4_t){ __a, __a, __a, __a }; }
-__ai uint32x2_t vmov_n_u32(uint32_t __a) {
-  return (uint32x2_t){ __a, __a }; }
-__ai int8x8_t vmov_n_s8(int8_t __a) {
-  return (int8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai int16x4_t vmov_n_s16(int16_t __a) {
-  return (int16x4_t){ __a, __a, __a, __a }; }
-__ai int32x2_t vmov_n_s32(int32_t __a) {
-  return (int32x2_t){ __a, __a }; }
-__ai poly8x8_t vmov_n_p8(poly8_t __a) {
-  return (poly8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai poly16x4_t vmov_n_p16(poly16_t __a) {
-  return (poly16x4_t){ __a, __a, __a, __a }; }
-__ai float32x2_t vmov_n_f32(float32_t __a) {
-  return (float32x2_t){ __a, __a }; }
-__ai uint8x16_t vmovq_n_u8(uint8_t __a) {
-  return (uint8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai uint16x8_t vmovq_n_u16(uint16_t __a) {
-  return (uint16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai uint32x4_t vmovq_n_u32(uint32_t __a) {
-  return (uint32x4_t){ __a, __a, __a, __a }; }
-__ai int8x16_t vmovq_n_s8(int8_t __a) {
-  return (int8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai int16x8_t vmovq_n_s16(int16_t __a) {
-  return (int16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai int32x4_t vmovq_n_s32(int32_t __a) {
-  return (int32x4_t){ __a, __a, __a, __a }; }
-__ai poly8x16_t vmovq_n_p8(poly8_t __a) {
-  return (poly8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai poly16x8_t vmovq_n_p16(poly16_t __a) {
-  return (poly16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
-__ai float32x4_t vmovq_n_f32(float32_t __a) {
-  return (float32x4_t){ __a, __a, __a, __a }; }
-__ai int64x1_t vmov_n_s64(int64_t __a) {
-  return (int64x1_t){ __a }; }
-__ai uint64x1_t vmov_n_u64(uint64_t __a) {
-  return (uint64x1_t){ __a }; }
-__ai int64x2_t vmovq_n_s64(int64_t __a) {
-  return (int64x2_t){ __a, __a }; }
-__ai uint64x2_t vmovq_n_u64(uint64_t __a) {
-  return (uint64x2_t){ __a, __a }; }
-
-__ai int8x8_t vmul_s8(int8x8_t __a, int8x8_t __b) {
-  return __a * __b; }
-__ai int16x4_t vmul_s16(int16x4_t __a, int16x4_t __b) {
-  return __a * __b; }
-__ai int32x2_t vmul_s32(int32x2_t __a, int32x2_t __b) {
-  return __a * __b; }
-__ai float32x2_t vmul_f32(float32x2_t __a, float32x2_t __b) {
-  return __a * __b; }
-__ai uint8x8_t vmul_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __a * __b; }
-__ai uint16x4_t vmul_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __a * __b; }
-__ai uint32x2_t vmul_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __a * __b; }
-__ai int8x16_t vmulq_s8(int8x16_t __a, int8x16_t __b) {
-  return __a * __b; }
-__ai int16x8_t vmulq_s16(int16x8_t __a, int16x8_t __b) {
-  return __a * __b; }
-__ai int32x4_t vmulq_s32(int32x4_t __a, int32x4_t __b) {
-  return __a * __b; }
-__ai float32x4_t vmulq_f32(float32x4_t __a, float32x4_t __b) {
-  return __a * __b; }
-__ai uint8x16_t vmulq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __a * __b; }
-__ai uint16x8_t vmulq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __a * __b; }
-__ai uint32x4_t vmulq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __a * __b; }
-
-#define vmull_lane_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  vmull_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmull_lane_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  vmull_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vmull_lane_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); \
-  vmull_u16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmull_lane_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); \
-  vmull_u32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-
-__ai int32x4_t vmull_n_s16(int16x4_t __a, int16_t __b) {
-  return (int32x4_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)(int16x4_t){ __b, __b, __b, __b }, 34); }
-__ai int64x2_t vmull_n_s32(int32x2_t __a, int32_t __b) {
-  return (int64x2_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)(int32x2_t){ __b, __b }, 35); }
-__ai uint32x4_t vmull_n_u16(uint16x4_t __a, uint16_t __b) {
-  return (uint32x4_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)(uint16x4_t){ __b, __b, __b, __b }, 50); }
-__ai uint64x2_t vmull_n_u32(uint32x2_t __a, uint32_t __b) {
-  return (uint64x2_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)(uint32x2_t){ __b, __b }, 51); }
-
-__ai poly8x8_t vmul_p8(poly8x8_t __a, poly8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vmul_v((int8x8_t)__a, (int8x8_t)__b, 4); }
-__ai poly8x16_t vmulq_p8(poly8x16_t __a, poly8x16_t __b) {
-  return (poly8x16_t)__builtin_neon_vmulq_v((int8x16_t)__a, (int8x16_t)__b, 36); }
-
-#define vmul_lane_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmul_lane_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c); })
-#define vmul_lane_f32(a, b, __c) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c); })
-#define vmul_lane_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmul_lane_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c); })
-#define vmulq_lane_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c); })
-#define vmulq_lane_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x2_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmulq_lane_f32(a, b, __c) __extension__ ({ \
-  float32x4_t __a = (a); float32x2_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmulq_lane_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c); })
-#define vmulq_lane_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x2_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-
-__ai int16x4_t vmul_n_s16(int16x4_t __a, int16_t __b) {
-  return __a * (int16x4_t){ __b, __b, __b, __b }; }
-__ai int32x2_t vmul_n_s32(int32x2_t __a, int32_t __b) {
-  return __a * (int32x2_t){ __b, __b }; }
-__ai float32x2_t vmul_n_f32(float32x2_t __a, float32_t __b) {
-  return __a * (float32x2_t){ __b, __b }; }
-__ai uint16x4_t vmul_n_u16(uint16x4_t __a, uint16_t __b) {
-  return __a * (uint16x4_t){ __b, __b, __b, __b }; }
-__ai uint32x2_t vmul_n_u32(uint32x2_t __a, uint32_t __b) {
-  return __a * (uint32x2_t){ __b, __b }; }
-__ai int16x8_t vmulq_n_s16(int16x8_t __a, int16_t __b) {
-  return __a * (int16x8_t){ __b, __b, __b, __b, __b, __b, __b, __b }; }
-__ai int32x4_t vmulq_n_s32(int32x4_t __a, int32_t __b) {
-  return __a * (int32x4_t){ __b, __b, __b, __b }; }
-__ai float32x4_t vmulq_n_f32(float32x4_t __a, float32_t __b) {
-  return __a * (float32x4_t){ __b, __b, __b, __b }; }
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __a, uint16_t __b) {
-  return __a * (uint16x8_t){ __b, __b, __b, __b, __b, __b, __b, __b }; }
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __a, uint32_t __b) {
-  return __a * (uint32x4_t){ __b, __b, __b, __b }; }
-
-__ai int8x8_t vmvn_s8(int8x8_t __a) {
-  return  ~__a; }
-__ai int16x4_t vmvn_s16(int16x4_t __a) {
-  return  ~__a; }
-__ai int32x2_t vmvn_s32(int32x2_t __a) {
-  return  ~__a; }
-__ai uint8x8_t vmvn_u8(uint8x8_t __a) {
-  return  ~__a; }
-__ai uint16x4_t vmvn_u16(uint16x4_t __a) {
-  return  ~__a; }
-__ai uint32x2_t vmvn_u32(uint32x2_t __a) {
-  return  ~__a; }
-__ai poly8x8_t vmvn_p8(poly8x8_t __a) {
-  return  ~__a; }
-__ai int8x16_t vmvnq_s8(int8x16_t __a) {
-  return  ~__a; }
-__ai int16x8_t vmvnq_s16(int16x8_t __a) {
-  return  ~__a; }
-__ai int32x4_t vmvnq_s32(int32x4_t __a) {
-  return  ~__a; }
-__ai uint8x16_t vmvnq_u8(uint8x16_t __a) {
-  return  ~__a; }
-__ai uint16x8_t vmvnq_u16(uint16x8_t __a) {
-  return  ~__a; }
-__ai uint32x4_t vmvnq_u32(uint32x4_t __a) {
-  return  ~__a; }
-__ai poly8x16_t vmvnq_p8(poly8x16_t __a) {
-  return  ~__a; }
-
-__ai int8x8_t vneg_s8(int8x8_t __a) {
-  return  -__a; }
-__ai int16x4_t vneg_s16(int16x4_t __a) {
-  return  -__a; }
-__ai int32x2_t vneg_s32(int32x2_t __a) {
-  return  -__a; }
-__ai float32x2_t vneg_f32(float32x2_t __a) {
-  return  -__a; }
-__ai int8x16_t vnegq_s8(int8x16_t __a) {
-  return  -__a; }
-__ai int16x8_t vnegq_s16(int16x8_t __a) {
-  return  -__a; }
-__ai int32x4_t vnegq_s32(int32x4_t __a) {
-  return  -__a; }
-__ai float32x4_t vnegq_f32(float32x4_t __a) {
-  return  -__a; }
-
-__ai int8x8_t vorn_s8(int8x8_t __a, int8x8_t __b) {
-  return __a | ~__b; }
-__ai int16x4_t vorn_s16(int16x4_t __a, int16x4_t __b) {
-  return __a | ~__b; }
-__ai int32x2_t vorn_s32(int32x2_t __a, int32x2_t __b) {
-  return __a | ~__b; }
-__ai int64x1_t vorn_s64(int64x1_t __a, int64x1_t __b) {
-  return __a | ~__b; }
-__ai uint8x8_t vorn_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __a | ~__b; }
-__ai uint16x4_t vorn_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __a | ~__b; }
-__ai uint32x2_t vorn_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __a | ~__b; }
-__ai uint64x1_t vorn_u64(uint64x1_t __a, uint64x1_t __b) {
-  return __a | ~__b; }
-__ai int8x16_t vornq_s8(int8x16_t __a, int8x16_t __b) {
-  return __a | ~__b; }
-__ai int16x8_t vornq_s16(int16x8_t __a, int16x8_t __b) {
-  return __a | ~__b; }
-__ai int32x4_t vornq_s32(int32x4_t __a, int32x4_t __b) {
-  return __a | ~__b; }
-__ai int64x2_t vornq_s64(int64x2_t __a, int64x2_t __b) {
-  return __a | ~__b; }
-__ai uint8x16_t vornq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __a | ~__b; }
-__ai uint16x8_t vornq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __a | ~__b; }
-__ai uint32x4_t vornq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __a | ~__b; }
-__ai uint64x2_t vornq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __a | ~__b; }
-
-__ai int8x8_t vorr_s8(int8x8_t __a, int8x8_t __b) {
-  return __a | __b; }
-__ai int16x4_t vorr_s16(int16x4_t __a, int16x4_t __b) {
-  return __a | __b; }
-__ai int32x2_t vorr_s32(int32x2_t __a, int32x2_t __b) {
-  return __a | __b; }
-__ai int64x1_t vorr_s64(int64x1_t __a, int64x1_t __b) {
-  return __a | __b; }
-__ai uint8x8_t vorr_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __a | __b; }
-__ai uint16x4_t vorr_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __a | __b; }
-__ai uint32x2_t vorr_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __a | __b; }
-__ai uint64x1_t vorr_u64(uint64x1_t __a, uint64x1_t __b) {
-  return __a | __b; }
-__ai int8x16_t vorrq_s8(int8x16_t __a, int8x16_t __b) {
-  return __a | __b; }
-__ai int16x8_t vorrq_s16(int16x8_t __a, int16x8_t __b) {
-  return __a | __b; }
-__ai int32x4_t vorrq_s32(int32x4_t __a, int32x4_t __b) {
-  return __a | __b; }
-__ai int64x2_t vorrq_s64(int64x2_t __a, int64x2_t __b) {
-  return __a | __b; }
-__ai uint8x16_t vorrq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __a | __b; }
-__ai uint16x8_t vorrq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __a | __b; }
-__ai uint32x4_t vorrq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __a | __b; }
-__ai uint64x2_t vorrq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __a | __b; }
-
-__ai int16x4_t vpadal_s8(int16x4_t __a, int8x8_t __b) {
-  return (int16x4_t)__builtin_neon_vpadal_v((int8x8_t)__a, __b, 1); }
-__ai int32x2_t vpadal_s16(int32x2_t __a, int16x4_t __b) {
-  return (int32x2_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int64x1_t vpadal_s32(int64x1_t __a, int32x2_t __b) {
-  return (int64x1_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 3); }
-__ai uint16x4_t vpadal_u8(uint16x4_t __a, uint8x8_t __b) {
-  return (uint16x4_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vpadal_u16(uint32x2_t __a, uint16x4_t __b) {
-  return (uint32x2_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint64x1_t vpadal_u32(uint64x1_t __a, uint32x2_t __b) {
-  return (uint64x1_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai int16x8_t vpadalq_s8(int16x8_t __a, int8x16_t __b) {
-  return (int16x8_t)__builtin_neon_vpadalq_v((int8x16_t)__a, __b, 33); }
-__ai int32x4_t vpadalq_s16(int32x4_t __a, int16x8_t __b) {
-  return (int32x4_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vpadalq_s32(int64x2_t __a, int32x4_t __b) {
-  return (int64x2_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-__ai uint16x8_t vpadalq_u8(uint16x8_t __a, uint8x16_t __b) {
-  return (uint16x8_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vpadalq_u16(uint32x4_t __a, uint16x8_t __b) {
-  return (uint32x4_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vpadalq_u32(uint64x2_t __a, uint32x4_t __b) {
-  return (uint64x2_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai int8x8_t vpadd_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vpadd_v(__a, __b, 0); }
-__ai int16x4_t vpadd_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vpadd_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vpadd_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vpadd_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vpadd_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai float32x2_t vpadd_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-
-__ai int16x4_t vpaddl_s8(int8x8_t __a) {
-  return (int16x4_t)__builtin_neon_vpaddl_v(__a, 1); }
-__ai int32x2_t vpaddl_s16(int16x4_t __a) {
-  return (int32x2_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 2); }
-__ai int64x1_t vpaddl_s32(int32x2_t __a) {
-  return (int64x1_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 3); }
-__ai uint16x4_t vpaddl_u8(uint8x8_t __a) {
-  return (uint16x4_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 17); }
-__ai uint32x2_t vpaddl_u16(uint16x4_t __a) {
-  return (uint32x2_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vpaddl_u32(uint32x2_t __a) {
-  return (uint64x1_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 19); }
-__ai int16x8_t vpaddlq_s8(int8x16_t __a) {
-  return (int16x8_t)__builtin_neon_vpaddlq_v(__a, 33); }
-__ai int32x4_t vpaddlq_s16(int16x8_t __a) {
-  return (int32x4_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 34); }
-__ai int64x2_t vpaddlq_s32(int32x4_t __a) {
-  return (int64x2_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 35); }
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __a) {
-  return (uint16x8_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 49); }
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __a) {
-  return (uint32x4_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __a) {
-  return (uint64x2_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 51); }
-
-__ai int8x8_t vpmax_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vpmax_v(__a, __b, 0); }
-__ai int16x4_t vpmax_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vpmax_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vpmax_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vpmax_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vpmax_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai float32x2_t vpmax_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-
-__ai int8x8_t vpmin_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vpmin_v(__a, __b, 0); }
-__ai int16x4_t vpmin_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vpmin_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vpmin_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vpmin_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vpmin_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai float32x2_t vpmin_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-
-__ai int8x8_t vqabs_s8(int8x8_t __a) {
-  return (int8x8_t)__builtin_neon_vqabs_v(__a, 0); }
-__ai int16x4_t vqabs_s16(int16x4_t __a) {
-  return (int16x4_t)__builtin_neon_vqabs_v((int8x8_t)__a, 1); }
-__ai int32x2_t vqabs_s32(int32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vqabs_v((int8x8_t)__a, 2); }
-__ai int8x16_t vqabsq_s8(int8x16_t __a) {
-  return (int8x16_t)__builtin_neon_vqabsq_v(__a, 32); }
-__ai int16x8_t vqabsq_s16(int16x8_t __a) {
-  return (int16x8_t)__builtin_neon_vqabsq_v((int8x16_t)__a, 33); }
-__ai int32x4_t vqabsq_s32(int32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vqabsq_v((int8x16_t)__a, 34); }
-
-__ai int8x8_t vqadd_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vqadd_v(__a, __b, 0); }
-__ai int16x4_t vqadd_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vqadd_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int64x1_t vqadd_s64(int64x1_t __a, int64x1_t __b) {
-  return (int64x1_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 3); }
-__ai uint8x8_t vqadd_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vqadd_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vqadd_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint64x1_t vqadd_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai int8x16_t vqaddq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vqaddq_v(__a, __b, 32); }
-__ai int16x8_t vqaddq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vqaddq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vqaddq_s64(int64x2_t __a, int64x2_t __b) {
-  return (int64x2_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-__ai uint8x16_t vqaddq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vqaddq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vqaddq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vqaddq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai int32x4_t vqdmlal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return (int32x4_t)__builtin_neon_vqdmlal_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)__c, 34); }
-__ai int64x2_t vqdmlal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return (int64x2_t)__builtin_neon_vqdmlal_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)__c, 35); }
-
-#define vqdmlal_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
-  vqdmlal_s16(__a, __b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vqdmlal_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
-  vqdmlal_s32(__a, __b, __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c) {
-  return (int32x4_t)__builtin_neon_vqdmlal_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)(int16x4_t){ __c, __c, __c, __c }, 34); }
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c) {
-  return (int64x2_t)__builtin_neon_vqdmlal_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)(int32x2_t){ __c, __c }, 35); }
-
-__ai int32x4_t vqdmlsl_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
-  return (int32x4_t)__builtin_neon_vqdmlsl_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)__c, 34); }
-__ai int64x2_t vqdmlsl_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
-  return (int64x2_t)__builtin_neon_vqdmlsl_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)__c, 35); }
-
-#define vqdmlsl_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
-  vqdmlsl_s16(__a, __b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vqdmlsl_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
-  vqdmlsl_s32(__a, __b, __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c) {
-  return (int32x4_t)__builtin_neon_vqdmlsl_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)(int16x4_t){ __c, __c, __c, __c }, 34); }
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c) {
-  return (int64x2_t)__builtin_neon_vqdmlsl_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)(int32x2_t){ __c, __c }, 35); }
-
-__ai int16x4_t vqdmulh_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vqdmulh_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vqdmulh_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vqdmulh_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int16x8_t vqdmulhq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vqdmulhq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vqdmulhq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vqdmulhq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-
-#define vqdmulh_lane_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  vqdmulh_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vqdmulh_lane_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  vqdmulh_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vqdmulhq_lane_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x4_t __b = (b); \
-  vqdmulhq_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c)); })
-#define vqdmulhq_lane_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x2_t __b = (b); \
-  vqdmulhq_s32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __a, int16_t __b) {
-  return (int16x4_t)__builtin_neon_vqdmulh_v((int8x8_t)__a, (int8x8_t)(int16x4_t){ __b, __b, __b, __b }, 1); }
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __a, int32_t __b) {
-  return (int32x2_t)__builtin_neon_vqdmulh_v((int8x8_t)__a, (int8x8_t)(int32x2_t){ __b, __b }, 2); }
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __a, int16_t __b) {
-  return (int16x8_t)__builtin_neon_vqdmulhq_v((int8x16_t)__a, (int8x16_t)(int16x8_t){ __b, __b, __b, __b, __b, __b, __b, __b }, 33); }
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __a, int32_t __b) {
-  return (int32x4_t)__builtin_neon_vqdmulhq_v((int8x16_t)__a, (int8x16_t)(int32x4_t){ __b, __b, __b, __b }, 34); }
-
-__ai int32x4_t vqdmull_s16(int16x4_t __a, int16x4_t __b) {
-  return (int32x4_t)__builtin_neon_vqdmull_v((int8x8_t)__a, (int8x8_t)__b, 34); }
-__ai int64x2_t vqdmull_s32(int32x2_t __a, int32x2_t __b) {
-  return (int64x2_t)__builtin_neon_vqdmull_v((int8x8_t)__a, (int8x8_t)__b, 35); }
-
-#define vqdmull_lane_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  vqdmull_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vqdmull_lane_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  vqdmull_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-
-__ai int32x4_t vqdmull_n_s16(int16x4_t __a, int16_t __b) {
-  return (int32x4_t)__builtin_neon_vqdmull_v((int8x8_t)__a, (int8x8_t)(int16x4_t){ __b, __b, __b, __b }, 34); }
-__ai int64x2_t vqdmull_n_s32(int32x2_t __a, int32_t __b) {
-  return (int64x2_t)__builtin_neon_vqdmull_v((int8x8_t)__a, (int8x8_t)(int32x2_t){ __b, __b }, 35); }
-
-__ai int8x8_t vqmovn_s16(int16x8_t __a) {
-  return (int8x8_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 0); }
-__ai int16x4_t vqmovn_s32(int32x4_t __a) {
-  return (int16x4_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 1); }
-__ai int32x2_t vqmovn_s64(int64x2_t __a) {
-  return (int32x2_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 2); }
-__ai uint8x8_t vqmovn_u16(uint16x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 16); }
-__ai uint16x4_t vqmovn_u32(uint32x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 17); }
-__ai uint32x2_t vqmovn_u64(uint64x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 18); }
-
-__ai uint8x8_t vqmovun_s16(int16x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vqmovun_v((int8x16_t)__a, 16); }
-__ai uint16x4_t vqmovun_s32(int32x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vqmovun_v((int8x16_t)__a, 17); }
-__ai uint32x2_t vqmovun_s64(int64x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vqmovun_v((int8x16_t)__a, 18); }
-
-__ai int8x8_t vqneg_s8(int8x8_t __a) {
-  return (int8x8_t)__builtin_neon_vqneg_v(__a, 0); }
-__ai int16x4_t vqneg_s16(int16x4_t __a) {
-  return (int16x4_t)__builtin_neon_vqneg_v((int8x8_t)__a, 1); }
-__ai int32x2_t vqneg_s32(int32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vqneg_v((int8x8_t)__a, 2); }
-__ai int8x16_t vqnegq_s8(int8x16_t __a) {
-  return (int8x16_t)__builtin_neon_vqnegq_v(__a, 32); }
-__ai int16x8_t vqnegq_s16(int16x8_t __a) {
-  return (int16x8_t)__builtin_neon_vqnegq_v((int8x16_t)__a, 33); }
-__ai int32x4_t vqnegq_s32(int32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vqnegq_v((int8x16_t)__a, 34); }
-
-__ai int16x4_t vqrdmulh_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vqrdmulh_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vqrdmulh_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vqrdmulh_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vqrdmulhq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vqrdmulhq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-
-#define vqrdmulh_lane_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  vqrdmulh_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vqrdmulh_lane_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  vqrdmulh_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vqrdmulhq_lane_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x4_t __b = (b); \
-  vqrdmulhq_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c)); })
-#define vqrdmulhq_lane_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x2_t __b = (b); \
-  vqrdmulhq_s32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __a, int16_t __b) {
-  return (int16x4_t)__builtin_neon_vqrdmulh_v((int8x8_t)__a, (int8x8_t)(int16x4_t){ __b, __b, __b, __b }, 1); }
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __a, int32_t __b) {
-  return (int32x2_t)__builtin_neon_vqrdmulh_v((int8x8_t)__a, (int8x8_t)(int32x2_t){ __b, __b }, 2); }
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __a, int16_t __b) {
-  return (int16x8_t)__builtin_neon_vqrdmulhq_v((int8x16_t)__a, (int8x16_t)(int16x8_t){ __b, __b, __b, __b, __b, __b, __b, __b }, 33); }
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __a, int32_t __b) {
-  return (int32x4_t)__builtin_neon_vqrdmulhq_v((int8x16_t)__a, (int8x16_t)(int32x4_t){ __b, __b, __b, __b }, 34); }
-
-__ai int8x8_t vqrshl_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vqrshl_v(__a, __b, 0); }
-__ai int16x4_t vqrshl_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vqrshl_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int64x1_t vqrshl_s64(int64x1_t __a, int64x1_t __b) {
-  return (int64x1_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 3); }
-__ai uint8x8_t vqrshl_u8(uint8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vqrshl_v((int8x8_t)__a, __b, 16); }
-__ai uint16x4_t vqrshl_u16(uint16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vqrshl_u32(uint32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint64x1_t vqrshl_u64(uint64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai int8x16_t vqrshlq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vqrshlq_v(__a, __b, 32); }
-__ai int16x8_t vqrshlq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vqrshlq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vqrshlq_s64(int64x2_t __a, int64x2_t __b) {
-  return (int64x2_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, __b, 48); }
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-#define vqrshrn_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int8x8_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 0); })
-#define vqrshrn_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int16x4_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 1); })
-#define vqrshrn_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 2); })
-#define vqrshrn_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 16); })
-#define vqrshrn_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 17); })
-#define vqrshrn_n_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 18); })
-
-#define vqrshrun_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vqrshrun_n_v((int8x16_t)__a, __b, 16); })
-#define vqrshrun_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vqrshrun_n_v((int8x16_t)__a, __b, 17); })
-#define vqrshrun_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vqrshrun_n_v((int8x16_t)__a, __b, 18); })
-
-__ai int8x8_t vqshl_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vqshl_v(__a, __b, 0); }
-__ai int16x4_t vqshl_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vqshl_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int64x1_t vqshl_s64(int64x1_t __a, int64x1_t __b) {
-  return (int64x1_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 3); }
-__ai uint8x8_t vqshl_u8(uint8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vqshl_v((int8x8_t)__a, __b, 16); }
-__ai uint16x4_t vqshl_u16(uint16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vqshl_u32(uint32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint64x1_t vqshl_u64(uint64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai int8x16_t vqshlq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vqshlq_v(__a, __b, 32); }
-__ai int16x8_t vqshlq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vqshlq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vqshlq_s64(int64x2_t __a, int64x2_t __b) {
-  return (int64x2_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-__ai uint8x16_t vqshlq_u8(uint8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vqshlq_v((int8x16_t)__a, __b, 48); }
-__ai uint16x8_t vqshlq_u16(uint16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vqshlq_u32(uint32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vqshlq_u64(uint64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-#define vqshlu_n_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vqshlu_n_v(__a, __b, 16); })
-#define vqshlu_n_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vqshlu_n_v((int8x8_t)__a, __b, 17); })
-#define vqshlu_n_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vqshlu_n_v((int8x8_t)__a, __b, 18); })
-#define vqshlu_n_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  (uint64x1_t)__builtin_neon_vqshlu_n_v((int8x8_t)__a, __b, 19); })
-#define vqshluq_n_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  (uint8x16_t)__builtin_neon_vqshluq_n_v(__a, __b, 48); })
-#define vqshluq_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (uint16x8_t)__builtin_neon_vqshluq_n_v((int8x16_t)__a, __b, 49); })
-#define vqshluq_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (uint32x4_t)__builtin_neon_vqshluq_n_v((int8x16_t)__a, __b, 50); })
-#define vqshluq_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (uint64x2_t)__builtin_neon_vqshluq_n_v((int8x16_t)__a, __b, 51); })
-
-#define vqshl_n_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  (int8x8_t)__builtin_neon_vqshl_n_v(__a, __b, 0); })
-#define vqshl_n_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  (int16x4_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 1); })
-#define vqshl_n_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 2); })
-#define vqshl_n_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  (int64x1_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 3); })
-#define vqshl_n_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 16); })
-#define vqshl_n_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 17); })
-#define vqshl_n_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 18); })
-#define vqshl_n_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  (uint64x1_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 19); })
-#define vqshlq_n_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  (int8x16_t)__builtin_neon_vqshlq_n_v(__a, __b, 32); })
-#define vqshlq_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int16x8_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 33); })
-#define vqshlq_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int32x4_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 34); })
-#define vqshlq_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int64x2_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 35); })
-#define vqshlq_n_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  (uint8x16_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 48); })
-#define vqshlq_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint16x8_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 49); })
-#define vqshlq_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint32x4_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 50); })
-#define vqshlq_n_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint64x2_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 51); })
-
-#define vqshrn_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int8x8_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 0); })
-#define vqshrn_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int16x4_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 1); })
-#define vqshrn_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 2); })
-#define vqshrn_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 16); })
-#define vqshrn_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 17); })
-#define vqshrn_n_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 18); })
-
-#define vqshrun_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vqshrun_n_v((int8x16_t)__a, __b, 16); })
-#define vqshrun_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vqshrun_n_v((int8x16_t)__a, __b, 17); })
-#define vqshrun_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vqshrun_n_v((int8x16_t)__a, __b, 18); })
-
-__ai int8x8_t vqsub_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vqsub_v(__a, __b, 0); }
-__ai int16x4_t vqsub_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vqsub_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int64x1_t vqsub_s64(int64x1_t __a, int64x1_t __b) {
-  return (int64x1_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 3); }
-__ai uint8x8_t vqsub_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vqsub_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vqsub_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint64x1_t vqsub_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai int8x16_t vqsubq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vqsubq_v(__a, __b, 32); }
-__ai int16x8_t vqsubq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vqsubq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vqsubq_s64(int64x2_t __a, int64x2_t __b) {
-  return (int64x2_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-__ai uint8x16_t vqsubq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vqsubq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vqsubq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vqsubq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai int8x8_t vraddhn_s16(int16x8_t __a, int16x8_t __b) {
-  return (int8x8_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 0); }
-__ai int16x4_t vraddhn_s32(int32x4_t __a, int32x4_t __b) {
-  return (int16x4_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 1); }
-__ai int32x2_t vraddhn_s64(int64x2_t __a, int64x2_t __b) {
-  return (int32x2_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 2); }
-__ai uint8x8_t vraddhn_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 16); }
-__ai uint16x4_t vraddhn_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 17); }
-__ai uint32x2_t vraddhn_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 18); }
-
-__ai float32x2_t vrecpe_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrecpe_v((int8x8_t)__a, 8); }
-__ai uint32x2_t vrecpe_u32(uint32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vrecpe_v((int8x8_t)__a, 18); }
-__ai float32x4_t vrecpeq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrecpeq_v((int8x16_t)__a, 40); }
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vrecpeq_v((int8x16_t)__a, 50); }
-
-__ai float32x2_t vrecps_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vrecps_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai float32x4_t vrecpsq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vrecpsq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __a) {
-  return (int8x8_t)__a; }
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __a) {
-  return (int16x4_t)__a; }
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __a) {
-  return (int32x2_t)__a; }
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __a) {
-  return (int64x1_t)__a; }
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __a) {
-  return (uint64x1_t)__a; }
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __a) {
-  return (float16x4_t)__a; }
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __a) {
-  return (float32x2_t)__a; }
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __a) {
-  return (poly16x4_t)__a; }
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __a) {
-  return (int8x16_t)__a; }
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __a) {
-  return (int16x8_t)__a; }
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __a) {
-  return (int32x4_t)__a; }
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __a) {
-  return (int64x2_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __a) {
-  return (uint64x2_t)__a; }
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __a) {
-  return (float16x8_t)__a; }
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __a) {
-  return (float32x4_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __a) {
-  return (poly16x8_t)__a; }
-
-__ai int8x8_t vrev16_s8(int8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
-__ai uint8x8_t vrev16_u8(uint8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
-__ai poly8x8_t vrev16_p8(poly8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
-__ai int8x16_t vrev16q_s8(int8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); }
-__ai uint8x16_t vrev16q_u8(uint8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); }
-__ai poly8x16_t vrev16q_p8(poly8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); }
-
-__ai int8x8_t vrev32_s8(int8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
-__ai int16x4_t vrev32_s16(int16x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
-__ai uint8x8_t vrev32_u8(uint8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
-__ai uint16x4_t vrev32_u16(uint16x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
-__ai poly8x8_t vrev32_p8(poly8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
-__ai poly16x4_t vrev32_p16(poly16x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
-__ai int8x16_t vrev32q_s8(int8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); }
-__ai int16x8_t vrev32q_s16(int16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
-__ai uint8x16_t vrev32q_u8(uint8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); }
-__ai uint16x8_t vrev32q_u16(uint16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
-__ai poly8x16_t vrev32q_p8(poly8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); }
-__ai poly16x8_t vrev32q_p16(poly16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
-
-__ai int8x8_t vrev64_s8(int8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); }
-__ai int16x4_t vrev64_s16(int16x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); }
-__ai int32x2_t vrev64_s32(int32x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0); }
-__ai uint8x8_t vrev64_u8(uint8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); }
-__ai uint16x4_t vrev64_u16(uint16x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); }
-__ai uint32x2_t vrev64_u32(uint32x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0); }
-__ai poly8x8_t vrev64_p8(poly8x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); }
-__ai poly16x4_t vrev64_p16(poly16x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); }
-__ai float32x2_t vrev64_f32(float32x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0); }
-__ai int8x16_t vrev64q_s8(int8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); }
-__ai int16x8_t vrev64q_s16(int16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
-__ai int32x4_t vrev64q_s32(int32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
-__ai uint8x16_t vrev64q_u8(uint8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); }
-__ai uint16x8_t vrev64q_u16(uint16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
-__ai uint32x4_t vrev64q_u32(uint32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
-__ai poly8x16_t vrev64q_p8(poly8x16_t __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); }
-__ai poly16x8_t vrev64q_p16(poly16x8_t __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
-__ai float32x4_t vrev64q_f32(float32x4_t __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
-
-__ai int8x8_t vrhadd_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vrhadd_v(__a, __b, 0); }
-__ai int16x4_t vrhadd_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vrhadd_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai uint8x8_t vrhadd_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vrhadd_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vrhadd_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai int8x16_t vrhaddq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vrhaddq_v(__a, __b, 32); }
-__ai int16x8_t vrhaddq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vrhaddq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-__ai int8x8_t vrshl_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vrshl_v(__a, __b, 0); }
-__ai int16x4_t vrshl_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vrshl_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int64x1_t vrshl_s64(int64x1_t __a, int64x1_t __b) {
-  return (int64x1_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 3); }
-__ai uint8x8_t vrshl_u8(uint8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vrshl_v((int8x8_t)__a, __b, 16); }
-__ai uint16x4_t vrshl_u16(uint16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vrshl_u32(uint32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint64x1_t vrshl_u64(uint64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai int8x16_t vrshlq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vrshlq_v(__a, __b, 32); }
-__ai int16x8_t vrshlq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vrshlq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vrshlq_s64(int64x2_t __a, int64x2_t __b) {
-  return (int64x2_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-__ai uint8x16_t vrshlq_u8(uint8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vrshlq_v((int8x16_t)__a, __b, 48); }
-__ai uint16x8_t vrshlq_u16(uint16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vrshlq_u32(uint32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vrshlq_u64(uint64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-#define vrshrn_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int8x8_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 0); })
-#define vrshrn_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int16x4_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 1); })
-#define vrshrn_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 2); })
-#define vrshrn_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 16); })
-#define vrshrn_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 17); })
-#define vrshrn_n_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 18); })
-
-#define vrshr_n_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  (int8x8_t)__builtin_neon_vrshr_n_v(__a, __b, 0); })
-#define vrshr_n_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  (int16x4_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 1); })
-#define vrshr_n_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 2); })
-#define vrshr_n_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  (int64x1_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 3); })
-#define vrshr_n_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 16); })
-#define vrshr_n_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 17); })
-#define vrshr_n_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 18); })
-#define vrshr_n_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  (uint64x1_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 19); })
-#define vrshrq_n_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  (int8x16_t)__builtin_neon_vrshrq_n_v(__a, __b, 32); })
-#define vrshrq_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int16x8_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 33); })
-#define vrshrq_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int32x4_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 34); })
-#define vrshrq_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int64x2_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 35); })
-#define vrshrq_n_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  (uint8x16_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 48); })
-#define vrshrq_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint16x8_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 49); })
-#define vrshrq_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint32x4_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 50); })
-#define vrshrq_n_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint64x2_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 51); })
-
-__ai float32x2_t vrsqrte_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrsqrte_v((int8x8_t)__a, 8); }
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vrsqrte_v((int8x8_t)__a, 18); }
-__ai float32x4_t vrsqrteq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrsqrteq_v((int8x16_t)__a, 40); }
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vrsqrteq_v((int8x16_t)__a, 50); }
-
-__ai float32x2_t vrsqrts_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vrsqrts_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vrsqrtsq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-
-#define vrsra_n_s8(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int8x8_t __b = (b); \
-  (int8x8_t)__builtin_neon_vrsra_n_v(__a, __b, __c, 0); })
-#define vrsra_n_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  (int16x4_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
-#define vrsra_n_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  (int32x2_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
-#define vrsra_n_s64(a, b, __c) __extension__ ({ \
-  int64x1_t __a = (a); int64x1_t __b = (b); \
-  (int64x1_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
-#define vrsra_n_u8(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint8x8_t __b = (b); \
-  (uint8x8_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
-#define vrsra_n_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); \
-  (uint16x4_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
-#define vrsra_n_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); \
-  (uint32x2_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
-#define vrsra_n_u64(a, b, __c) __extension__ ({ \
-  uint64x1_t __a = (a); uint64x1_t __b = (b); \
-  (uint64x1_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
-#define vrsraq_n_s8(a, b, __c) __extension__ ({ \
-  int8x16_t __a = (a); int8x16_t __b = (b); \
-  (int8x16_t)__builtin_neon_vrsraq_n_v(__a, __b, __c, 32); })
-#define vrsraq_n_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  (int16x8_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
-#define vrsraq_n_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  (int32x4_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
-#define vrsraq_n_s64(a, b, __c) __extension__ ({ \
-  int64x2_t __a = (a); int64x2_t __b = (b); \
-  (int64x2_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
-#define vrsraq_n_u8(a, b, __c) __extension__ ({ \
-  uint8x16_t __a = (a); uint8x16_t __b = (b); \
-  (uint8x16_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
-#define vrsraq_n_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint16x8_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
-#define vrsraq_n_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint32x4_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
-#define vrsraq_n_u64(a, b, __c) __extension__ ({ \
-  uint64x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint64x2_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
-
-__ai int8x8_t vrsubhn_s16(int16x8_t __a, int16x8_t __b) {
-  return (int8x8_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 0); }
-__ai int16x4_t vrsubhn_s32(int32x4_t __a, int32x4_t __b) {
-  return (int16x4_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 1); }
-__ai int32x2_t vrsubhn_s64(int64x2_t __a, int64x2_t __b) {
-  return (int32x2_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 2); }
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 16); }
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 17); }
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 18); }
-
-#define vset_lane_u8(a, b, __c) __extension__ ({ \
-  uint8_t __a = (a); uint8x8_t __b = (b); \
-  (uint8x8_t)__builtin_neon_vset_lane_i8(__a, (int8x8_t)__b, __c); })
-#define vset_lane_u16(a, b, __c) __extension__ ({ \
-  uint16_t __a = (a); uint16x4_t __b = (b); \
-  (uint16x4_t)__builtin_neon_vset_lane_i16(__a, (int16x4_t)__b, __c); })
-#define vset_lane_u32(a, b, __c) __extension__ ({ \
-  uint32_t __a = (a); uint32x2_t __b = (b); \
-  (uint32x2_t)__builtin_neon_vset_lane_i32(__a, (int32x2_t)__b, __c); })
-#define vset_lane_s8(a, b, __c) __extension__ ({ \
-  int8_t __a = (a); int8x8_t __b = (b); \
-  (int8x8_t)__builtin_neon_vset_lane_i8(__a, __b, __c); })
-#define vset_lane_s16(a, b, __c) __extension__ ({ \
-  int16_t __a = (a); int16x4_t __b = (b); \
-  (int16x4_t)__builtin_neon_vset_lane_i16(__a, __b, __c); })
-#define vset_lane_s32(a, b, __c) __extension__ ({ \
-  int32_t __a = (a); int32x2_t __b = (b); \
-  (int32x2_t)__builtin_neon_vset_lane_i32(__a, __b, __c); })
-#define vset_lane_p8(a, b, __c) __extension__ ({ \
-  poly8_t __a = (a); poly8x8_t __b = (b); \
-  (poly8x8_t)__builtin_neon_vset_lane_i8(__a, (int8x8_t)__b, __c); })
-#define vset_lane_p16(a, b, __c) __extension__ ({ \
-  poly16_t __a = (a); poly16x4_t __b = (b); \
-  (poly16x4_t)__builtin_neon_vset_lane_i16(__a, (int16x4_t)__b, __c); })
-#define vset_lane_f32(a, b, __c) __extension__ ({ \
-  float32_t __a = (a); float32x2_t __b = (b); \
-  (float32x2_t)__builtin_neon_vset_lane_f32(__a, __b, __c); })
-#define vsetq_lane_u8(a, b, __c) __extension__ ({ \
-  uint8_t __a = (a); uint8x16_t __b = (b); \
-  (uint8x16_t)__builtin_neon_vsetq_lane_i8(__a, (int8x16_t)__b, __c); })
-#define vsetq_lane_u16(a, b, __c) __extension__ ({ \
-  uint16_t __a = (a); uint16x8_t __b = (b); \
-  (uint16x8_t)__builtin_neon_vsetq_lane_i16(__a, (int16x8_t)__b, __c); })
-#define vsetq_lane_u32(a, b, __c) __extension__ ({ \
-  uint32_t __a = (a); uint32x4_t __b = (b); \
-  (uint32x4_t)__builtin_neon_vsetq_lane_i32(__a, (int32x4_t)__b, __c); })
-#define vsetq_lane_s8(a, b, __c) __extension__ ({ \
-  int8_t __a = (a); int8x16_t __b = (b); \
-  (int8x16_t)__builtin_neon_vsetq_lane_i8(__a, __b, __c); })
-#define vsetq_lane_s16(a, b, __c) __extension__ ({ \
-  int16_t __a = (a); int16x8_t __b = (b); \
-  (int16x8_t)__builtin_neon_vsetq_lane_i16(__a, __b, __c); })
-#define vsetq_lane_s32(a, b, __c) __extension__ ({ \
-  int32_t __a = (a); int32x4_t __b = (b); \
-  (int32x4_t)__builtin_neon_vsetq_lane_i32(__a, __b, __c); })
-#define vsetq_lane_p8(a, b, __c) __extension__ ({ \
-  poly8_t __a = (a); poly8x16_t __b = (b); \
-  (poly8x16_t)__builtin_neon_vsetq_lane_i8(__a, (int8x16_t)__b, __c); })
-#define vsetq_lane_p16(a, b, __c) __extension__ ({ \
-  poly16_t __a = (a); poly16x8_t __b = (b); \
-  (poly16x8_t)__builtin_neon_vsetq_lane_i16(__a, (int16x8_t)__b, __c); })
-#define vsetq_lane_f32(a, b, __c) __extension__ ({ \
-  float32_t __a = (a); float32x4_t __b = (b); \
-  (float32x4_t)__builtin_neon_vsetq_lane_f32(__a, __b, __c); })
-#define vset_lane_s64(a, b, __c) __extension__ ({ \
-  int64_t __a = (a); int64x1_t __b = (b); \
-  (int64x1_t)__builtin_neon_vset_lane_i64(__a, __b, __c); })
-#define vset_lane_u64(a, b, __c) __extension__ ({ \
-  uint64_t __a = (a); uint64x1_t __b = (b); \
-  (uint64x1_t)__builtin_neon_vset_lane_i64(__a, (int64x1_t)__b, __c); })
-#define vsetq_lane_s64(a, b, __c) __extension__ ({ \
-  int64_t __a = (a); int64x2_t __b = (b); \
-  (int64x2_t)__builtin_neon_vsetq_lane_i64(__a, __b, __c); })
-#define vsetq_lane_u64(a, b, __c) __extension__ ({ \
-  uint64_t __a = (a); uint64x2_t __b = (b); \
-  (uint64x2_t)__builtin_neon_vsetq_lane_i64(__a, (int64x2_t)__b, __c); })
-
-__ai int8x8_t vshl_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vshl_v(__a, __b, 0); }
-__ai int16x4_t vshl_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vshl_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int64x1_t vshl_s64(int64x1_t __a, int64x1_t __b) {
-  return (int64x1_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 3); }
-__ai uint8x8_t vshl_u8(uint8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vshl_v((int8x8_t)__a, __b, 16); }
-__ai uint16x4_t vshl_u16(uint16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vshl_u32(uint32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint64x1_t vshl_u64(uint64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai int8x16_t vshlq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vshlq_v(__a, __b, 32); }
-__ai int16x8_t vshlq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vshlq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vshlq_s64(int64x2_t __a, int64x2_t __b) {
-  return (int64x2_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-__ai uint8x16_t vshlq_u8(uint8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vshlq_v((int8x16_t)__a, __b, 48); }
-__ai uint16x8_t vshlq_u16(uint16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vshlq_u32(uint32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vshlq_u64(uint64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-#define vshll_n_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  (int16x8_t)__builtin_neon_vshll_n_v(__a, __b, 33); })
-#define vshll_n_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  (int32x4_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 34); })
-#define vshll_n_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (int64x2_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 35); })
-#define vshll_n_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  (uint16x8_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 49); })
-#define vshll_n_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  (uint32x4_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 50); })
-#define vshll_n_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  (uint64x2_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 51); })
-
-#define vshl_n_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  (int8x8_t)__builtin_neon_vshl_n_v(__a, __b, 0); })
-#define vshl_n_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  (int16x4_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 1); })
-#define vshl_n_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 2); })
-#define vshl_n_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  (int64x1_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 3); })
-#define vshl_n_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 16); })
-#define vshl_n_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 17); })
-#define vshl_n_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 18); })
-#define vshl_n_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  (uint64x1_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 19); })
-#define vshlq_n_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  (int8x16_t)__builtin_neon_vshlq_n_v(__a, __b, 32); })
-#define vshlq_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int16x8_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 33); })
-#define vshlq_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int32x4_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 34); })
-#define vshlq_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int64x2_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 35); })
-#define vshlq_n_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  (uint8x16_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 48); })
-#define vshlq_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint16x8_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 49); })
-#define vshlq_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint32x4_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 50); })
-#define vshlq_n_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint64x2_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 51); })
-
-#define vshrn_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int8x8_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 0); })
-#define vshrn_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int16x4_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 1); })
-#define vshrn_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 2); })
-#define vshrn_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 16); })
-#define vshrn_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 17); })
-#define vshrn_n_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 18); })
-
-#define vshr_n_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  (int8x8_t)__builtin_neon_vshr_n_v(__a, __b, 0); })
-#define vshr_n_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  (int16x4_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 1); })
-#define vshr_n_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (int32x2_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 2); })
-#define vshr_n_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  (int64x1_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 3); })
-#define vshr_n_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  (uint8x8_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 16); })
-#define vshr_n_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  (uint16x4_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 17); })
-#define vshr_n_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  (uint32x2_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 18); })
-#define vshr_n_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  (uint64x1_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 19); })
-#define vshrq_n_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  (int8x16_t)__builtin_neon_vshrq_n_v(__a, __b, 32); })
-#define vshrq_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int16x8_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 33); })
-#define vshrq_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int32x4_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 34); })
-#define vshrq_n_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int64x2_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 35); })
-#define vshrq_n_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  (uint8x16_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 48); })
-#define vshrq_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint16x8_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 49); })
-#define vshrq_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint32x4_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 50); })
-#define vshrq_n_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint64x2_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 51); })
-
-#define vsli_n_s8(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int8x8_t __b = (b); \
-  (int8x8_t)__builtin_neon_vsli_n_v(__a, __b, __c, 0); })
-#define vsli_n_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  (int16x4_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
-#define vsli_n_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  (int32x2_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
-#define vsli_n_s64(a, b, __c) __extension__ ({ \
-  int64x1_t __a = (a); int64x1_t __b = (b); \
-  (int64x1_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
-#define vsli_n_u8(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint8x8_t __b = (b); \
-  (uint8x8_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
-#define vsli_n_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); \
-  (uint16x4_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
-#define vsli_n_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); \
-  (uint32x2_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
-#define vsli_n_u64(a, b, __c) __extension__ ({ \
-  uint64x1_t __a = (a); uint64x1_t __b = (b); \
-  (uint64x1_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
-#define vsli_n_p8(a, b, __c) __extension__ ({ \
-  poly8x8_t __a = (a); poly8x8_t __b = (b); \
-  (poly8x8_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 4); })
-#define vsli_n_p16(a, b, __c) __extension__ ({ \
-  poly16x4_t __a = (a); poly16x4_t __b = (b); \
-  (poly16x4_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 5); })
-#define vsliq_n_s8(a, b, __c) __extension__ ({ \
-  int8x16_t __a = (a); int8x16_t __b = (b); \
-  (int8x16_t)__builtin_neon_vsliq_n_v(__a, __b, __c, 32); })
-#define vsliq_n_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  (int16x8_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
-#define vsliq_n_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  (int32x4_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
-#define vsliq_n_s64(a, b, __c) __extension__ ({ \
-  int64x2_t __a = (a); int64x2_t __b = (b); \
-  (int64x2_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
-#define vsliq_n_u8(a, b, __c) __extension__ ({ \
-  uint8x16_t __a = (a); uint8x16_t __b = (b); \
-  (uint8x16_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
-#define vsliq_n_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint16x8_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
-#define vsliq_n_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint32x4_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
-#define vsliq_n_u64(a, b, __c) __extension__ ({ \
-  uint64x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint64x2_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
-#define vsliq_n_p8(a, b, __c) __extension__ ({ \
-  poly8x16_t __a = (a); poly8x16_t __b = (b); \
-  (poly8x16_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 36); })
-#define vsliq_n_p16(a, b, __c) __extension__ ({ \
-  poly16x8_t __a = (a); poly16x8_t __b = (b); \
-  (poly16x8_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 37); })
-
-#define vsra_n_s8(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int8x8_t __b = (b); \
-  (int8x8_t)__builtin_neon_vsra_n_v(__a, __b, __c, 0); })
-#define vsra_n_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  (int16x4_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
-#define vsra_n_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  (int32x2_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
-#define vsra_n_s64(a, b, __c) __extension__ ({ \
-  int64x1_t __a = (a); int64x1_t __b = (b); \
-  (int64x1_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
-#define vsra_n_u8(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint8x8_t __b = (b); \
-  (uint8x8_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
-#define vsra_n_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); \
-  (uint16x4_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
-#define vsra_n_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); \
-  (uint32x2_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
-#define vsra_n_u64(a, b, __c) __extension__ ({ \
-  uint64x1_t __a = (a); uint64x1_t __b = (b); \
-  (uint64x1_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
-#define vsraq_n_s8(a, b, __c) __extension__ ({ \
-  int8x16_t __a = (a); int8x16_t __b = (b); \
-  (int8x16_t)__builtin_neon_vsraq_n_v(__a, __b, __c, 32); })
-#define vsraq_n_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  (int16x8_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
-#define vsraq_n_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  (int32x4_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
-#define vsraq_n_s64(a, b, __c) __extension__ ({ \
-  int64x2_t __a = (a); int64x2_t __b = (b); \
-  (int64x2_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
-#define vsraq_n_u8(a, b, __c) __extension__ ({ \
-  uint8x16_t __a = (a); uint8x16_t __b = (b); \
-  (uint8x16_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
-#define vsraq_n_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint16x8_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
-#define vsraq_n_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint32x4_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
-#define vsraq_n_u64(a, b, __c) __extension__ ({ \
-  uint64x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint64x2_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
-
-#define vsri_n_s8(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int8x8_t __b = (b); \
-  (int8x8_t)__builtin_neon_vsri_n_v(__a, __b, __c, 0); })
-#define vsri_n_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); \
-  (int16x4_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
-#define vsri_n_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); \
-  (int32x2_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
-#define vsri_n_s64(a, b, __c) __extension__ ({ \
-  int64x1_t __a = (a); int64x1_t __b = (b); \
-  (int64x1_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
-#define vsri_n_u8(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint8x8_t __b = (b); \
-  (uint8x8_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
-#define vsri_n_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); \
-  (uint16x4_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
-#define vsri_n_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); \
-  (uint32x2_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
-#define vsri_n_u64(a, b, __c) __extension__ ({ \
-  uint64x1_t __a = (a); uint64x1_t __b = (b); \
-  (uint64x1_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
-#define vsri_n_p8(a, b, __c) __extension__ ({ \
-  poly8x8_t __a = (a); poly8x8_t __b = (b); \
-  (poly8x8_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 4); })
-#define vsri_n_p16(a, b, __c) __extension__ ({ \
-  poly16x4_t __a = (a); poly16x4_t __b = (b); \
-  (poly16x4_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 5); })
-#define vsriq_n_s8(a, b, __c) __extension__ ({ \
-  int8x16_t __a = (a); int8x16_t __b = (b); \
-  (int8x16_t)__builtin_neon_vsriq_n_v(__a, __b, __c, 32); })
-#define vsriq_n_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  (int16x8_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
-#define vsriq_n_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  (int32x4_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
-#define vsriq_n_s64(a, b, __c) __extension__ ({ \
-  int64x2_t __a = (a); int64x2_t __b = (b); \
-  (int64x2_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
-#define vsriq_n_u8(a, b, __c) __extension__ ({ \
-  uint8x16_t __a = (a); uint8x16_t __b = (b); \
-  (uint8x16_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
-#define vsriq_n_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint16x8_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
-#define vsriq_n_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint32x4_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
-#define vsriq_n_u64(a, b, __c) __extension__ ({ \
-  uint64x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint64x2_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
-#define vsriq_n_p8(a, b, __c) __extension__ ({ \
-  poly8x16_t __a = (a); poly8x16_t __b = (b); \
-  (poly8x16_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 36); })
-#define vsriq_n_p16(a, b, __c) __extension__ ({ \
-  poly16x8_t __a = (a); poly16x8_t __b = (b); \
-  (poly16x8_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 37); })
-
-#define vst1q_u8(__a, b) __extension__ ({ \
-  uint8x16_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 48); })
-#define vst1q_u16(__a, b) __extension__ ({ \
-  uint16x8_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 49); })
-#define vst1q_u32(__a, b) __extension__ ({ \
-  uint32x4_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 50); })
-#define vst1q_u64(__a, b) __extension__ ({ \
-  uint64x2_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 51); })
-#define vst1q_s8(__a, b) __extension__ ({ \
-  int8x16_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, __b, 32); })
-#define vst1q_s16(__a, b) __extension__ ({ \
-  int16x8_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 33); })
-#define vst1q_s32(__a, b) __extension__ ({ \
-  int32x4_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 34); })
-#define vst1q_s64(__a, b) __extension__ ({ \
-  int64x2_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 35); })
-#define vst1q_f16(__a, b) __extension__ ({ \
-  float16x8_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 39); })
-#define vst1q_f32(__a, b) __extension__ ({ \
-  float32x4_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 40); })
-#define vst1q_p8(__a, b) __extension__ ({ \
-  poly8x16_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 36); })
-#define vst1q_p16(__a, b) __extension__ ({ \
-  poly16x8_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 37); })
-#define vst1_u8(__a, b) __extension__ ({ \
-  uint8x8_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 16); })
-#define vst1_u16(__a, b) __extension__ ({ \
-  uint16x4_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 17); })
-#define vst1_u32(__a, b) __extension__ ({ \
-  uint32x2_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 18); })
-#define vst1_u64(__a, b) __extension__ ({ \
-  uint64x1_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 19); })
-#define vst1_s8(__a, b) __extension__ ({ \
-  int8x8_t __b = (b); \
-  __builtin_neon_vst1_v(__a, __b, 0); })
-#define vst1_s16(__a, b) __extension__ ({ \
-  int16x4_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 1); })
-#define vst1_s32(__a, b) __extension__ ({ \
-  int32x2_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 2); })
-#define vst1_s64(__a, b) __extension__ ({ \
-  int64x1_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 3); })
-#define vst1_f16(__a, b) __extension__ ({ \
-  float16x4_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 7); })
-#define vst1_f32(__a, b) __extension__ ({ \
-  float32x2_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 8); })
-#define vst1_p8(__a, b) __extension__ ({ \
-  poly8x8_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 4); })
-#define vst1_p16(__a, b) __extension__ ({ \
-  poly16x4_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 5); })
-
-#define vst1q_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x16_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 48); })
-#define vst1q_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x8_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 49); })
-#define vst1q_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x4_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 50); })
-#define vst1q_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x2_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 51); })
-#define vst1q_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x16_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, __b, __c, 32); })
-#define vst1q_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x8_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 33); })
-#define vst1q_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x4_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 34); })
-#define vst1q_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x2_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 35); })
-#define vst1q_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x8_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 39); })
-#define vst1q_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x4_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 40); })
-#define vst1q_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x16_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 36); })
-#define vst1q_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x8_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 37); })
-#define vst1_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x8_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 16); })
-#define vst1_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x4_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 17); })
-#define vst1_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x2_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 18); })
-#define vst1_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x1_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 19); })
-#define vst1_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x8_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, __b, __c, 0); })
-#define vst1_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x4_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 1); })
-#define vst1_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x2_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 2); })
-#define vst1_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x1_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 3); })
-#define vst1_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x4_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 7); })
-#define vst1_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x2_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 8); })
-#define vst1_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x8_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 4); })
-#define vst1_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x4_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 5); })
-
-#define vst2q_u8(__a, b) __extension__ ({ \
-  uint8x16x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 48); })
-#define vst2q_u16(__a, b) __extension__ ({ \
-  uint16x8x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 49); })
-#define vst2q_u32(__a, b) __extension__ ({ \
-  uint32x4x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 50); })
-#define vst2q_s8(__a, b) __extension__ ({ \
-  int8x16x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, __b.val[0], __b.val[1], 32); })
-#define vst2q_s16(__a, b) __extension__ ({ \
-  int16x8x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 33); })
-#define vst2q_s32(__a, b) __extension__ ({ \
-  int32x4x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 34); })
-#define vst2q_f16(__a, b) __extension__ ({ \
-  float16x8x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 39); })
-#define vst2q_f32(__a, b) __extension__ ({ \
-  float32x4x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 40); })
-#define vst2q_p8(__a, b) __extension__ ({ \
-  poly8x16x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 36); })
-#define vst2q_p16(__a, b) __extension__ ({ \
-  poly16x8x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 37); })
-#define vst2_u8(__a, b) __extension__ ({ \
-  uint8x8x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 16); })
-#define vst2_u16(__a, b) __extension__ ({ \
-  uint16x4x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 17); })
-#define vst2_u32(__a, b) __extension__ ({ \
-  uint32x2x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 18); })
-#define vst2_u64(__a, b) __extension__ ({ \
-  uint64x1x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 19); })
-#define vst2_s8(__a, b) __extension__ ({ \
-  int8x8x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, __b.val[0], __b.val[1], 0); })
-#define vst2_s16(__a, b) __extension__ ({ \
-  int16x4x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 1); })
-#define vst2_s32(__a, b) __extension__ ({ \
-  int32x2x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 2); })
-#define vst2_s64(__a, b) __extension__ ({ \
-  int64x1x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 3); })
-#define vst2_f16(__a, b) __extension__ ({ \
-  float16x4x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 7); })
-#define vst2_f32(__a, b) __extension__ ({ \
-  float32x2x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 8); })
-#define vst2_p8(__a, b) __extension__ ({ \
-  poly8x8x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 4); })
-#define vst2_p16(__a, b) __extension__ ({ \
-  poly16x4x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 5); })
-
-#define vst2q_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x8x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 49); })
-#define vst2q_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x4x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 50); })
-#define vst2q_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x8x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 33); })
-#define vst2q_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x4x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 34); })
-#define vst2q_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x8x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 39); })
-#define vst2q_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x4x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 40); })
-#define vst2q_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x8x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 37); })
-#define vst2_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x8x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 16); })
-#define vst2_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x4x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 17); })
-#define vst2_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x2x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 18); })
-#define vst2_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x8x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, __b.val[0], __b.val[1], __c, 0); })
-#define vst2_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x4x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 1); })
-#define vst2_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x2x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 2); })
-#define vst2_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x4x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 7); })
-#define vst2_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x2x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 8); })
-#define vst2_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x8x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 4); })
-#define vst2_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x4x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 5); })
-
-#define vst3q_u8(__a, b) __extension__ ({ \
-  uint8x16x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 48); })
-#define vst3q_u16(__a, b) __extension__ ({ \
-  uint16x8x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 49); })
-#define vst3q_u32(__a, b) __extension__ ({ \
-  uint32x4x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 50); })
-#define vst3q_s8(__a, b) __extension__ ({ \
-  int8x16x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, __b.val[0], __b.val[1], __b.val[2], 32); })
-#define vst3q_s16(__a, b) __extension__ ({ \
-  int16x8x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 33); })
-#define vst3q_s32(__a, b) __extension__ ({ \
-  int32x4x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 34); })
-#define vst3q_f16(__a, b) __extension__ ({ \
-  float16x8x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 39); })
-#define vst3q_f32(__a, b) __extension__ ({ \
-  float32x4x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 40); })
-#define vst3q_p8(__a, b) __extension__ ({ \
-  poly8x16x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 36); })
-#define vst3q_p16(__a, b) __extension__ ({ \
-  poly16x8x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 37); })
-#define vst3_u8(__a, b) __extension__ ({ \
-  uint8x8x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 16); })
-#define vst3_u16(__a, b) __extension__ ({ \
-  uint16x4x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 17); })
-#define vst3_u32(__a, b) __extension__ ({ \
-  uint32x2x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 18); })
-#define vst3_u64(__a, b) __extension__ ({ \
-  uint64x1x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 19); })
-#define vst3_s8(__a, b) __extension__ ({ \
-  int8x8x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, __b.val[0], __b.val[1], __b.val[2], 0); })
-#define vst3_s16(__a, b) __extension__ ({ \
-  int16x4x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 1); })
-#define vst3_s32(__a, b) __extension__ ({ \
-  int32x2x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 2); })
-#define vst3_s64(__a, b) __extension__ ({ \
-  int64x1x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 3); })
-#define vst3_f16(__a, b) __extension__ ({ \
-  float16x4x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 7); })
-#define vst3_f32(__a, b) __extension__ ({ \
-  float32x2x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 8); })
-#define vst3_p8(__a, b) __extension__ ({ \
-  poly8x8x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 4); })
-#define vst3_p16(__a, b) __extension__ ({ \
-  poly16x4x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 5); })
-
-#define vst3q_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x8x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 49); })
-#define vst3q_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x4x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 50); })
-#define vst3q_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x8x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 33); })
-#define vst3q_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x4x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 34); })
-#define vst3q_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x8x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 39); })
-#define vst3q_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x4x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 40); })
-#define vst3q_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x8x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 37); })
-#define vst3_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x8x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 16); })
-#define vst3_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x4x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 17); })
-#define vst3_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x2x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 18); })
-#define vst3_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x8x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 0); })
-#define vst3_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x4x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 1); })
-#define vst3_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x2x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 2); })
-#define vst3_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x4x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 7); })
-#define vst3_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x2x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 8); })
-#define vst3_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x8x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 4); })
-#define vst3_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x4x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 5); })
-
-#define vst4q_u8(__a, b) __extension__ ({ \
-  uint8x16x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 48); })
-#define vst4q_u16(__a, b) __extension__ ({ \
-  uint16x8x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 49); })
-#define vst4q_u32(__a, b) __extension__ ({ \
-  uint32x4x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 50); })
-#define vst4q_s8(__a, b) __extension__ ({ \
-  int8x16x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], 32); })
-#define vst4q_s16(__a, b) __extension__ ({ \
-  int16x8x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 33); })
-#define vst4q_s32(__a, b) __extension__ ({ \
-  int32x4x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 34); })
-#define vst4q_f16(__a, b) __extension__ ({ \
-  float16x8x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 39); })
-#define vst4q_f32(__a, b) __extension__ ({ \
-  float32x4x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 40); })
-#define vst4q_p8(__a, b) __extension__ ({ \
-  poly8x16x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 36); })
-#define vst4q_p16(__a, b) __extension__ ({ \
-  poly16x8x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 37); })
-#define vst4_u8(__a, b) __extension__ ({ \
-  uint8x8x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 16); })
-#define vst4_u16(__a, b) __extension__ ({ \
-  uint16x4x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 17); })
-#define vst4_u32(__a, b) __extension__ ({ \
-  uint32x2x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 18); })
-#define vst4_u64(__a, b) __extension__ ({ \
-  uint64x1x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 19); })
-#define vst4_s8(__a, b) __extension__ ({ \
-  int8x8x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], 0); })
-#define vst4_s16(__a, b) __extension__ ({ \
-  int16x4x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 1); })
-#define vst4_s32(__a, b) __extension__ ({ \
-  int32x2x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 2); })
-#define vst4_s64(__a, b) __extension__ ({ \
-  int64x1x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 3); })
-#define vst4_f16(__a, b) __extension__ ({ \
-  float16x4x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 7); })
-#define vst4_f32(__a, b) __extension__ ({ \
-  float32x2x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 8); })
-#define vst4_p8(__a, b) __extension__ ({ \
-  poly8x8x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 4); })
-#define vst4_p16(__a, b) __extension__ ({ \
-  poly16x4x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 5); })
-
-#define vst4q_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x8x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 49); })
-#define vst4q_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x4x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 50); })
-#define vst4q_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x8x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 33); })
-#define vst4q_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x4x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 34); })
-#define vst4q_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x8x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 39); })
-#define vst4q_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x4x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 40); })
-#define vst4q_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x8x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 37); })
-#define vst4_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x8x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 16); })
-#define vst4_lane_u16(__a, b, __c) __extension__ ({ \
-  uint16x4x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 17); })
-#define vst4_lane_u32(__a, b, __c) __extension__ ({ \
-  uint32x2x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 18); })
-#define vst4_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x8x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 0); })
-#define vst4_lane_s16(__a, b, __c) __extension__ ({ \
-  int16x4x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 1); })
-#define vst4_lane_s32(__a, b, __c) __extension__ ({ \
-  int32x2x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 2); })
-#define vst4_lane_f16(__a, b, __c) __extension__ ({ \
-  float16x4x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 7); })
-#define vst4_lane_f32(__a, b, __c) __extension__ ({ \
-  float32x2x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 8); })
-#define vst4_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x8x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 4); })
-#define vst4_lane_p16(__a, b, __c) __extension__ ({ \
-  poly16x4x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 5); })
-
-__ai int8x8_t vsub_s8(int8x8_t __a, int8x8_t __b) {
-  return __a - __b; }
-__ai int16x4_t vsub_s16(int16x4_t __a, int16x4_t __b) {
-  return __a - __b; }
-__ai int32x2_t vsub_s32(int32x2_t __a, int32x2_t __b) {
-  return __a - __b; }
-__ai int64x1_t vsub_s64(int64x1_t __a, int64x1_t __b) {
-  return __a - __b; }
-__ai float32x2_t vsub_f32(float32x2_t __a, float32x2_t __b) {
-  return __a - __b; }
-__ai uint8x8_t vsub_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __a - __b; }
-__ai uint16x4_t vsub_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __a - __b; }
-__ai uint32x2_t vsub_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __a - __b; }
-__ai uint64x1_t vsub_u64(uint64x1_t __a, uint64x1_t __b) {
-  return __a - __b; }
-__ai int8x16_t vsubq_s8(int8x16_t __a, int8x16_t __b) {
-  return __a - __b; }
-__ai int16x8_t vsubq_s16(int16x8_t __a, int16x8_t __b) {
-  return __a - __b; }
-__ai int32x4_t vsubq_s32(int32x4_t __a, int32x4_t __b) {
-  return __a - __b; }
-__ai int64x2_t vsubq_s64(int64x2_t __a, int64x2_t __b) {
-  return __a - __b; }
-__ai float32x4_t vsubq_f32(float32x4_t __a, float32x4_t __b) {
-  return __a - __b; }
-__ai uint8x16_t vsubq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __a - __b; }
-__ai uint16x8_t vsubq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __a - __b; }
-__ai uint32x4_t vsubq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __a - __b; }
-__ai uint64x2_t vsubq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __a - __b; }
-
-__ai int8x8_t vsubhn_s16(int16x8_t __a, int16x8_t __b) {
-  return (int8x8_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 0); }
-__ai int16x4_t vsubhn_s32(int32x4_t __a, int32x4_t __b) {
-  return (int16x4_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 1); }
-__ai int32x2_t vsubhn_s64(int64x2_t __a, int64x2_t __b) {
-  return (int32x2_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 2); }
-__ai uint8x8_t vsubhn_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 16); }
-__ai uint16x4_t vsubhn_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 17); }
-__ai uint32x2_t vsubhn_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 18); }
-
-__ai int16x8_t vsubl_s8(int8x8_t __a, int8x8_t __b) {
-  return vmovl_s8(__a) - vmovl_s8(__b); }
-__ai int32x4_t vsubl_s16(int16x4_t __a, int16x4_t __b) {
-  return vmovl_s16(__a) - vmovl_s16(__b); }
-__ai int64x2_t vsubl_s32(int32x2_t __a, int32x2_t __b) {
-  return vmovl_s32(__a) - vmovl_s32(__b); }
-__ai uint16x8_t vsubl_u8(uint8x8_t __a, uint8x8_t __b) {
-  return vmovl_u8(__a) - vmovl_u8(__b); }
-__ai uint32x4_t vsubl_u16(uint16x4_t __a, uint16x4_t __b) {
-  return vmovl_u16(__a) - vmovl_u16(__b); }
-__ai uint64x2_t vsubl_u32(uint32x2_t __a, uint32x2_t __b) {
-  return vmovl_u32(__a) - vmovl_u32(__b); }
-
-__ai int16x8_t vsubw_s8(int16x8_t __a, int8x8_t __b) {
-  return __a - vmovl_s8(__b); }
-__ai int32x4_t vsubw_s16(int32x4_t __a, int16x4_t __b) {
-  return __a - vmovl_s16(__b); }
-__ai int64x2_t vsubw_s32(int64x2_t __a, int32x2_t __b) {
-  return __a - vmovl_s32(__b); }
-__ai uint16x8_t vsubw_u8(uint16x8_t __a, uint8x8_t __b) {
-  return __a - vmovl_u8(__b); }
-__ai uint32x4_t vsubw_u16(uint32x4_t __a, uint16x4_t __b) {
-  return __a - vmovl_u16(__b); }
-__ai uint64x2_t vsubw_u32(uint64x2_t __a, uint32x2_t __b) {
-  return __a - vmovl_u32(__b); }
-
-__ai uint8x8_t vtbl1_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vtbl1_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai int8x8_t vtbl1_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vtbl1_v(__a, __b, 0); }
-__ai poly8x8_t vtbl1_p8(poly8x8_t __a, uint8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vtbl1_v((int8x8_t)__a, (int8x8_t)__b, 4); }
-
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vtbl2_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__b, 16); }
-__ai int8x8_t vtbl2_s8(int8x8x2_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vtbl2_v(__a.val[0], __a.val[1], __b, 0); }
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __a, uint8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vtbl2_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__b, 4); }
-
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vtbl3_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__a.val[2], (int8x8_t)__b, 16); }
-__ai int8x8_t vtbl3_s8(int8x8x3_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vtbl3_v(__a.val[0], __a.val[1], __a.val[2], __b, 0); }
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __a, uint8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vtbl3_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__a.val[2], (int8x8_t)__b, 4); }
-
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vtbl4_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__a.val[2], (int8x8_t)__a.val[3], (int8x8_t)__b, 16); }
-__ai int8x8_t vtbl4_s8(int8x8x4_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vtbl4_v(__a.val[0], __a.val[1], __a.val[2], __a.val[3], __b, 0); }
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __a, uint8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vtbl4_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__a.val[2], (int8x8_t)__a.val[3], (int8x8_t)__b, 4); }
-
-__ai uint8x8_t vtbx1_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vtbx1_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 16); }
-__ai int8x8_t vtbx1_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vtbx1_v(__a, __b, __c, 0); }
-__ai poly8x8_t vtbx1_p8(poly8x8_t __a, poly8x8_t __b, uint8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vtbx1_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 4); }
-
-__ai uint8x8_t vtbx2_u8(uint8x8_t __a, uint8x8x2_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vtbx2_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__c, 16); }
-__ai int8x8_t vtbx2_s8(int8x8_t __a, int8x8x2_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vtbx2_v(__a, __b.val[0], __b.val[1], __c, 0); }
-__ai poly8x8_t vtbx2_p8(poly8x8_t __a, poly8x8x2_t __b, uint8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vtbx2_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__c, 4); }
-
-__ai uint8x8_t vtbx3_u8(uint8x8_t __a, uint8x8x3_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vtbx3_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__c, 16); }
-__ai int8x8_t vtbx3_s8(int8x8_t __a, int8x8x3_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vtbx3_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 0); }
-__ai poly8x8_t vtbx3_p8(poly8x8_t __a, poly8x8x3_t __b, uint8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vtbx3_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__c, 4); }
-
-__ai uint8x8_t vtbx4_u8(uint8x8_t __a, uint8x8x4_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vtbx4_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], (int8x8_t)__c, 16); }
-__ai int8x8_t vtbx4_s8(int8x8_t __a, int8x8x4_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vtbx4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 0); }
-__ai poly8x8_t vtbx4_p8(poly8x8_t __a, poly8x8x4_t __b, uint8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vtbx4_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], (int8x8_t)__c, 4); }
-
-__ai int8x8x2_t vtrn_s8(int8x8_t __a, int8x8_t __b) {
-  int8x8x2_t r; __builtin_neon_vtrn_v(&r, __a, __b, 0); return r; }
-__ai int16x4x2_t vtrn_s16(int16x4_t __a, int16x4_t __b) {
-  int16x4x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 1); return r; }
-__ai int32x2x2_t vtrn_s32(int32x2_t __a, int32x2_t __b) {
-  int32x2x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 2); return r; }
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __a, uint8x8_t __b) {
-  uint8x8x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 16); return r; }
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __a, uint16x4_t __b) {
-  uint16x4x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 17); return r; }
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __a, uint32x2_t __b) {
-  uint32x2x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 18); return r; }
-__ai float32x2x2_t vtrn_f32(float32x2_t __a, float32x2_t __b) {
-  float32x2x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 8); return r; }
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __a, poly8x8_t __b) {
-  poly8x8x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 4); return r; }
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __a, poly16x4_t __b) {
-  poly16x4x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 5); return r; }
-__ai int8x16x2_t vtrnq_s8(int8x16_t __a, int8x16_t __b) {
-  int8x16x2_t r; __builtin_neon_vtrnq_v(&r, __a, __b, 32); return r; }
-__ai int16x8x2_t vtrnq_s16(int16x8_t __a, int16x8_t __b) {
-  int16x8x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 33); return r; }
-__ai int32x4x2_t vtrnq_s32(int32x4_t __a, int32x4_t __b) {
-  int32x4x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 34); return r; }
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __a, uint8x16_t __b) {
-  uint8x16x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 48); return r; }
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __a, uint16x8_t __b) {
-  uint16x8x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 49); return r; }
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __a, uint32x4_t __b) {
-  uint32x4x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 50); return r; }
-__ai float32x4x2_t vtrnq_f32(float32x4_t __a, float32x4_t __b) {
-  float32x4x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 40); return r; }
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __a, poly8x16_t __b) {
-  poly8x16x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 36); return r; }
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __a, poly16x8_t __b) {
-  poly16x8x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 37); return r; }
-
-__ai uint8x8_t vtst_s8(int8x8_t __a, int8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vtst_v(__a, __b, 16); }
-__ai uint16x4_t vtst_s16(int16x4_t __a, int16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vtst_s32(int32x2_t __a, int32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint8x8_t vtst_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vtst_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vtst_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint8x8_t vtst_p8(poly8x8_t __a, poly8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vtst_p16(poly16x4_t __a, poly16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint8x16_t vtstq_s8(int8x16_t __a, int8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vtstq_v(__a, __b, 48); }
-__ai uint16x8_t vtstq_s16(int16x8_t __a, int16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vtstq_s32(int32x4_t __a, int32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint8x16_t vtstq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vtstq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vtstq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint8x16_t vtstq_p8(poly8x16_t __a, poly8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vtstq_p16(poly16x8_t __a, poly16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-
-__ai int8x8x2_t vuzp_s8(int8x8_t __a, int8x8_t __b) {
-  int8x8x2_t r; __builtin_neon_vuzp_v(&r, __a, __b, 0); return r; }
-__ai int16x4x2_t vuzp_s16(int16x4_t __a, int16x4_t __b) {
-  int16x4x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 1); return r; }
-__ai int32x2x2_t vuzp_s32(int32x2_t __a, int32x2_t __b) {
-  int32x2x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 2); return r; }
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __a, uint8x8_t __b) {
-  uint8x8x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 16); return r; }
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __a, uint16x4_t __b) {
-  uint16x4x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 17); return r; }
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __a, uint32x2_t __b) {
-  uint32x2x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 18); return r; }
-__ai float32x2x2_t vuzp_f32(float32x2_t __a, float32x2_t __b) {
-  float32x2x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 8); return r; }
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __a, poly8x8_t __b) {
-  poly8x8x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 4); return r; }
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __a, poly16x4_t __b) {
-  poly16x4x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 5); return r; }
-__ai int8x16x2_t vuzpq_s8(int8x16_t __a, int8x16_t __b) {
-  int8x16x2_t r; __builtin_neon_vuzpq_v(&r, __a, __b, 32); return r; }
-__ai int16x8x2_t vuzpq_s16(int16x8_t __a, int16x8_t __b) {
-  int16x8x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 33); return r; }
-__ai int32x4x2_t vuzpq_s32(int32x4_t __a, int32x4_t __b) {
-  int32x4x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 34); return r; }
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __a, uint8x16_t __b) {
-  uint8x16x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 48); return r; }
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __a, uint16x8_t __b) {
-  uint16x8x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 49); return r; }
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __a, uint32x4_t __b) {
-  uint32x4x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 50); return r; }
-__ai float32x4x2_t vuzpq_f32(float32x4_t __a, float32x4_t __b) {
-  float32x4x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 40); return r; }
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __a, poly8x16_t __b) {
-  poly8x16x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 36); return r; }
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __a, poly16x8_t __b) {
-  poly16x8x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 37); return r; }
-
-__ai int8x8x2_t vzip_s8(int8x8_t __a, int8x8_t __b) {
-  int8x8x2_t r; __builtin_neon_vzip_v(&r, __a, __b, 0); return r; }
-__ai int16x4x2_t vzip_s16(int16x4_t __a, int16x4_t __b) {
-  int16x4x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 1); return r; }
-__ai int32x2x2_t vzip_s32(int32x2_t __a, int32x2_t __b) {
-  int32x2x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 2); return r; }
-__ai uint8x8x2_t vzip_u8(uint8x8_t __a, uint8x8_t __b) {
-  uint8x8x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 16); return r; }
-__ai uint16x4x2_t vzip_u16(uint16x4_t __a, uint16x4_t __b) {
-  uint16x4x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 17); return r; }
-__ai uint32x2x2_t vzip_u32(uint32x2_t __a, uint32x2_t __b) {
-  uint32x2x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 18); return r; }
-__ai float32x2x2_t vzip_f32(float32x2_t __a, float32x2_t __b) {
-  float32x2x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 8); return r; }
-__ai poly8x8x2_t vzip_p8(poly8x8_t __a, poly8x8_t __b) {
-  poly8x8x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 4); return r; }
-__ai poly16x4x2_t vzip_p16(poly16x4_t __a, poly16x4_t __b) {
-  poly16x4x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 5); return r; }
-__ai int8x16x2_t vzipq_s8(int8x16_t __a, int8x16_t __b) {
-  int8x16x2_t r; __builtin_neon_vzipq_v(&r, __a, __b, 32); return r; }
-__ai int16x8x2_t vzipq_s16(int16x8_t __a, int16x8_t __b) {
-  int16x8x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 33); return r; }
-__ai int32x4x2_t vzipq_s32(int32x4_t __a, int32x4_t __b) {
-  int32x4x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 34); return r; }
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __a, uint8x16_t __b) {
-  uint8x16x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 48); return r; }
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __a, uint16x8_t __b) {
-  uint16x8x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 49); return r; }
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __a, uint32x4_t __b) {
-  uint32x4x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 50); return r; }
-__ai float32x4x2_t vzipq_f32(float32x4_t __a, float32x4_t __b) {
-  float32x4x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 40); return r; }
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __a, poly8x16_t __b) {
-  poly8x16x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 36); return r; }
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __a, poly16x8_t __b) {
-  poly16x8x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 37); return r; }
-
-#ifdef __aarch64__
-__ai int16x8_t vmovl_high_s8(int8x16_t __a) {
-  int8x8_t __a1 = vget_high_s8(__a);
-  return (int16x8_t)vshll_n_s8(__a1, 0); }
-__ai int32x4_t vmovl_high_s16(int16x8_t __a) {
-  int16x4_t __a1 = vget_high_s16(__a);
-  return (int32x4_t)vshll_n_s16(__a1, 0); }
-__ai int64x2_t vmovl_high_s32(int32x4_t __a) {
-  int32x2_t __a1 = vget_high_s32(__a);
-  return (int64x2_t)vshll_n_s32(__a1, 0); }
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __a) {
-  uint8x8_t __a1 = vget_high_u8(__a);
-  return (uint16x8_t)vshll_n_u8(__a1, 0); }
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __a) {
-  uint16x4_t __a1 = vget_high_u16(__a);
-  return (uint32x4_t)vshll_n_u16(__a1, 0); }
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __a) {
-  uint32x2_t __a1 = vget_high_u32(__a);
-  return (uint64x2_t)vshll_n_u32(__a1, 0); }
-
-__ai int16x8_t vmull_high_s8(int8x16_t __a, int8x16_t __b) {
-  return vmull_s8(vget_high_s8(__a), vget_high_s8(__b)); }
-__ai int32x4_t vmull_high_s16(int16x8_t __a, int16x8_t __b) {
-  return vmull_s16(vget_high_s16(__a), vget_high_s16(__b)); }
-__ai int64x2_t vmull_high_s32(int32x4_t __a, int32x4_t __b) {
-  return vmull_s32(vget_high_s32(__a), vget_high_s32(__b)); }
-__ai uint16x8_t vmull_high_u8(uint8x16_t __a, uint8x16_t __b) {
-  return vmull_u8(vget_high_u8(__a), vget_high_u8(__b)); }
-__ai uint32x4_t vmull_high_u16(uint16x8_t __a, uint16x8_t __b) {
-  return vmull_u16(vget_high_u16(__a), vget_high_u16(__b)); }
-__ai uint64x2_t vmull_high_u32(uint32x4_t __a, uint32x4_t __b) {
-  return vmull_u32(vget_high_u32(__a), vget_high_u32(__b)); }
-__ai poly16x8_t vmull_high_p8(poly8x16_t __a, poly8x16_t __b) {
-  return vmull_p8(vget_high_p8(__a), vget_high_p8(__b)); }
-
-__ai int16x8_t vabdl_high_s8(int8x16_t __a, int8x16_t __b) {
-  return vabdl_s8(vget_high_s8(__a), vget_high_s8(__b)); }
-__ai int32x4_t vabdl_high_s16(int16x8_t __a, int16x8_t __b) {
-  return vabdl_s16(vget_high_s16(__a), vget_high_s16(__b)); }
-__ai int64x2_t vabdl_high_s32(int32x4_t __a, int32x4_t __b) {
-  return vabdl_s32(vget_high_s32(__a), vget_high_s32(__b)); }
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __a, uint8x16_t __b) {
-  return vabdl_u8(vget_high_u8(__a), vget_high_u8(__b)); }
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __a, uint16x8_t __b) {
-  return vabdl_u16(vget_high_u16(__a), vget_high_u16(__b)); }
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __a, uint32x4_t __b) {
-  return vabdl_u32(vget_high_u32(__a), vget_high_u32(__b)); }
-
-__ai float64x1_t vabd_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x1_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 9); }
-__ai float64x2_t vabdq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai int64x1_t vabs_s64(int64x1_t __a) {
-  return (int64x1_t)__builtin_neon_vabs_v((int8x8_t)__a, 3); }
-__ai float64x1_t vabs_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vabs_v((int8x8_t)__a, 9); }
-__ai int64x2_t vabsq_s64(int64x2_t __a) {
-  return (int64x2_t)__builtin_neon_vabsq_v((int8x16_t)__a, 35); }
-__ai float64x2_t vabsq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vabsq_v((int8x16_t)__a, 41); }
-
-__ai float64x1_t vadd_f64(float64x1_t __a, float64x1_t __b) {
-  return __a + __b; }
-__ai float64x2_t vaddq_f64(float64x2_t __a, float64x2_t __b) {
-  return __a + __b; }
-
-__ai int8x16_t vpaddq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vpaddq_v(__a, __b, 32); }
-__ai int16x8_t vpaddq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vpaddq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vpaddq_s64(int64x2_t __a, int64x2_t __b) {
-  return (int64x2_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-__ai uint8x16_t vpaddq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vpaddq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vpaddq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vpaddq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-__ai float32x4_t vpaddq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-__ai float64x2_t vpaddq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float64x1_t vbsl_f64(uint64x1_t __a, float64x1_t __b, float64x1_t __c) {
-  return (float64x1_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 9); }
-__ai float64x2_t vbslq_f64(uint64x2_t __a, float64x2_t __b, float64x2_t __c) {
-  return (float64x2_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 41); }
-__ai poly64x1_t vbsl_p64(uint64x1_t __a, poly64x1_t __b, poly64x1_t __c) {
-  return (poly64x1_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 6); }
-__ai poly64x2_t vbslq_p64(uint64x2_t __a, poly64x2_t __b, poly64x2_t __c) {
-  return (poly64x2_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 38); }
-
-__ai uint64x1_t vceq_s64(int64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)(__a == __b); }
-__ai uint64x1_t vceq_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)(__a == __b); }
-__ai uint64x1_t vceq_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)(__a == __b); }
-__ai uint64x2_t vceqq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)(__a == __b); }
-__ai uint64x2_t vceqq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)(__a == __b); }
-__ai uint64x2_t vceqq_s64(int64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)(__a == __b); }
-__ai uint64x1_t vceq_p64(poly64x1_t __a, poly64x1_t __b) {
-  return (uint64x1_t)(__a == __b); }
-__ai uint64x2_t vceqq_p64(poly64x2_t __a, poly64x2_t __b) {
-  return (uint64x2_t)(__a == __b); }
-
-__ai uint64x1_t vcge_s64(int64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)(__a >= __b); }
-__ai uint64x1_t vcge_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)(__a >= __b); }
-__ai uint64x2_t vcgeq_s64(int64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)(__a >= __b); }
-__ai uint64x2_t vcgeq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)(__a >= __b); }
-__ai uint64x1_t vcge_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)(__a >= __b); }
-__ai uint64x2_t vcgeq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)(__a >= __b); }
-
-__ai uint64x1_t vcgt_s64(int64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)(__a > __b); }
-__ai uint64x1_t vcgt_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)(__a > __b); }
-__ai uint64x2_t vcgtq_s64(int64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)(__a > __b); }
-__ai uint64x2_t vcgtq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)(__a > __b); }
-__ai uint64x1_t vcgt_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)(__a > __b); }
-__ai uint64x2_t vcgtq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)(__a > __b); }
-
-__ai uint64x1_t vcle_s64(int64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)(__a <= __b); }
-__ai uint64x1_t vcle_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)(__a <= __b); }
-__ai uint64x2_t vcleq_s64(int64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)(__a <= __b); }
-__ai uint64x2_t vcleq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)(__a <= __b); }
-__ai uint64x1_t vcle_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)(__a <= __b); }
-__ai uint64x2_t vcleq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)(__a <= __b); }
-
-__ai uint64x1_t vclt_s64(int64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)(__a < __b); }
-__ai uint64x1_t vclt_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)(__a < __b); }
-__ai uint64x2_t vcltq_s64(int64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)(__a < __b); }
-__ai uint64x2_t vcltq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)(__a < __b); }
-__ai uint64x1_t vclt_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)(__a < __b); }
-__ai uint64x2_t vcltq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)(__a < __b); }
-
-__ai uint8x8_t vceqz_s8(int8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vceqz_v(__a, 16); }
-__ai uint16x4_t vceqz_s16(int16x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vceqz_v((int8x8_t)__a, 17); }
-__ai uint32x2_t vceqz_s32(int32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vceqz_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vceqz_s64(int64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vceqz_v((int8x8_t)__a, 19); }
-__ai uint32x2_t vceqz_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vceqz_v((int8x8_t)__a, 18); }
-__ai uint8x8_t vceqz_u8(uint8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vceqz_v((int8x8_t)__a, 16); }
-__ai uint16x4_t vceqz_u16(uint16x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vceqz_v((int8x8_t)__a, 17); }
-__ai uint32x2_t vceqz_u32(uint32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vceqz_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vceqz_u64(uint64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vceqz_v((int8x8_t)__a, 19); }
-__ai uint8x8_t vceqz_p8(poly8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vceqz_v((int8x8_t)__a, 16); }
-__ai uint16x4_t vceqz_p16(poly16x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vceqz_v((int8x8_t)__a, 17); }
-__ai uint64x1_t vceqz_p64(poly64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vceqz_v((int8x8_t)__a, 19); }
-__ai uint8x16_t vceqzq_s8(int8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vceqzq_v(__a, 48); }
-__ai uint16x8_t vceqzq_s16(int16x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 49); }
-__ai uint32x4_t vceqzq_s32(int32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vceqzq_s64(int64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 51); }
-__ai uint32x4_t vceqzq_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 50); }
-__ai uint8x16_t vceqzq_u8(uint8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 48); }
-__ai uint16x8_t vceqzq_u16(uint16x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 49); }
-__ai uint32x4_t vceqzq_u32(uint32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vceqzq_u64(uint64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 51); }
-__ai uint8x16_t vceqzq_p8(poly8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 48); }
-__ai uint16x8_t vceqzq_p16(poly16x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 49); }
-__ai uint64x1_t vceqz_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vceqz_v((int8x8_t)__a, 19); }
-__ai uint64x2_t vceqzq_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 51); }
-__ai uint64x2_t vceqzq_p64(poly64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 51); }
-
-__ai uint8x8_t vcgez_s8(int8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vcgez_v(__a, 16); }
-__ai uint16x4_t vcgez_s16(int16x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vcgez_v((int8x8_t)__a, 17); }
-__ai uint32x2_t vcgez_s32(int32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcgez_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vcgez_s64(int64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcgez_v((int8x8_t)__a, 19); }
-__ai uint32x2_t vcgez_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcgez_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vcgez_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcgez_v((int8x8_t)__a, 19); }
-__ai uint8x16_t vcgezq_s8(int8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vcgezq_v(__a, 48); }
-__ai uint16x8_t vcgezq_s16(int16x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 49); }
-__ai uint32x4_t vcgezq_s32(int32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vcgezq_s64(int64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 51); }
-__ai uint32x4_t vcgezq_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vcgezq_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 51); }
-
-__ai uint8x8_t vcgtz_s8(int8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vcgtz_v(__a, 16); }
-__ai uint16x4_t vcgtz_s16(int16x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 17); }
-__ai uint32x2_t vcgtz_s32(int32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vcgtz_s64(int64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 19); }
-__ai uint32x2_t vcgtz_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vcgtz_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 19); }
-__ai uint8x16_t vcgtzq_s8(int8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vcgtzq_v(__a, 48); }
-__ai uint16x8_t vcgtzq_s16(int16x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 49); }
-__ai uint32x4_t vcgtzq_s32(int32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vcgtzq_s64(int64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 51); }
-__ai uint32x4_t vcgtzq_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vcgtzq_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 51); }
-
-__ai uint8x8_t vclez_s8(int8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vclez_v(__a, 16); }
-__ai uint16x4_t vclez_s16(int16x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vclez_v((int8x8_t)__a, 17); }
-__ai uint32x2_t vclez_s32(int32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vclez_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vclez_s64(int64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vclez_v((int8x8_t)__a, 19); }
-__ai uint32x2_t vclez_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vclez_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vclez_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vclez_v((int8x8_t)__a, 19); }
-__ai uint8x16_t vclezq_s8(int8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vclezq_v(__a, 48); }
-__ai uint16x8_t vclezq_s16(int16x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vclezq_v((int8x16_t)__a, 49); }
-__ai uint32x4_t vclezq_s32(int32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vclezq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vclezq_s64(int64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vclezq_v((int8x16_t)__a, 51); }
-__ai uint32x4_t vclezq_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vclezq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vclezq_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vclezq_v((int8x16_t)__a, 51); }
-
-__ai uint8x8_t vcltz_s8(int8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vcltz_v(__a, 16); }
-__ai uint16x4_t vcltz_s16(int16x4_t __a) {
-  return (uint16x4_t)__builtin_neon_vcltz_v((int8x8_t)__a, 17); }
-__ai uint32x2_t vcltz_s32(int32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcltz_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vcltz_s64(int64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcltz_v((int8x8_t)__a, 19); }
-__ai uint32x2_t vcltz_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcltz_v((int8x8_t)__a, 18); }
-__ai uint64x1_t vcltz_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcltz_v((int8x8_t)__a, 19); }
-__ai uint8x16_t vcltzq_s8(int8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vcltzq_v(__a, 48); }
-__ai uint16x8_t vcltzq_s16(int16x8_t __a) {
-  return (uint16x8_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 49); }
-__ai uint32x4_t vcltzq_s32(int32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vcltzq_s64(int64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 51); }
-__ai uint32x4_t vcltzq_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 50); }
-__ai uint64x2_t vcltzq_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 51); }
-
-__ai uint64x1_t vtst_s64(int64x1_t __a, int64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai uint64x1_t vtst_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai uint64x2_t vtstq_s64(int64x2_t __a, int64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-__ai uint64x2_t vtstq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-__ai uint64x1_t vtst_p64(poly64x1_t __a, poly64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai uint64x2_t vtstq_p64(poly64x2_t __a, poly64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai float64x2_t vcombine_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x2_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-__ai poly64x2_t vcombine_p64(poly64x1_t __a, poly64x1_t __b) {
-  return (poly64x2_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
-
-#define vcopyq_lane_s8(a1, __b1, c1, __d1) __extension__ ({ \
-  int8x16_t __a1 = (a1); int8x8_t __c1 = (c1); \
-  int8_t __c2 = vget_lane_s8(__c1, __d1); \
-  vsetq_lane_s8(__c2, __a1, __b1); })
-#define vcopyq_lane_s16(a1, __b1, c1, __d1) __extension__ ({ \
-  int16x8_t __a1 = (a1); int16x4_t __c1 = (c1); \
-  int16_t __c2 = vget_lane_s16(__c1, __d1); \
-  vsetq_lane_s16(__c2, __a1, __b1); })
-#define vcopyq_lane_s32(a1, __b1, c1, __d1) __extension__ ({ \
-  int32x4_t __a1 = (a1); int32x2_t __c1 = (c1); \
-  int32_t __c2 = vget_lane_s32(__c1, __d1); \
-  vsetq_lane_s32(__c2, __a1, __b1); })
-#define vcopyq_lane_s64(a1, __b1, c1, __d1) __extension__ ({ \
-  int64x2_t __a1 = (a1); int64x1_t __c1 = (c1); \
-  int64_t __c2 = vget_lane_s64(__c1, __d1); \
-  vsetq_lane_s64(__c2, __a1, __b1); })
-#define vcopyq_lane_u8(a1, __b1, c1, __d1) __extension__ ({ \
-  uint8x16_t __a1 = (a1); uint8x8_t __c1 = (c1); \
-  uint8_t __c2 = vget_lane_u8(__c1, __d1); \
-  vsetq_lane_u8(__c2, __a1, __b1); })
-#define vcopyq_lane_u16(a1, __b1, c1, __d1) __extension__ ({ \
-  uint16x8_t __a1 = (a1); uint16x4_t __c1 = (c1); \
-  uint16_t __c2 = vget_lane_u16(__c1, __d1); \
-  vsetq_lane_u16(__c2, __a1, __b1); })
-#define vcopyq_lane_u32(a1, __b1, c1, __d1) __extension__ ({ \
-  uint32x4_t __a1 = (a1); uint32x2_t __c1 = (c1); \
-  uint32_t __c2 = vget_lane_u32(__c1, __d1); \
-  vsetq_lane_u32(__c2, __a1, __b1); })
-#define vcopyq_lane_u64(a1, __b1, c1, __d1) __extension__ ({ \
-  uint64x2_t __a1 = (a1); uint64x1_t __c1 = (c1); \
-  uint64_t __c2 = vget_lane_u64(__c1, __d1); \
-  vsetq_lane_u64(__c2, __a1, __b1); })
-#define vcopyq_lane_p8(a1, __b1, c1, __d1) __extension__ ({ \
-  poly8x16_t __a1 = (a1); poly8x8_t __c1 = (c1); \
-  poly8_t __c2 = vget_lane_p8(__c1, __d1); \
-  vsetq_lane_p8(__c2, __a1, __b1); })
-#define vcopyq_lane_p16(a1, __b1, c1, __d1) __extension__ ({ \
-  poly16x8_t __a1 = (a1); poly16x4_t __c1 = (c1); \
-  poly16_t __c2 = vget_lane_p16(__c1, __d1); \
-  vsetq_lane_p16(__c2, __a1, __b1); })
-#define vcopyq_lane_f32(a1, __b1, c1, __d1) __extension__ ({ \
-  float32x4_t __a1 = (a1); float32x2_t __c1 = (c1); \
-  float32_t __c2 = vget_lane_f32(__c1, __d1); \
-  vsetq_lane_f32(__c2, __a1, __b1); })
-#define vcopyq_lane_f64(a1, __b1, c1, __d1) __extension__ ({ \
-  float64x2_t __a1 = (a1); float64x1_t __c1 = (c1); \
-  float64_t __c2 = vget_lane_f64(__c1, __d1); \
-  vsetq_lane_f64(__c2, __a1, __b1); })
-#define vcopyq_lane_p64(a1, __b1, c1, __d1) __extension__ ({ \
-  poly64x2_t __a1 = (a1); poly64x1_t __c1 = (c1); \
-  poly64_t __c2 = vget_lane_p64(__c1, __d1); \
-  vsetq_lane_p64(__c2, __a1, __b1); })
-
-#define vcopyq_laneq_s8(a1, __b1, c1, __d1) __extension__ ({ \
-  int8x16_t __a1 = (a1); int8x16_t __c1 = (c1); \
-  int8_t __c2 = vgetq_lane_s8(__c1, __d1); \
-  vsetq_lane_s8(__c2, __a1, __b1); })
-#define vcopyq_laneq_s16(a1, __b1, c1, __d1) __extension__ ({ \
-  int16x8_t __a1 = (a1); int16x8_t __c1 = (c1); \
-  int16_t __c2 = vgetq_lane_s16(__c1, __d1); \
-  vsetq_lane_s16(__c2, __a1, __b1); })
-#define vcopyq_laneq_s32(a1, __b1, c1, __d1) __extension__ ({ \
-  int32x4_t __a1 = (a1); int32x4_t __c1 = (c1); \
-  int32_t __c2 = vgetq_lane_s32(__c1, __d1); \
-  vsetq_lane_s32(__c2, __a1, __b1); })
-#define vcopyq_laneq_s64(a1, __b1, c1, __d1) __extension__ ({ \
-  int64x2_t __a1 = (a1); int64x2_t __c1 = (c1); \
-  int64_t __c2 = vgetq_lane_s64(__c1, __d1); \
-  vsetq_lane_s64(__c2, __a1, __b1); })
-#define vcopyq_laneq_u8(a1, __b1, c1, __d1) __extension__ ({ \
-  uint8x16_t __a1 = (a1); uint8x16_t __c1 = (c1); \
-  uint8_t __c2 = vgetq_lane_u8(__c1, __d1); \
-  vsetq_lane_u8(__c2, __a1, __b1); })
-#define vcopyq_laneq_u16(a1, __b1, c1, __d1) __extension__ ({ \
-  uint16x8_t __a1 = (a1); uint16x8_t __c1 = (c1); \
-  uint16_t __c2 = vgetq_lane_u16(__c1, __d1); \
-  vsetq_lane_u16(__c2, __a1, __b1); })
-#define vcopyq_laneq_u32(a1, __b1, c1, __d1) __extension__ ({ \
-  uint32x4_t __a1 = (a1); uint32x4_t __c1 = (c1); \
-  uint32_t __c2 = vgetq_lane_u32(__c1, __d1); \
-  vsetq_lane_u32(__c2, __a1, __b1); })
-#define vcopyq_laneq_u64(a1, __b1, c1, __d1) __extension__ ({ \
-  uint64x2_t __a1 = (a1); uint64x2_t __c1 = (c1); \
-  uint64_t __c2 = vgetq_lane_u64(__c1, __d1); \
-  vsetq_lane_u64(__c2, __a1, __b1); })
-#define vcopyq_laneq_p8(a1, __b1, c1, __d1) __extension__ ({ \
-  poly8x16_t __a1 = (a1); poly8x16_t __c1 = (c1); \
-  poly8_t __c2 = vgetq_lane_p8(__c1, __d1); \
-  vsetq_lane_p8(__c2, __a1, __b1); })
-#define vcopyq_laneq_p16(a1, __b1, c1, __d1) __extension__ ({ \
-  poly16x8_t __a1 = (a1); poly16x8_t __c1 = (c1); \
-  poly16_t __c2 = vgetq_lane_p16(__c1, __d1); \
-  vsetq_lane_p16(__c2, __a1, __b1); })
-#define vcopyq_laneq_f32(a1, __b1, c1, __d1) __extension__ ({ \
-  float32x4_t __a1 = (a1); float32x4_t __c1 = (c1); \
-  float32_t __c2 = vgetq_lane_f32(__c1, __d1); \
-  vsetq_lane_f32(__c2, __a1, __b1); })
-#define vcopyq_laneq_f64(a1, __b1, c1, __d1) __extension__ ({ \
-  float64x2_t __a1 = (a1); float64x2_t __c1 = (c1); \
-  float64_t __c2 = vgetq_lane_f64(__c1, __d1); \
-  vsetq_lane_f64(__c2, __a1, __b1); })
-#define vcopyq_laneq_p64(a1, __b1, c1, __d1) __extension__ ({ \
-  poly64x2_t __a1 = (a1); poly64x2_t __c1 = (c1); \
-  poly64_t __c2 = vgetq_lane_p64(__c1, __d1); \
-  vsetq_lane_p64(__c2, __a1, __b1); })
-
-#define vcopy_lane_s8(a1, __b1, c1, __d1) __extension__ ({ \
-  int8x8_t __a1 = (a1); int8x8_t __c1 = (c1); \
-  int8_t __c2 = vget_lane_s8(__c1, __d1); \
-  vset_lane_s8(__c2, __a1, __b1); })
-#define vcopy_lane_s16(a1, __b1, c1, __d1) __extension__ ({ \
-  int16x4_t __a1 = (a1); int16x4_t __c1 = (c1); \
-  int16_t __c2 = vget_lane_s16(__c1, __d1); \
-  vset_lane_s16(__c2, __a1, __b1); })
-#define vcopy_lane_s32(a1, __b1, c1, __d1) __extension__ ({ \
-  int32x2_t __a1 = (a1); int32x2_t __c1 = (c1); \
-  int32_t __c2 = vget_lane_s32(__c1, __d1); \
-  vset_lane_s32(__c2, __a1, __b1); })
-#define vcopy_lane_s64(a1, __b1, c1, __d1) __extension__ ({ \
-  int64x1_t __a1 = (a1); int64x1_t __c1 = (c1); \
-  int64_t __c2 = vget_lane_s64(__c1, __d1); \
-  vset_lane_s64(__c2, __a1, __b1); })
-#define vcopy_lane_p8(a1, __b1, c1, __d1) __extension__ ({ \
-  poly8x8_t __a1 = (a1); poly8x8_t __c1 = (c1); \
-  poly8_t __c2 = vget_lane_p8(__c1, __d1); \
-  vset_lane_p8(__c2, __a1, __b1); })
-#define vcopy_lane_p16(a1, __b1, c1, __d1) __extension__ ({ \
-  poly16x4_t __a1 = (a1); poly16x4_t __c1 = (c1); \
-  poly16_t __c2 = vget_lane_p16(__c1, __d1); \
-  vset_lane_p16(__c2, __a1, __b1); })
-#define vcopy_lane_u8(a1, __b1, c1, __d1) __extension__ ({ \
-  uint8x8_t __a1 = (a1); uint8x8_t __c1 = (c1); \
-  uint8_t __c2 = vget_lane_u8(__c1, __d1); \
-  vset_lane_u8(__c2, __a1, __b1); })
-#define vcopy_lane_u16(a1, __b1, c1, __d1) __extension__ ({ \
-  uint16x4_t __a1 = (a1); uint16x4_t __c1 = (c1); \
-  uint16_t __c2 = vget_lane_u16(__c1, __d1); \
-  vset_lane_u16(__c2, __a1, __b1); })
-#define vcopy_lane_u32(a1, __b1, c1, __d1) __extension__ ({ \
-  uint32x2_t __a1 = (a1); uint32x2_t __c1 = (c1); \
-  uint32_t __c2 = vget_lane_u32(__c1, __d1); \
-  vset_lane_u32(__c2, __a1, __b1); })
-#define vcopy_lane_u64(a1, __b1, c1, __d1) __extension__ ({ \
-  uint64x1_t __a1 = (a1); uint64x1_t __c1 = (c1); \
-  uint64_t __c2 = vget_lane_u64(__c1, __d1); \
-  vset_lane_u64(__c2, __a1, __b1); })
-#define vcopy_lane_p64(a1, __b1, c1, __d1) __extension__ ({ \
-  poly64x1_t __a1 = (a1); poly64x1_t __c1 = (c1); \
-  poly64_t __c2 = vget_lane_p64(__c1, __d1); \
-  vset_lane_p64(__c2, __a1, __b1); })
-#define vcopy_lane_f32(a1, __b1, c1, __d1) __extension__ ({ \
-  float32x2_t __a1 = (a1); float32x2_t __c1 = (c1); \
-  float32_t __c2 = vget_lane_f32(__c1, __d1); \
-  vset_lane_f32(__c2, __a1, __b1); })
-#define vcopy_lane_f64(a1, __b1, c1, __d1) __extension__ ({ \
-  float64x1_t __a1 = (a1); float64x1_t __c1 = (c1); \
-  float64_t __c2 = vget_lane_f64(__c1, __d1); \
-  vset_lane_f64(__c2, __a1, __b1); })
-
-#define vcopy_laneq_s8(a1, __b1, c1, __d1) __extension__ ({ \
-  int8x8_t __a1 = (a1); int8x16_t __c1 = (c1); \
-  int8_t __c2 = vgetq_lane_s8(__c1, __d1); \
-  vset_lane_s8(__c2, __a1, __b1); })
-#define vcopy_laneq_s16(a1, __b1, c1, __d1) __extension__ ({ \
-  int16x4_t __a1 = (a1); int16x8_t __c1 = (c1); \
-  int16_t __c2 = vgetq_lane_s16(__c1, __d1); \
-  vset_lane_s16(__c2, __a1, __b1); })
-#define vcopy_laneq_s32(a1, __b1, c1, __d1) __extension__ ({ \
-  int32x2_t __a1 = (a1); int32x4_t __c1 = (c1); \
-  int32_t __c2 = vgetq_lane_s32(__c1, __d1); \
-  vset_lane_s32(__c2, __a1, __b1); })
-#define vcopy_laneq_s64(a1, __b1, c1, __d1) __extension__ ({ \
-  int64x1_t __a1 = (a1); int64x2_t __c1 = (c1); \
-  int64_t __c2 = vgetq_lane_s64(__c1, __d1); \
-  vset_lane_s64(__c2, __a1, __b1); })
-#define vcopy_laneq_p8(a1, __b1, c1, __d1) __extension__ ({ \
-  poly8x8_t __a1 = (a1); poly8x16_t __c1 = (c1); \
-  poly8_t __c2 = vgetq_lane_p8(__c1, __d1); \
-  vset_lane_p8(__c2, __a1, __b1); })
-#define vcopy_laneq_p16(a1, __b1, c1, __d1) __extension__ ({ \
-  poly16x4_t __a1 = (a1); poly16x8_t __c1 = (c1); \
-  poly16_t __c2 = vgetq_lane_p16(__c1, __d1); \
-  vset_lane_p16(__c2, __a1, __b1); })
-#define vcopy_laneq_p64(a1, __b1, c1, __d1) __extension__ ({ \
-  poly64x1_t __a1 = (a1); poly64x2_t __c1 = (c1); \
-  poly64_t __c2 = vgetq_lane_p64(__c1, __d1); \
-  vset_lane_p64(__c2, __a1, __b1); })
-#define vcopy_laneq_u8(a1, __b1, c1, __d1) __extension__ ({ \
-  uint8x8_t __a1 = (a1); uint8x16_t __c1 = (c1); \
-  uint8_t __c2 = vgetq_lane_u8(__c1, __d1); \
-  vset_lane_u8(__c2, __a1, __b1); })
-#define vcopy_laneq_u16(a1, __b1, c1, __d1) __extension__ ({ \
-  uint16x4_t __a1 = (a1); uint16x8_t __c1 = (c1); \
-  uint16_t __c2 = vgetq_lane_u16(__c1, __d1); \
-  vset_lane_u16(__c2, __a1, __b1); })
-#define vcopy_laneq_u32(a1, __b1, c1, __d1) __extension__ ({ \
-  uint32x2_t __a1 = (a1); uint32x4_t __c1 = (c1); \
-  uint32_t __c2 = vgetq_lane_u32(__c1, __d1); \
-  vset_lane_u32(__c2, __a1, __b1); })
-#define vcopy_laneq_u64(a1, __b1, c1, __d1) __extension__ ({ \
-  uint64x1_t __a1 = (a1); uint64x2_t __c1 = (c1); \
-  uint64_t __c2 = vgetq_lane_u64(__c1, __d1); \
-  vset_lane_u64(__c2, __a1, __b1); })
-#define vcopy_laneq_f32(a1, __b1, c1, __d1) __extension__ ({ \
-  float32x2_t __a1 = (a1); float32x4_t __c1 = (c1); \
-  float32_t __c2 = vgetq_lane_f32(__c1, __d1); \
-  vset_lane_f32(__c2, __a1, __b1); })
-#define vcopy_laneq_f64(a1, __b1, c1, __d1) __extension__ ({ \
-  float64x1_t __a1 = (a1); float64x2_t __c1 = (c1); \
-  float64_t __c2 = vgetq_lane_f64(__c1, __d1); \
-  vset_lane_f64(__c2, __a1, __b1); })
-
-__ai float64x1_t vcreate_f64(uint64_t __a) {
-  return (float64x1_t)__a; }
-__ai poly64x1_t vcreate_p64(uint64_t __a) {
-  return (poly64x1_t)__a; }
-
-#define vcvt_n_f64_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  (float64x1_t)__builtin_neon_vcvt_n_f64_v((int8x8_t)__a, __b, 3); })
-#define vcvt_n_f64_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  (float64x1_t)__builtin_neon_vcvt_n_f64_v((int8x8_t)__a, __b, 19); })
-#define vcvtq_n_f64_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (float64x2_t)__builtin_neon_vcvtq_n_f64_v((int8x16_t)__a, __b, 35); })
-#define vcvtq_n_f64_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (float64x2_t)__builtin_neon_vcvtq_n_f64_v((int8x16_t)__a, __b, 51); })
-
-__ai float64x1_t vdup_n_f64(float64_t __a) {
-  return (float64x1_t){ __a }; }
-__ai float64x2_t vdupq_n_f64(float64_t __a) {
-  return (float64x2_t){ __a, __a }; }
-__ai poly64x1_t vdup_n_p64(poly64_t __a) {
-  return (poly64x1_t){ __a }; }
-__ai poly64x2_t vdupq_n_p64(poly64_t __a) {
-  return (poly64x2_t){ __a, __a }; }
-
-__ai uint64x1_t vcage_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vcage_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai uint64x2_t vcageq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vcageq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai uint64x1_t vcagt_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vcagt_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai uint64x2_t vcagtq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vcagtq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai uint64x1_t vcale_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vcale_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai uint64x2_t vcaleq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vcaleq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai uint64x1_t vcalt_f64(float64x1_t __a, float64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vcalt_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai uint64x2_t vcaltq_f64(float64x2_t __a, float64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vcaltq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai int32x2_t vcvta_s32_f32(float32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vcvta_s32_v((int8x8_t)__a, 2); }
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vcvtaq_s32_v((int8x16_t)__a, 34); }
-
-__ai int64x1_t vcvta_s64_f64(float64x1_t __a) {
-  return (int64x1_t)__builtin_neon_vcvta_s64_v((int8x8_t)__a, 3); }
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __a) {
-  return (int64x2_t)__builtin_neon_vcvtaq_s64_v((int8x16_t)__a, 35); }
-
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcvta_u32_v((int8x8_t)__a, 18); }
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcvtaq_u32_v((int8x16_t)__a, 50); }
-
-__ai uint64x1_t vcvta_u64_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcvta_u64_v((int8x8_t)__a, 19); }
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcvtaq_u64_v((int8x16_t)__a, 51); }
-
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vcvtm_s32_v((int8x8_t)__a, 2); }
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vcvtmq_s32_v((int8x16_t)__a, 34); }
-
-__ai int64x1_t vcvtm_s64_f64(float64x1_t __a) {
-  return (int64x1_t)__builtin_neon_vcvtm_s64_v((int8x8_t)__a, 3); }
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __a) {
-  return (int64x2_t)__builtin_neon_vcvtmq_s64_v((int8x16_t)__a, 35); }
-
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcvtm_u32_v((int8x8_t)__a, 18); }
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcvtmq_u32_v((int8x16_t)__a, 50); }
-
-__ai uint64x1_t vcvtm_u64_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcvtm_u64_v((int8x8_t)__a, 19); }
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcvtmq_u64_v((int8x16_t)__a, 51); }
-
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vcvtn_s32_v((int8x8_t)__a, 2); }
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vcvtnq_s32_v((int8x16_t)__a, 34); }
-
-__ai int64x1_t vcvtn_s64_f64(float64x1_t __a) {
-  return (int64x1_t)__builtin_neon_vcvtn_s64_v((int8x8_t)__a, 3); }
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __a) {
-  return (int64x2_t)__builtin_neon_vcvtnq_s64_v((int8x16_t)__a, 35); }
-
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcvtn_u32_v((int8x8_t)__a, 18); }
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcvtnq_u32_v((int8x16_t)__a, 50); }
-
-__ai uint64x1_t vcvtn_u64_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcvtn_u64_v((int8x8_t)__a, 19); }
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcvtnq_u64_v((int8x16_t)__a, 51); }
-
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __a) {
-  return (int32x2_t)__builtin_neon_vcvtp_s32_v((int8x8_t)__a, 2); }
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __a) {
-  return (int32x4_t)__builtin_neon_vcvtpq_s32_v((int8x16_t)__a, 34); }
-
-__ai int64x1_t vcvtp_s64_f64(float64x1_t __a) {
-  return (int64x1_t)__builtin_neon_vcvtp_s64_v((int8x8_t)__a, 3); }
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __a) {
-  return (int64x2_t)__builtin_neon_vcvtpq_s64_v((int8x16_t)__a, 35); }
-
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __a) {
-  return (uint32x2_t)__builtin_neon_vcvtp_u32_v((int8x8_t)__a, 18); }
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __a) {
-  return (uint32x4_t)__builtin_neon_vcvtpq_u32_v((int8x16_t)__a, 50); }
-
-__ai uint64x1_t vcvtp_u64_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcvtp_u64_v((int8x8_t)__a, 19); }
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcvtpq_u64_v((int8x16_t)__a, 51); }
-
-#define vcvt_n_s64_f64(a, __b) __extension__ ({ \
-  float64x1_t __a = (a); \
-  (int64x1_t)__builtin_neon_vcvt_n_s64_v((int8x8_t)__a, __b, 3); })
-#define vcvtq_n_s64_f64(a, __b) __extension__ ({ \
-  float64x2_t __a = (a); \
-  (int64x2_t)__builtin_neon_vcvtq_n_s64_v((int8x16_t)__a, __b, 35); })
-
-#define vcvt_n_u64_f64(a, __b) __extension__ ({ \
-  float64x1_t __a = (a); \
-  (uint64x1_t)__builtin_neon_vcvt_n_u64_v((int8x8_t)__a, __b, 19); })
-#define vcvtq_n_u64_f64(a, __b) __extension__ ({ \
-  float64x2_t __a = (a); \
-  (uint64x2_t)__builtin_neon_vcvtq_n_u64_v((int8x16_t)__a, __b, 51); })
-
-__ai float32x2_t vdiv_f32(float32x2_t __a, float32x2_t __b) {
-  return __a / __b; }
-__ai float64x1_t vdiv_f64(float64x1_t __a, float64x1_t __b) {
-  return __a / __b; }
-__ai float32x4_t vdivq_f32(float32x4_t __a, float32x4_t __b) {
-  return __a / __b; }
-__ai float64x2_t vdivq_f64(float64x2_t __a, float64x2_t __b) {
-  return __a / __b; }
-
-__ai float32x2_t vmaxnm_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vmaxnm_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai float64x1_t vmaxnm_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x1_t)__builtin_neon_vmaxnm_v((int8x8_t)__a, (int8x8_t)__b, 9); }
-__ai float32x4_t vmaxnmq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vmaxnmq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-__ai float64x2_t vmaxnmq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vmaxnmq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float32x2_t vpmaxnm_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vpmaxnm_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vpmaxnmq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vpmaxnmq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float32_t vmaxnmv_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vmaxnmv_f32(__a); }
-__ai float32_t vmaxnmvq_f32(float32x4_t __a) {
-  return (float32_t)__builtin_neon_vmaxnmvq_f32(__a); }
-__ai float64_t vmaxnmvq_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vmaxnmvq_f64(__a); }
-
-__ai float32x2_t vminnm_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vminnm_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai float64x1_t vminnm_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x1_t)__builtin_neon_vminnm_v((int8x8_t)__a, (int8x8_t)__b, 9); }
-__ai float32x4_t vminnmq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vminnmq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-__ai float64x2_t vminnmq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vminnmq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float32x2_t vpminnm_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vpminnm_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai float32x4_t vpminnmq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vpminnmq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-__ai float64x2_t vpminnmq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vpminnmq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float32_t vminnmv_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vminnmv_f32(__a); }
-__ai float32_t vminnmvq_f32(float32x4_t __a) {
-  return (float32_t)__builtin_neon_vminnmvq_f32(__a); }
-__ai float64_t vminnmvq_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vminnmvq_f64(__a); }
-
-__ai float64x1_t vfma_f64(float64x1_t __a, float64x1_t __b, float64x1_t __c) {
-  return (float64x1_t)__builtin_neon_vfma_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 9); }
-__ai float64x2_t vfmaq_f64(float64x2_t __a, float64x2_t __b, float64x2_t __c) {
-  return (float64x2_t)__builtin_neon_vfmaq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 41); }
-
-__ai float32x2_t vfma_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c) {
-  return vfma_f32(__a, __b, (float32x2_t){ __c, __c }); }
-__ai float32x4_t vfmaq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c) {
-  return vfmaq_f32(__a, __b, (float32x4_t){ __c, __c, __c, __c }); }
-
-__ai float32x2_t vfms_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c) {
-  return (float32x2_t)__builtin_neon_vfms_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 8); }
-__ai float64x1_t vfms_f64(float64x1_t __a, float64x1_t __b, float64x1_t __c) {
-  return (float64x1_t)__builtin_neon_vfms_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 9); }
-__ai float32x4_t vfmsq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
-  return (float32x4_t)__builtin_neon_vfmsq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 40); }
-__ai float64x2_t vfmsq_f64(float64x2_t __a, float64x2_t __b, float64x2_t __c) {
-  return (float64x2_t)__builtin_neon_vfmsq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 41); }
-
-__ai float32x2_t vfms_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c) {
-  return vfms_f32(__a, __b, (float32x2_t){ __c, __c }); }
-__ai float32x4_t vfmsq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c) {
-  return vfmsq_f32(__a, __b, (float32x4_t){ __c, __c, __c, __c }); }
-
-__ai float64x1_t vrecpe_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrecpe_v((int8x8_t)__a, 9); }
-__ai float64x2_t vrecpeq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrecpeq_v((int8x16_t)__a, 41); }
-
-__ai float64x1_t vrecps_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x1_t)__builtin_neon_vrecps_v((int8x8_t)__a, (int8x8_t)__b, 9); }
-__ai float64x2_t vrecpsq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vrecpsq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float32x2_t vrnda_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrnda_v((int8x8_t)__a, 8); }
-__ai float64x1_t vrnda_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrnda_v((int8x8_t)__a, 9); }
-__ai float32x4_t vrndaq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrndaq_v((int8x16_t)__a, 40); }
-__ai float64x2_t vrndaq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrndaq_v((int8x16_t)__a, 41); }
-
-__ai float32x2_t vrndi_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrndi_v((int8x8_t)__a, 8); }
-__ai float64x1_t vrndi_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrndi_v((int8x8_t)__a, 9); }
-__ai float32x4_t vrndiq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrndiq_v((int8x16_t)__a, 40); }
-__ai float64x2_t vrndiq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrndiq_v((int8x16_t)__a, 41); }
-
-__ai float32x2_t vrndm_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrndm_v((int8x8_t)__a, 8); }
-__ai float64x1_t vrndm_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrndm_v((int8x8_t)__a, 9); }
-__ai float32x4_t vrndmq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrndmq_v((int8x16_t)__a, 40); }
-__ai float64x2_t vrndmq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrndmq_v((int8x16_t)__a, 41); }
-
-__ai float32x2_t vrndn_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrndn_v((int8x8_t)__a, 8); }
-__ai float64x1_t vrndn_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrndn_v((int8x8_t)__a, 9); }
-__ai float32x4_t vrndnq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrndnq_v((int8x16_t)__a, 40); }
-__ai float64x2_t vrndnq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrndnq_v((int8x16_t)__a, 41); }
-
-__ai float32x2_t vrndp_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrndp_v((int8x8_t)__a, 8); }
-__ai float64x1_t vrndp_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrndp_v((int8x8_t)__a, 9); }
-__ai float32x4_t vrndpq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrndpq_v((int8x16_t)__a, 40); }
-__ai float64x2_t vrndpq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrndpq_v((int8x16_t)__a, 41); }
-
-__ai float32x2_t vrndx_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrndx_v((int8x8_t)__a, 8); }
-__ai float64x1_t vrndx_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrndx_v((int8x8_t)__a, 9); }
-__ai float32x4_t vrndxq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrndxq_v((int8x16_t)__a, 40); }
-__ai float64x2_t vrndxq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrndxq_v((int8x16_t)__a, 41); }
-
-__ai float32x2_t vrnd_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vrnd_v((int8x8_t)__a, 8); }
-__ai float64x1_t vrnd_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrnd_v((int8x8_t)__a, 9); }
-__ai float32x4_t vrndq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vrndq_v((int8x16_t)__a, 40); }
-__ai float64x2_t vrndq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrndq_v((int8x16_t)__a, 41); }
-
-__ai float64x1_t vrsqrte_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vrsqrte_v((int8x8_t)__a, 9); }
-__ai float64x2_t vrsqrteq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vrsqrteq_v((int8x16_t)__a, 41); }
-
-__ai float64x1_t vrsqrts_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x1_t)__builtin_neon_vrsqrts_v((int8x8_t)__a, (int8x8_t)__b, 9); }
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vrsqrtsq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float32x2_t vsqrt_f32(float32x2_t __a) {
-  return (float32x2_t)__builtin_neon_vsqrt_v((int8x8_t)__a, 8); }
-__ai float64x1_t vsqrt_f64(float64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vsqrt_v((int8x8_t)__a, 9); }
-__ai float32x4_t vsqrtq_f32(float32x4_t __a) {
-  return (float32x4_t)__builtin_neon_vsqrtq_v((int8x16_t)__a, 40); }
-__ai float64x2_t vsqrtq_f64(float64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vsqrtq_v((int8x16_t)__a, 41); }
-
-#define vget_lane_f64(a, __b) __extension__ ({ \
-  float64x1_t __a = (a); \
-  (float64_t)__builtin_neon_vget_lane_f64(__a, __b); })
-#define vgetq_lane_f64(a, __b) __extension__ ({ \
-  float64x2_t __a = (a); \
-  (float64_t)__builtin_neon_vgetq_lane_f64(__a, __b); })
-#define vget_lane_p64(a, __b) __extension__ ({ \
-  poly64x1_t __a = (a); \
-  (poly64_t)__builtin_neon_vget_lane_i64((int64x1_t)__a, __b); })
-#define vgetq_lane_p64(a, __b) __extension__ ({ \
-  poly64x2_t __a = (a); \
-  (poly64_t)__builtin_neon_vgetq_lane_i64((int64x2_t)__a, __b); })
-
-#define vld1q_f64(__a) __extension__ ({ \
-  (float64x2_t)__builtin_neon_vld1q_v(__a, 41); })
-#define vld1_f64(__a) __extension__ ({ \
-  (float64x1_t)__builtin_neon_vld1_v(__a, 9); })
-#define vld1_p64(__a) __extension__ ({ \
-  (poly64x1_t)__builtin_neon_vld1_v(__a, 6); })
-#define vld1q_p64(__a) __extension__ ({ \
-  (poly64x2_t)__builtin_neon_vld1q_v(__a, 38); })
-
-#define vld1q_dup_f64(__a) __extension__ ({ \
-  (float64x2_t)__builtin_neon_vld1q_dup_v(__a, 41); })
-#define vld1q_dup_p64(__a) __extension__ ({ \
-  (poly64x2_t)__builtin_neon_vld1q_dup_v(__a, 38); })
-#define vld1_dup_f64(__a) __extension__ ({ \
-  (float64x1_t)__builtin_neon_vld1_dup_v(__a, 9); })
-#define vld1_dup_p64(__a) __extension__ ({ \
-  (poly64x1_t)__builtin_neon_vld1_dup_v(__a, 6); })
-
-#define vld1q_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x2_t __b = (b); \
-  (float64x2_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 41); })
-#define vld1q_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x2_t __b = (b); \
-  (poly64x2_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 38); })
-#define vld1_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x1_t __b = (b); \
-  (float64x1_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 9); })
-#define vld1_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x1_t __b = (b); \
-  (poly64x1_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 6); })
-
-#define vld1q_u8_x2(__a) __extension__ ({ \
-  uint8x16x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 48); r; })
-#define vld1q_u16_x2(__a) __extension__ ({ \
-  uint16x8x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 49); r; })
-#define vld1q_u32_x2(__a) __extension__ ({ \
-  uint32x4x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 50); r; })
-#define vld1q_u64_x2(__a) __extension__ ({ \
-  uint64x2x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 51); r; })
-#define vld1q_s8_x2(__a) __extension__ ({ \
-  int8x16x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 32); r; })
-#define vld1q_s16_x2(__a) __extension__ ({ \
-  int16x8x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 33); r; })
-#define vld1q_s32_x2(__a) __extension__ ({ \
-  int32x4x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 34); r; })
-#define vld1q_s64_x2(__a) __extension__ ({ \
-  int64x2x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 35); r; })
-#define vld1q_f16_x2(__a) __extension__ ({ \
-  float16x8x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 39); r; })
-#define vld1q_f32_x2(__a) __extension__ ({ \
-  float32x4x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 40); r; })
-#define vld1q_f64_x2(__a) __extension__ ({ \
-  float64x2x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 41); r; })
-#define vld1q_p8_x2(__a) __extension__ ({ \
-  poly8x16x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 36); r; })
-#define vld1q_p16_x2(__a) __extension__ ({ \
-  poly16x8x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 37); r; })
-#define vld1q_p64_x2(__a) __extension__ ({ \
-  poly64x2x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 38); r; })
-#define vld1_u8_x2(__a) __extension__ ({ \
-  uint8x8x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 16); r; })
-#define vld1_u16_x2(__a) __extension__ ({ \
-  uint16x4x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 17); r; })
-#define vld1_u32_x2(__a) __extension__ ({ \
-  uint32x2x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 18); r; })
-#define vld1_u64_x2(__a) __extension__ ({ \
-  uint64x1x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 19); r; })
-#define vld1_s8_x2(__a) __extension__ ({ \
-  int8x8x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 0); r; })
-#define vld1_s16_x2(__a) __extension__ ({ \
-  int16x4x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 1); r; })
-#define vld1_s32_x2(__a) __extension__ ({ \
-  int32x2x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 2); r; })
-#define vld1_s64_x2(__a) __extension__ ({ \
-  int64x1x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 3); r; })
-#define vld1_f16_x2(__a) __extension__ ({ \
-  float16x4x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 7); r; })
-#define vld1_f32_x2(__a) __extension__ ({ \
-  float32x2x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 8); r; })
-#define vld1_f64_x2(__a) __extension__ ({ \
-  float64x1x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 9); r; })
-#define vld1_p8_x2(__a) __extension__ ({ \
-  poly8x8x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 4); r; })
-#define vld1_p16_x2(__a) __extension__ ({ \
-  poly16x4x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 5); r; })
-#define vld1_p64_x2(__a) __extension__ ({ \
-  poly64x1x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 6); r; })
-
-#define vld2q_u64(__a) __extension__ ({ \
-  uint64x2x2_t r; __builtin_neon_vld2q_v(&r, __a, 51); r; })
-#define vld2q_s64(__a) __extension__ ({ \
-  int64x2x2_t r; __builtin_neon_vld2q_v(&r, __a, 35); r; })
-#define vld2q_f64(__a) __extension__ ({ \
-  float64x2x2_t r; __builtin_neon_vld2q_v(&r, __a, 41); r; })
-#define vld2_f64(__a) __extension__ ({ \
-  float64x1x2_t r; __builtin_neon_vld2_v(&r, __a, 9); r; })
-#define vld2_p64(__a) __extension__ ({ \
-  poly64x1x2_t r; __builtin_neon_vld2_v(&r, __a, 6); r; })
-#define vld2q_p64(__a) __extension__ ({ \
-  poly64x2x2_t r; __builtin_neon_vld2q_v(&r, __a, 38); r; })
-
-#define vld2q_dup_u8(__a) __extension__ ({ \
-  uint8x16x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 48); r; })
-#define vld2q_dup_u16(__a) __extension__ ({ \
-  uint16x8x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 49); r; })
-#define vld2q_dup_u32(__a) __extension__ ({ \
-  uint32x4x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 50); r; })
-#define vld2q_dup_u64(__a) __extension__ ({ \
-  uint64x2x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 51); r; })
-#define vld2q_dup_s8(__a) __extension__ ({ \
-  int8x16x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 32); r; })
-#define vld2q_dup_s16(__a) __extension__ ({ \
-  int16x8x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 33); r; })
-#define vld2q_dup_s32(__a) __extension__ ({ \
-  int32x4x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 34); r; })
-#define vld2q_dup_s64(__a) __extension__ ({ \
-  int64x2x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 35); r; })
-#define vld2q_dup_f16(__a) __extension__ ({ \
-  float16x8x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 39); r; })
-#define vld2q_dup_f32(__a) __extension__ ({ \
-  float32x4x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 40); r; })
-#define vld2q_dup_f64(__a) __extension__ ({ \
-  float64x2x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 41); r; })
-#define vld2q_dup_p8(__a) __extension__ ({ \
-  poly8x16x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 36); r; })
-#define vld2q_dup_p16(__a) __extension__ ({ \
-  poly16x8x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 37); r; })
-#define vld2q_dup_p64(__a) __extension__ ({ \
-  poly64x2x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 38); r; })
-#define vld2_dup_f64(__a) __extension__ ({ \
-  float64x1x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 9); r; })
-#define vld2_dup_p64(__a) __extension__ ({ \
-  poly64x1x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 6); r; })
-
-#define vld2q_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x16x2_t __b = (b); \
-  uint8x16x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 48); r; })
-#define vld2q_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x2x2_t __b = (b); \
-  uint64x2x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 51); r; })
-#define vld2q_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x16x2_t __b = (b); \
-  int8x16x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, __b.val[0], __b.val[1], __c, 32); r; })
-#define vld2q_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x2x2_t __b = (b); \
-  int64x2x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 35); r; })
-#define vld2q_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x2x2_t __b = (b); \
-  float64x2x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 41); r; })
-#define vld2q_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x16x2_t __b = (b); \
-  poly8x16x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 36); r; })
-#define vld2q_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x2x2_t __b = (b); \
-  poly64x2x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 38); r; })
-#define vld2_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x1x2_t __b = (b); \
-  uint64x1x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 19); r; })
-#define vld2_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x1x2_t __b = (b); \
-  int64x1x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 3); r; })
-#define vld2_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x1x2_t __b = (b); \
-  float64x1x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 9); r; })
-#define vld2_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x1x2_t __b = (b); \
-  poly64x1x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 6); r; })
-
-#define vld3q_u64(__a) __extension__ ({ \
-  uint64x2x3_t r; __builtin_neon_vld3q_v(&r, __a, 51); r; })
-#define vld3q_s64(__a) __extension__ ({ \
-  int64x2x3_t r; __builtin_neon_vld3q_v(&r, __a, 35); r; })
-#define vld3q_f64(__a) __extension__ ({ \
-  float64x2x3_t r; __builtin_neon_vld3q_v(&r, __a, 41); r; })
-#define vld3_f64(__a) __extension__ ({ \
-  float64x1x3_t r; __builtin_neon_vld3_v(&r, __a, 9); r; })
-#define vld3_p64(__a) __extension__ ({ \
-  poly64x1x3_t r; __builtin_neon_vld3_v(&r, __a, 6); r; })
-#define vld3q_p64(__a) __extension__ ({ \
-  poly64x2x3_t r; __builtin_neon_vld3q_v(&r, __a, 38); r; })
-
-#define vld3q_dup_u8(__a) __extension__ ({ \
-  uint8x16x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 48); r; })
-#define vld3q_dup_u16(__a) __extension__ ({ \
-  uint16x8x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 49); r; })
-#define vld3q_dup_u32(__a) __extension__ ({ \
-  uint32x4x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 50); r; })
-#define vld3q_dup_u64(__a) __extension__ ({ \
-  uint64x2x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 51); r; })
-#define vld3q_dup_s8(__a) __extension__ ({ \
-  int8x16x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 32); r; })
-#define vld3q_dup_s16(__a) __extension__ ({ \
-  int16x8x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 33); r; })
-#define vld3q_dup_s32(__a) __extension__ ({ \
-  int32x4x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 34); r; })
-#define vld3q_dup_s64(__a) __extension__ ({ \
-  int64x2x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 35); r; })
-#define vld3q_dup_f16(__a) __extension__ ({ \
-  float16x8x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 39); r; })
-#define vld3q_dup_f32(__a) __extension__ ({ \
-  float32x4x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 40); r; })
-#define vld3q_dup_f64(__a) __extension__ ({ \
-  float64x2x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 41); r; })
-#define vld3q_dup_p8(__a) __extension__ ({ \
-  poly8x16x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 36); r; })
-#define vld3q_dup_p16(__a) __extension__ ({ \
-  poly16x8x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 37); r; })
-#define vld3q_dup_p64(__a) __extension__ ({ \
-  poly64x2x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 38); r; })
-#define vld3_dup_f64(__a) __extension__ ({ \
-  float64x1x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 9); r; })
-#define vld3_dup_p64(__a) __extension__ ({ \
-  poly64x1x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 6); r; })
-
-#define vld3q_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x16x3_t __b = (b); \
-  uint8x16x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 48); r; })
-#define vld3q_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x2x3_t __b = (b); \
-  uint64x2x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 51); r; })
-#define vld3q_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x16x3_t __b = (b); \
-  int8x16x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, __b.val[0], __b.val[1], __b.val[2], __c, 32); r; })
-#define vld3q_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x2x3_t __b = (b); \
-  int64x2x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 35); r; })
-#define vld3q_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x2x3_t __b = (b); \
-  float64x2x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 41); r; })
-#define vld3q_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x16x3_t __b = (b); \
-  poly8x16x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 36); r; })
-#define vld3q_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x2x3_t __b = (b); \
-  poly64x2x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 38); r; })
-#define vld3_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x1x3_t __b = (b); \
-  uint64x1x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 19); r; })
-#define vld3_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x1x3_t __b = (b); \
-  int64x1x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 3); r; })
-#define vld3_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x1x3_t __b = (b); \
-  float64x1x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 9); r; })
-#define vld3_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x1x3_t __b = (b); \
-  poly64x1x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 6); r; })
-
-#define vld1q_u8_x3(__a) __extension__ ({ \
-  uint8x16x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 48); r; })
-#define vld1q_u16_x3(__a) __extension__ ({ \
-  uint16x8x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 49); r; })
-#define vld1q_u32_x3(__a) __extension__ ({ \
-  uint32x4x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 50); r; })
-#define vld1q_u64_x3(__a) __extension__ ({ \
-  uint64x2x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 51); r; })
-#define vld1q_s8_x3(__a) __extension__ ({ \
-  int8x16x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 32); r; })
-#define vld1q_s16_x3(__a) __extension__ ({ \
-  int16x8x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 33); r; })
-#define vld1q_s32_x3(__a) __extension__ ({ \
-  int32x4x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 34); r; })
-#define vld1q_s64_x3(__a) __extension__ ({ \
-  int64x2x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 35); r; })
-#define vld1q_f16_x3(__a) __extension__ ({ \
-  float16x8x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 39); r; })
-#define vld1q_f32_x3(__a) __extension__ ({ \
-  float32x4x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 40); r; })
-#define vld1q_f64_x3(__a) __extension__ ({ \
-  float64x2x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 41); r; })
-#define vld1q_p8_x3(__a) __extension__ ({ \
-  poly8x16x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 36); r; })
-#define vld1q_p16_x3(__a) __extension__ ({ \
-  poly16x8x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 37); r; })
-#define vld1q_p64_x3(__a) __extension__ ({ \
-  poly64x2x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 38); r; })
-#define vld1_u8_x3(__a) __extension__ ({ \
-  uint8x8x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 16); r; })
-#define vld1_u16_x3(__a) __extension__ ({ \
-  uint16x4x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 17); r; })
-#define vld1_u32_x3(__a) __extension__ ({ \
-  uint32x2x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 18); r; })
-#define vld1_u64_x3(__a) __extension__ ({ \
-  uint64x1x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 19); r; })
-#define vld1_s8_x3(__a) __extension__ ({ \
-  int8x8x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 0); r; })
-#define vld1_s16_x3(__a) __extension__ ({ \
-  int16x4x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 1); r; })
-#define vld1_s32_x3(__a) __extension__ ({ \
-  int32x2x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 2); r; })
-#define vld1_s64_x3(__a) __extension__ ({ \
-  int64x1x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 3); r; })
-#define vld1_f16_x3(__a) __extension__ ({ \
-  float16x4x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 7); r; })
-#define vld1_f32_x3(__a) __extension__ ({ \
-  float32x2x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 8); r; })
-#define vld1_f64_x3(__a) __extension__ ({ \
-  float64x1x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 9); r; })
-#define vld1_p8_x3(__a) __extension__ ({ \
-  poly8x8x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 4); r; })
-#define vld1_p16_x3(__a) __extension__ ({ \
-  poly16x4x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 5); r; })
-#define vld1_p64_x3(__a) __extension__ ({ \
-  poly64x1x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 6); r; })
-
-#define vld4q_u64(__a) __extension__ ({ \
-  uint64x2x4_t r; __builtin_neon_vld4q_v(&r, __a, 51); r; })
-#define vld4q_s64(__a) __extension__ ({ \
-  int64x2x4_t r; __builtin_neon_vld4q_v(&r, __a, 35); r; })
-#define vld4q_f64(__a) __extension__ ({ \
-  float64x2x4_t r; __builtin_neon_vld4q_v(&r, __a, 41); r; })
-#define vld4_f64(__a) __extension__ ({ \
-  float64x1x4_t r; __builtin_neon_vld4_v(&r, __a, 9); r; })
-#define vld4_p64(__a) __extension__ ({ \
-  poly64x1x4_t r; __builtin_neon_vld4_v(&r, __a, 6); r; })
-#define vld4q_p64(__a) __extension__ ({ \
-  poly64x2x4_t r; __builtin_neon_vld4q_v(&r, __a, 38); r; })
-
-#define vld4q_dup_u8(__a) __extension__ ({ \
-  uint8x16x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 48); r; })
-#define vld4q_dup_u16(__a) __extension__ ({ \
-  uint16x8x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 49); r; })
-#define vld4q_dup_u32(__a) __extension__ ({ \
-  uint32x4x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 50); r; })
-#define vld4q_dup_u64(__a) __extension__ ({ \
-  uint64x2x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 51); r; })
-#define vld4q_dup_s8(__a) __extension__ ({ \
-  int8x16x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 32); r; })
-#define vld4q_dup_s16(__a) __extension__ ({ \
-  int16x8x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 33); r; })
-#define vld4q_dup_s32(__a) __extension__ ({ \
-  int32x4x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 34); r; })
-#define vld4q_dup_s64(__a) __extension__ ({ \
-  int64x2x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 35); r; })
-#define vld4q_dup_f16(__a) __extension__ ({ \
-  float16x8x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 39); r; })
-#define vld4q_dup_f32(__a) __extension__ ({ \
-  float32x4x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 40); r; })
-#define vld4q_dup_f64(__a) __extension__ ({ \
-  float64x2x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 41); r; })
-#define vld4q_dup_p8(__a) __extension__ ({ \
-  poly8x16x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 36); r; })
-#define vld4q_dup_p16(__a) __extension__ ({ \
-  poly16x8x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 37); r; })
-#define vld4q_dup_p64(__a) __extension__ ({ \
-  poly64x2x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 38); r; })
-#define vld4_dup_f64(__a) __extension__ ({ \
-  float64x1x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 9); r; })
-#define vld4_dup_p64(__a) __extension__ ({ \
-  poly64x1x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 6); r; })
-
-#define vld4q_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x16x4_t __b = (b); \
-  uint8x16x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 48); r; })
-#define vld4q_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x2x4_t __b = (b); \
-  uint64x2x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 51); r; })
-#define vld4q_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x16x4_t __b = (b); \
-  int8x16x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 32); r; })
-#define vld4q_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x2x4_t __b = (b); \
-  int64x2x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 35); r; })
-#define vld4q_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x2x4_t __b = (b); \
-  float64x2x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 41); r; })
-#define vld4q_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x16x4_t __b = (b); \
-  poly8x16x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 36); r; })
-#define vld4q_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x2x4_t __b = (b); \
-  poly64x2x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 38); r; })
-#define vld4_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x1x4_t __b = (b); \
-  uint64x1x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 19); r; })
-#define vld4_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x1x4_t __b = (b); \
-  int64x1x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 3); r; })
-#define vld4_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x1x4_t __b = (b); \
-  float64x1x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 9); r; })
-#define vld4_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x1x4_t __b = (b); \
-  poly64x1x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 6); r; })
-
-#define vld1q_u8_x4(__a) __extension__ ({ \
-  uint8x16x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 48); r; })
-#define vld1q_u16_x4(__a) __extension__ ({ \
-  uint16x8x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 49); r; })
-#define vld1q_u32_x4(__a) __extension__ ({ \
-  uint32x4x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 50); r; })
-#define vld1q_u64_x4(__a) __extension__ ({ \
-  uint64x2x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 51); r; })
-#define vld1q_s8_x4(__a) __extension__ ({ \
-  int8x16x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 32); r; })
-#define vld1q_s16_x4(__a) __extension__ ({ \
-  int16x8x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 33); r; })
-#define vld1q_s32_x4(__a) __extension__ ({ \
-  int32x4x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 34); r; })
-#define vld1q_s64_x4(__a) __extension__ ({ \
-  int64x2x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 35); r; })
-#define vld1q_f16_x4(__a) __extension__ ({ \
-  float16x8x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 39); r; })
-#define vld1q_f32_x4(__a) __extension__ ({ \
-  float32x4x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 40); r; })
-#define vld1q_f64_x4(__a) __extension__ ({ \
-  float64x2x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 41); r; })
-#define vld1q_p8_x4(__a) __extension__ ({ \
-  poly8x16x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 36); r; })
-#define vld1q_p16_x4(__a) __extension__ ({ \
-  poly16x8x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 37); r; })
-#define vld1q_p64_x4(__a) __extension__ ({ \
-  poly64x2x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 38); r; })
-#define vld1_u8_x4(__a) __extension__ ({ \
-  uint8x8x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 16); r; })
-#define vld1_u16_x4(__a) __extension__ ({ \
-  uint16x4x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 17); r; })
-#define vld1_u32_x4(__a) __extension__ ({ \
-  uint32x2x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 18); r; })
-#define vld1_u64_x4(__a) __extension__ ({ \
-  uint64x1x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 19); r; })
-#define vld1_s8_x4(__a) __extension__ ({ \
-  int8x8x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 0); r; })
-#define vld1_s16_x4(__a) __extension__ ({ \
-  int16x4x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 1); r; })
-#define vld1_s32_x4(__a) __extension__ ({ \
-  int32x2x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 2); r; })
-#define vld1_s64_x4(__a) __extension__ ({ \
-  int64x1x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 3); r; })
-#define vld1_f16_x4(__a) __extension__ ({ \
-  float16x4x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 7); r; })
-#define vld1_f32_x4(__a) __extension__ ({ \
-  float32x2x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 8); r; })
-#define vld1_f64_x4(__a) __extension__ ({ \
-  float64x1x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 9); r; })
-#define vld1_p8_x4(__a) __extension__ ({ \
-  poly8x8x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 4); r; })
-#define vld1_p16_x4(__a) __extension__ ({ \
-  poly16x4x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 5); r; })
-#define vld1_p64_x4(__a) __extension__ ({ \
-  poly64x1x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 6); r; })
-
-__ai float64x1_t vmax_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x1_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 9); }
-__ai float64x2_t vmaxq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai int8x16_t vpmaxq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vpmaxq_v(__a, __b, 32); }
-__ai int16x8_t vpmaxq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vpmaxq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai float32x4_t vpmaxq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-__ai float64x2_t vpmaxq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float64x1_t vmin_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x1_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 9); }
-__ai float64x2_t vminq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai int8x16_t vpminq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vpminq_v(__a, __b, 32); }
-__ai int16x8_t vpminq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vpminq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai uint8x16_t vpminq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vpminq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vpminq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai float32x4_t vpminq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-__ai float64x2_t vpminq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai float64x1_t vmla_f64(float64x1_t __a, float64x1_t __b, float64x1_t __c) {
-  return __a + (__b * __c); }
-__ai float64x2_t vmlaq_f64(float64x2_t __a, float64x2_t __b, float64x2_t __c) {
-  return __a + (__b * __c); }
-
-__ai float64x1_t vmls_f64(float64x1_t __a, float64x1_t __b, float64x1_t __c) {
-  return __a - (__b * __c); }
-__ai float64x2_t vmlsq_f64(float64x2_t __a, float64x2_t __b, float64x2_t __c) {
-  return __a - (__b * __c); }
-
-__ai float64x1_t vmov_n_f64(float64_t __a) {
-  return (float64x1_t){ __a }; }
-__ai float64x2_t vmovq_n_f64(float64_t __a) {
-  return (float64x2_t){ __a, __a }; }
-
-__ai float64x1_t vmul_f64(float64x1_t __a, float64x1_t __b) {
-  return __a * __b; }
-__ai float64x2_t vmulq_f64(float64x2_t __a, float64x2_t __b) {
-  return __a * __b; }
-
-__ai float32x2_t vmulx_f32(float32x2_t __a, float32x2_t __b) {
-  return (float32x2_t)__builtin_neon_vmulx_v((int8x8_t)__a, (int8x8_t)__b, 8); }
-__ai float64x1_t vmulx_f64(float64x1_t __a, float64x1_t __b) {
-  return (float64x1_t)__builtin_neon_vmulx_v((int8x8_t)__a, (int8x8_t)__b, 9); }
-__ai float32x4_t vmulxq_f32(float32x4_t __a, float32x4_t __b) {
-  return (float32x4_t)__builtin_neon_vmulxq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
-__ai float64x2_t vmulxq_f64(float64x2_t __a, float64x2_t __b) {
-  return (float64x2_t)__builtin_neon_vmulxq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
-
-__ai int64x1_t vneg_s64(int64x1_t __a) {
-  return  -__a; }
-__ai float64x1_t vneg_f64(float64x1_t __a) {
-  return  -__a; }
-__ai float64x2_t vnegq_f64(float64x2_t __a) {
-  return  -__a; }
-__ai int64x2_t vnegq_s64(int64x2_t __a) {
-  return  -__a; }
-
-__ai int64x1_t vqabs_s64(int64x1_t __a) {
-  return (int64x1_t)__builtin_neon_vqabs_v((int8x8_t)__a, 3); }
-__ai int64x2_t vqabsq_s64(int64x2_t __a) {
-  return (int64x2_t)__builtin_neon_vqabsq_v((int8x16_t)__a, 35); }
-
-__ai int64x1_t vqneg_s64(int64x1_t __a) {
-  return (int64x1_t)__builtin_neon_vqneg_v((int8x8_t)__a, 3); }
-__ai int64x2_t vqnegq_s64(int64x2_t __a) {
-  return (int64x2_t)__builtin_neon_vqnegq_v((int8x16_t)__a, 35); }
-
-#define vqrshrn_high_n_s16(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int16x8_t __b = (b); \
-  (int8x16_t)vcombine_s16(__a, vqrshrn_n_s16(__b, __c)); })
-#define vqrshrn_high_n_s32(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int32x4_t __b = (b); \
-  (int16x8_t)vcombine_s32(__a, vqrshrn_n_s32(__b, __c)); })
-#define vqrshrn_high_n_s64(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int64x2_t __b = (b); \
-  (int32x4_t)vcombine_s64(__a, vqrshrn_n_s64(__b, __c)); })
-#define vqrshrn_high_n_u16(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint8x16_t)vcombine_u16(__a, vqrshrn_n_u16(__b, __c)); })
-#define vqrshrn_high_n_u32(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint16x8_t)vcombine_u32(__a, vqrshrn_n_u32(__b, __c)); })
-#define vqrshrn_high_n_u64(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint32x4_t)vcombine_u64(__a, vqrshrn_n_u64(__b, __c)); })
-
-#define vqrshrun_high_n_s16(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int16x8_t __b = (b); \
-  (int8x16_t)vcombine_s16(__a, vqrshrun_n_s16(__b, __c)); })
-#define vqrshrun_high_n_s32(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int32x4_t __b = (b); \
-  (int16x8_t)vcombine_s32(__a, vqrshrun_n_s32(__b, __c)); })
-#define vqrshrun_high_n_s64(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int64x2_t __b = (b); \
-  (int32x4_t)vcombine_s64(__a, vqrshrun_n_s64(__b, __c)); })
-
-#define vqshrn_high_n_s16(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int16x8_t __b = (b); \
-  (int8x16_t)vcombine_s16(__a, vqshrn_n_s16(__b, __c)); })
-#define vqshrn_high_n_s32(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int32x4_t __b = (b); \
-  (int16x8_t)vcombine_s32(__a, vqshrn_n_s32(__b, __c)); })
-#define vqshrn_high_n_s64(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int64x2_t __b = (b); \
-  (int32x4_t)vcombine_s64(__a, vqshrn_n_s64(__b, __c)); })
-#define vqshrn_high_n_u16(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint8x16_t)vcombine_u16(__a, vqshrn_n_u16(__b, __c)); })
-#define vqshrn_high_n_u32(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint16x8_t)vcombine_u32(__a, vqshrn_n_u32(__b, __c)); })
-#define vqshrn_high_n_u64(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint32x4_t)vcombine_u64(__a, vqshrn_n_u64(__b, __c)); })
-
-#define vqshrun_high_n_s16(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int16x8_t __b = (b); \
-  (int8x16_t)vcombine_s16(__a, vqshrun_n_s16(__b, __c)); })
-#define vqshrun_high_n_s32(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int32x4_t __b = (b); \
-  (int16x8_t)vcombine_s32(__a, vqshrun_n_s32(__b, __c)); })
-#define vqshrun_high_n_s64(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int64x2_t __b = (b); \
-  (int32x4_t)vcombine_s64(__a, vqshrun_n_s64(__b, __c)); })
-
-__ai int8x16_t vqmovn_high_s16(int8x8_t __a, int16x8_t __b) {
-  int8x8_t __a1 = vqmovn_s16(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
-__ai int16x8_t vqmovn_high_s32(int16x4_t __a, int32x4_t __b) {
-  int16x4_t __a1 = vqmovn_s32(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
-__ai int32x4_t vqmovn_high_s64(int32x2_t __a, int64x2_t __b) {
-  int32x2_t __a1 = vqmovn_s64(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __a, uint16x8_t __b) {
-  uint8x8_t __a1 = vqmovn_u16(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __a, uint32x4_t __b) {
-  uint16x4_t __a1 = vqmovn_u32(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __a, uint64x2_t __b) {
-  uint32x2_t __a1 = vqmovn_u64(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
-
-__ai int8x8_t vrbit_s8(int8x8_t __a) {
-  return (int8x8_t)__builtin_neon_vrbit_v(__a, 0); }
-__ai uint8x8_t vrbit_u8(uint8x8_t __a) {
-  return (uint8x8_t)__builtin_neon_vrbit_v((int8x8_t)__a, 16); }
-__ai poly8x8_t vrbit_p8(poly8x8_t __a) {
-  return (poly8x8_t)__builtin_neon_vrbit_v((int8x8_t)__a, 4); }
-__ai int8x16_t vrbitq_s8(int8x16_t __a) {
-  return (int8x16_t)__builtin_neon_vrbitq_v(__a, 32); }
-__ai uint8x16_t vrbitq_u8(uint8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vrbitq_v((int8x16_t)__a, 48); }
-__ai poly8x16_t vrbitq_p8(poly8x16_t __a) {
-  return (poly8x16_t)__builtin_neon_vrbitq_v((int8x16_t)__a, 36); }
-
-__ai int8x8_t vreinterpret_s8_f64(float64x1_t __a) {
-  return (int8x8_t)__a; }
-__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __a) {
-  return (int8x8_t)__a; }
-__ai int16x4_t vreinterpret_s16_f64(float64x1_t __a) {
-  return (int16x4_t)__a; }
-__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __a) {
-  return (int16x4_t)__a; }
-__ai int32x2_t vreinterpret_s32_f64(float64x1_t __a) {
-  return (int32x2_t)__a; }
-__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __a) {
-  return (int32x2_t)__a; }
-__ai int64x1_t vreinterpret_s64_f64(float64x1_t __a) {
-  return (int64x1_t)__a; }
-__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __a) {
-  return (int64x1_t)__a; }
-__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __a) {
-  return (uint8x8_t)__a; }
-__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __a) {
-  return (uint16x4_t)__a; }
-__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __a) {
-  return (uint32x2_t)__a; }
-__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __a) {
-  return (uint64x1_t)__a; }
-__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __a) {
-  return (uint64x1_t)__a; }
-__ai float16x4_t vreinterpret_f16_f64(float64x1_t __a) {
-  return (float16x4_t)__a; }
-__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __a) {
-  return (float16x4_t)__a; }
-__ai float32x2_t vreinterpret_f32_f64(float64x1_t __a) {
-  return (float32x2_t)__a; }
-__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __a) {
-  return (float32x2_t)__a; }
-__ai float64x1_t vreinterpret_f64_s8(int8x8_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_s16(int16x4_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_s32(int32x2_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_s64(int64x1_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_f16(float16x4_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_f32(float32x2_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __a) {
-  return (float64x1_t)__a; }
-__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __a) {
-  return (float64x1_t)__a; }
-__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __a) {
-  return (poly8x8_t)__a; }
-__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __a) {
-  return (poly16x4_t)__a; }
-__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __a) {
-  return (poly64x1_t)__a; }
-__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __a) {
-  return (poly64x1_t)__a; }
-__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __a) {
-  return (int8x16_t)__a; }
-__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __a) {
-  return (int8x16_t)__a; }
-__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __a) {
-  return (int16x8_t)__a; }
-__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __a) {
-  return (int16x8_t)__a; }
-__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __a) {
-  return (int32x4_t)__a; }
-__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __a) {
-  return (int32x4_t)__a; }
-__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __a) {
-  return (int64x2_t)__a; }
-__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __a) {
-  return (int64x2_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __a) {
-  return (uint8x16_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __a) {
-  return (uint16x8_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __a) {
-  return (uint32x4_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __a) {
-  return (uint64x2_t)__a; }
-__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __a) {
-  return (uint64x2_t)__a; }
-__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __a) {
-  return (float16x8_t)__a; }
-__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __a) {
-  return (float16x8_t)__a; }
-__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __a) {
-  return (float32x4_t)__a; }
-__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __a) {
-  return (float32x4_t)__a; }
-__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __a) {
-  return (float64x2_t)__a; }
-__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __a) {
-  return (float64x2_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __a) {
-  return (poly8x16_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __a) {
-  return (poly16x8_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __a) {
-  return (poly64x2_t)__a; }
-__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __a) {
-  return (poly64x2_t)__a; }
-
-#define vrshrn_high_n_s16(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int16x8_t __b = (b); \
-  (int8x16_t)vcombine_s16(__a, vrshrn_n_s16(__b, __c)); })
-#define vrshrn_high_n_s32(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int32x4_t __b = (b); \
-  (int16x8_t)vcombine_s32(__a, vrshrn_n_s32(__b, __c)); })
-#define vrshrn_high_n_s64(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int64x2_t __b = (b); \
-  (int32x4_t)vcombine_s64(__a, vrshrn_n_s64(__b, __c)); })
-#define vrshrn_high_n_u16(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint8x16_t)vcombine_u16(__a, vrshrn_n_u16(__b, __c)); })
-#define vrshrn_high_n_u32(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint16x8_t)vcombine_u32(__a, vrshrn_n_u32(__b, __c)); })
-#define vrshrn_high_n_u64(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint32x4_t)vcombine_u64(__a, vrshrn_n_u64(__b, __c)); })
-
-__ai float32_t vabds_f32(float32_t __a, float32_t __b) {
-  return (float32_t)__builtin_neon_vabds_f32(__a, __b); }
-__ai float64_t vabdd_f64(float64_t __a, float64_t __b) {
-  return (float64_t)__builtin_neon_vabdd_f64(__a, __b); }
-
-__ai int64_t vabsd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vabsd_s64(__a); }
-
-__ai int64_t vaddd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vaddd_s64(__a, __b); }
-__ai uint64_t vaddd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vaddd_u64(__a, __b); }
-
-__ai float32_t vpadds_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vpadds_f32(__a); }
-__ai int64_t vpaddd_s64(int64x2_t __a) {
-  return (int64_t)__builtin_neon_vpaddd_s64(__a); }
-__ai float64_t vpaddd_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vpaddd_f64(__a); }
-__ai uint64_t vpaddd_u64(uint64x2_t __a) {
-  return (uint64_t)__builtin_neon_vpaddd_u64((int64x2_t)__a); }
-
-__ai int64_t vceqd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vceqd_s64(__a, __b); }
-__ai uint64_t vceqd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vceqd_u64(__a, __b); }
-
-__ai int64_t vceqzd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vceqzd_s64(__a); }
-__ai uint64_t vceqzd_u64(uint64_t __a) {
-  return (uint64_t)__builtin_neon_vceqzd_u64(__a); }
-
-__ai int64_t vcged_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vcged_s64(__a, __b); }
-
-__ai int64_t vcgezd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vcgezd_s64(__a); }
-
-__ai int64_t vcgtd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vcgtd_s64(__a, __b); }
-
-__ai int64_t vcgtzd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vcgtzd_s64(__a); }
-
-__ai uint64_t vcgtd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vcgtd_u64(__a, __b); }
-
-__ai uint64_t vcged_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vcged_u64(__a, __b); }
-
-__ai int64_t vcled_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vcled_s64(__a, __b); }
-__ai uint64_t vcled_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vcled_u64(__a, __b); }
-
-__ai int64_t vclezd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vclezd_s64(__a); }
-
-__ai int64_t vcltd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vcltd_s64(__a, __b); }
-__ai uint64_t vcltd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vcltd_u64(__a, __b); }
-
-__ai int64_t vcltzd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vcltzd_s64(__a); }
-
-__ai int64_t vtstd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vtstd_s64(__a, __b); }
-__ai uint64_t vtstd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vtstd_u64(__a, __b); }
-
-__ai uint32_t vcages_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vcages_f32(__a, __b); }
-__ai uint64_t vcaged_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vcaged_f64(__a, __b); }
-
-__ai uint32_t vcagts_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vcagts_f32(__a, __b); }
-__ai uint64_t vcagtd_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vcagtd_f64(__a, __b); }
-
-__ai uint32_t vcales_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vcales_f32(__a, __b); }
-__ai uint64_t vcaled_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vcaled_f64(__a, __b); }
-
-__ai uint32_t vcalts_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vcalts_f32(__a, __b); }
-__ai uint64_t vcaltd_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vcaltd_f64(__a, __b); }
-
-__ai uint32_t vceqs_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vceqs_f32(__a, __b); }
-__ai uint64_t vceqd_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vceqd_f64(__a, __b); }
-
-__ai uint32_t vceqzs_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vceqzs_f32(__a); }
-__ai uint64_t vceqzd_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vceqzd_f64(__a); }
-
-__ai uint32_t vcges_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vcges_f32(__a, __b); }
-__ai uint64_t vcged_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vcged_f64(__a, __b); }
-
-__ai uint32_t vcgezs_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vcgezs_f32(__a); }
-__ai uint64_t vcgezd_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vcgezd_f64(__a); }
-
-__ai uint32_t vcgts_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vcgts_f32(__a, __b); }
-__ai uint64_t vcgtd_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vcgtd_f64(__a, __b); }
-
-__ai uint32_t vcgtzs_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vcgtzs_f32(__a); }
-__ai uint64_t vcgtzd_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vcgtzd_f64(__a); }
-
-__ai uint32_t vcles_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vcles_f32(__a, __b); }
-__ai uint64_t vcled_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vcled_f64(__a, __b); }
-
-__ai uint32_t vclezs_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vclezs_f32(__a); }
-__ai uint64_t vclezd_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vclezd_f64(__a); }
-
-__ai uint32_t vclts_f32(float32_t __a, float32_t __b) {
-  return (uint32_t)__builtin_neon_vclts_f32(__a, __b); }
-__ai uint64_t vcltd_f64(float64_t __a, float64_t __b) {
-  return (uint64_t)__builtin_neon_vcltd_f64(__a, __b); }
-
-__ai uint32_t vcltzs_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vcltzs_f32(__a); }
-__ai uint64_t vcltzd_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vcltzd_f64(__a); }
-
-__ai int64_t vcvtad_s64_f64(float64_t __a) {
-  return (int64_t)__builtin_neon_vcvtad_s64_f64(__a); }
-
-__ai int32_t vcvtas_s32_f32(float32_t __a) {
-  return (int32_t)__builtin_neon_vcvtas_s32_f32(__a); }
-
-__ai uint64_t vcvtad_u64_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vcvtad_u64_f64(__a); }
-
-__ai uint32_t vcvtas_u32_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vcvtas_u32_f32(__a); }
-
-__ai int64_t vcvtmd_s64_f64(float64_t __a) {
-  return (int64_t)__builtin_neon_vcvtmd_s64_f64(__a); }
-
-__ai int32_t vcvtms_s32_f32(float32_t __a) {
-  return (int32_t)__builtin_neon_vcvtms_s32_f32(__a); }
-
-__ai uint64_t vcvtmd_u64_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vcvtmd_u64_f64(__a); }
-
-__ai uint32_t vcvtms_u32_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vcvtms_u32_f32(__a); }
-
-__ai int64_t vcvtnd_s64_f64(float64_t __a) {
-  return (int64_t)__builtin_neon_vcvtnd_s64_f64(__a); }
-
-__ai int32_t vcvtns_s32_f32(float32_t __a) {
-  return (int32_t)__builtin_neon_vcvtns_s32_f32(__a); }
-
-__ai uint64_t vcvtnd_u64_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vcvtnd_u64_f64(__a); }
-
-__ai uint32_t vcvtns_u32_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vcvtns_u32_f32(__a); }
-
-__ai int64_t vcvtpd_s64_f64(float64_t __a) {
-  return (int64_t)__builtin_neon_vcvtpd_s64_f64(__a); }
-
-__ai int32_t vcvtps_s32_f32(float32_t __a) {
-  return (int32_t)__builtin_neon_vcvtps_s32_f32(__a); }
-
-__ai uint64_t vcvtpd_u64_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vcvtpd_u64_f64(__a); }
-
-__ai uint32_t vcvtps_u32_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vcvtps_u32_f32(__a); }
-
-__ai float32_t vcvtxd_f32_f64(float64_t __a) {
-  return (float32_t)__builtin_neon_vcvtxd_f32_f64(__a); }
-
-__ai int64_t vcvtd_s64_f64(float64_t __a) {
-  return (int64_t)__builtin_neon_vcvtd_s64_f64(__a); }
-
-__ai int32_t vcvts_s32_f32(float32_t __a) {
-  return (int32_t)__builtin_neon_vcvts_s32_f32(__a); }
-
-#define vcvts_n_s32_f32(a, __b) __extension__ ({ \
-  float32_t __a = (a); \
-  (int32_t)__builtin_neon_vcvts_n_s32_f32(__a, __b); })
-
-#define vcvtd_n_s64_f64(a, __b) __extension__ ({ \
-  float64_t __a = (a); \
-  (int64_t)__builtin_neon_vcvtd_n_s64_f64(__a, __b); })
-
-__ai uint64_t vcvtd_u64_f64(float64_t __a) {
-  return (uint64_t)__builtin_neon_vcvtd_u64_f64(__a); }
-
-__ai uint32_t vcvts_u32_f32(float32_t __a) {
-  return (uint32_t)__builtin_neon_vcvts_u32_f32(__a); }
-
-#define vcvts_n_u32_f32(a, __b) __extension__ ({ \
-  float32_t __a = (a); \
-  (uint32_t)__builtin_neon_vcvts_n_u32_f32(__a, __b); })
-
-#define vcvtd_n_u64_f64(a, __b) __extension__ ({ \
-  float64_t __a = (a); \
-  (uint64_t)__builtin_neon_vcvtd_n_u64_f64(__a, __b); })
-
-__ai float32_t vpmaxnms_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vpmaxnms_f32(__a); }
-__ai float64_t vpmaxnmqd_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vpmaxnmqd_f64(__a); }
-
-__ai float32_t vpmaxs_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vpmaxs_f32(__a); }
-__ai float64_t vpmaxqd_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vpmaxqd_f64(__a); }
-
-__ai float32_t vpminnms_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vpminnms_f32(__a); }
-__ai float64_t vpminnmqd_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vpminnmqd_f64(__a); }
-
-__ai float32_t vpmins_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vpmins_f32(__a); }
-__ai float64_t vpminqd_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vpminqd_f64(__a); }
-
-#define vfmas_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32_t __a = (a); float32_t __b = (b); float32x2_t __c = (c); \
-  (float32_t)__builtin_neon_vfmas_lane_f32(__a, __b, __c, __d); })
-#define vfmad_lane_f64(a, b, c, __d) __extension__ ({ \
-  float64_t __a = (a); float64_t __b = (b); float64x1_t __c = (c); \
-  (float64_t)__builtin_neon_vfmad_lane_f64(__a, __b, __c, __d); })
-
-#define vfmas_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32_t __a = (a); float32_t __b = (b); float32x4_t __c = (c); \
-  (float32_t)__builtin_neon_vfmas_laneq_f32(__a, __b, __c, __d); })
-#define vfmad_laneq_f64(a, b, c, __d) __extension__ ({ \
-  float64_t __a = (a); float64_t __b = (b); float64x2_t __c = (c); \
-  (float64_t)__builtin_neon_vfmad_laneq_f64(__a, __b, __c, __d); })
-
-#define vfmss_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32_t __a = (a); float32_t __b = (b); float32x2_t __c = (c); \
-  float32_t __a1 = __a; \
-  float32_t __b1 = __b; \
-  float32x2_t __c1 = __c; \
-  vfmas_lane_f32(__a1, __b1, -__c1, __d); })
-#define vfmsd_lane_f64(a, b, c, __d) __extension__ ({ \
-  float64_t __a = (a); float64_t __b = (b); float64x1_t __c = (c); \
-  float64_t __a1 = __a; \
-  float64_t __b1 = __b; \
-  float64x1_t __c1 = __c; \
-  vfmad_lane_f64(__a1, __b1, -__c1, __d); })
-
-#define vfmss_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32_t __a = (a); float32_t __b = (b); float32x4_t __c = (c); \
-  float32_t __a1 = __a; \
-  float32_t __b1 = __b; \
-  float32x4_t __c1 = __c; \
-  vfmas_laneq_f32(__a1, __b1, -__c1, __d); })
-#define vfmsd_laneq_f64(a, b, c, __d) __extension__ ({ \
-  float64_t __a = (a); float64_t __b = (b); float64x2_t __c = (c); \
-  float64_t __a1 = __a; \
-  float64_t __b1 = __b; \
-  float64x2_t __c1 = __c; \
-  vfmad_laneq_f64(__a1, __b1, -__c1, __d); })
-
-__ai float32_t vmulxs_f32(float32_t __a, float32_t __b) {
-  return (float32_t)__builtin_neon_vmulxs_f32(__a, __b); }
-__ai float64_t vmulxd_f64(float64_t __a, float64_t __b) {
-  return (float64_t)__builtin_neon_vmulxd_f64(__a, __b); }
-
-#define vmulxs_lane_f32(a, b, __c) __extension__ ({ \
-  float32_t __a = (a); float32x2_t __b = (b); \
-  float32_t __d1 = vget_lane_f32(__b, __c);\
-  vmulxs_f32(__a, __d1); })
-#define vmulxd_lane_f64(a, b, __c) __extension__ ({ \
-  float64_t __a = (a); float64x1_t __b = (b); \
-  float64_t __d1 = vget_lane_f64(__b, __c);\
-  vmulxd_f64(__a, __d1); })
-
-#define vmulxs_laneq_f32(a, b, __c) __extension__ ({ \
-  float32_t __a = (a); float32x4_t __b = (b); \
-  float32_t __d1 = vgetq_lane_f32(__b, __c);\
-  vmulxs_f32(__a, __d1); })
-#define vmulxd_laneq_f64(a, b, __c) __extension__ ({ \
-  float64_t __a = (a); float64x2_t __b = (b); \
-  float64_t __d1 = vgetq_lane_f64(__b, __c);\
-  vmulxd_f64(__a, __d1); })
-
-#define vmuls_lane_f32(a, b, __c) __extension__ ({ \
-  float32_t __a = (a); float32x2_t __b = (b); \
-  float32_t __d1 = vget_lane_f32(__b, __c);\
-  __a * __d1; })
-#define vmuld_lane_f64(a, b, __c) __extension__ ({ \
-  float64_t __a = (a); float64x1_t __b = (b); \
-  float64_t __d1 = vget_lane_f64(__b, __c);\
-  __a * __d1; })
-
-#define vmuls_laneq_f32(a, b, __c) __extension__ ({ \
-  float32_t __a = (a); float32x4_t __b = (b); \
-  float32_t __d1 = vgetq_lane_f32(__b, __c);\
-  __a * __d1; })
-#define vmuld_laneq_f64(a, b, __c) __extension__ ({ \
-  float64_t __a = (a); float64x2_t __b = (b); \
-  float64_t __d1 = vgetq_lane_f64(__b, __c);\
-  __a * __d1; })
-
-__ai float32_t vrecpes_f32(float32_t __a) {
-  return (float32_t)__builtin_neon_vrecpes_f32(__a); }
-__ai float64_t vrecped_f64(float64_t __a) {
-  return (float64_t)__builtin_neon_vrecped_f64(__a); }
-
-__ai float32_t vrecpss_f32(float32_t __a, float32_t __b) {
-  return (float32_t)__builtin_neon_vrecpss_f32(__a, __b); }
-__ai float64_t vrecpsd_f64(float64_t __a, float64_t __b) {
-  return (float64_t)__builtin_neon_vrecpsd_f64(__a, __b); }
-
-__ai float32_t vrecpxs_f32(float32_t __a) {
-  return (float32_t)__builtin_neon_vrecpxs_f32(__a); }
-__ai float64_t vrecpxd_f64(float64_t __a) {
-  return (float64_t)__builtin_neon_vrecpxd_f64(__a); }
-
-__ai float32_t vrsqrtes_f32(float32_t __a) {
-  return (float32_t)__builtin_neon_vrsqrtes_f32(__a); }
-__ai float64_t vrsqrted_f64(float64_t __a) {
-  return (float64_t)__builtin_neon_vrsqrted_f64(__a); }
-
-__ai float32_t vrsqrtss_f32(float32_t __a, float32_t __b) {
-  return (float32_t)__builtin_neon_vrsqrtss_f32(__a, __b); }
-__ai float64_t vrsqrtsd_f64(float64_t __a, float64_t __b) {
-  return (float64_t)__builtin_neon_vrsqrtsd_f64(__a, __b); }
-
-#define vget_lane_f16(a, __b) __extension__ ({ \
-  float16x4_t __a = (a); \
-  int16x4_t __a1 = vreinterpret_s16_f16(__a);\
-  vget_lane_s16(__a1, __b); })
-#define vgetq_lane_f16(a, __b) __extension__ ({ \
-  float16x8_t __a = (a); \
-  int16x8_t __a1 = vreinterpretq_s16_f16(__a);\
-  vgetq_lane_s16(__a1, __b); })
-
-__ai int64_t vnegd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vnegd_s64(__a); }
-
-__ai int8_t vqaddb_s8(int8_t __a, int8_t __b) {
-  return (int8_t)__builtin_neon_vqaddb_s8(__a, __b); }
-__ai int16_t vqaddh_s16(int16_t __a, int16_t __b) {
-  return (int16_t)__builtin_neon_vqaddh_s16(__a, __b); }
-__ai int32_t vqadds_s32(int32_t __a, int32_t __b) {
-  return (int32_t)__builtin_neon_vqadds_s32(__a, __b); }
-__ai int64_t vqaddd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vqaddd_s64(__a, __b); }
-__ai uint8_t vqaddb_u8(uint8_t __a, uint8_t __b) {
-  return (uint8_t)__builtin_neon_vqaddb_u8(__a, __b); }
-__ai uint16_t vqaddh_u16(uint16_t __a, uint16_t __b) {
-  return (uint16_t)__builtin_neon_vqaddh_u16(__a, __b); }
-__ai uint32_t vqadds_u32(uint32_t __a, uint32_t __b) {
-  return (uint32_t)__builtin_neon_vqadds_u32(__a, __b); }
-__ai uint64_t vqaddd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vqaddd_u64(__a, __b); }
-
-__ai int8_t vqrshlb_s8(int8_t __a, int8_t __b) {
-  return (int8_t)__builtin_neon_vqrshlb_s8(__a, __b); }
-__ai int16_t vqrshlh_s16(int16_t __a, int16_t __b) {
-  return (int16_t)__builtin_neon_vqrshlh_s16(__a, __b); }
-__ai int32_t vqrshls_s32(int32_t __a, int32_t __b) {
-  return (int32_t)__builtin_neon_vqrshls_s32(__a, __b); }
-__ai int64_t vqrshld_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vqrshld_s64(__a, __b); }
-__ai uint8_t vqrshlb_u8(uint8_t __a, uint8_t __b) {
-  return (uint8_t)__builtin_neon_vqrshlb_u8(__a, __b); }
-__ai uint16_t vqrshlh_u16(uint16_t __a, uint16_t __b) {
-  return (uint16_t)__builtin_neon_vqrshlh_u16(__a, __b); }
-__ai uint32_t vqrshls_u32(uint32_t __a, uint32_t __b) {
-  return (uint32_t)__builtin_neon_vqrshls_u32(__a, __b); }
-__ai uint64_t vqrshld_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vqrshld_u64(__a, __b); }
-
-__ai int8_t vqshlb_s8(int8_t __a, int8_t __b) {
-  return (int8_t)__builtin_neon_vqshlb_s8(__a, __b); }
-__ai int16_t vqshlh_s16(int16_t __a, int16_t __b) {
-  return (int16_t)__builtin_neon_vqshlh_s16(__a, __b); }
-__ai int32_t vqshls_s32(int32_t __a, int32_t __b) {
-  return (int32_t)__builtin_neon_vqshls_s32(__a, __b); }
-__ai int64_t vqshld_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vqshld_s64(__a, __b); }
-__ai uint8_t vqshlb_u8(uint8_t __a, uint8_t __b) {
-  return (uint8_t)__builtin_neon_vqshlb_u8(__a, __b); }
-__ai uint16_t vqshlh_u16(uint16_t __a, uint16_t __b) {
-  return (uint16_t)__builtin_neon_vqshlh_u16(__a, __b); }
-__ai uint32_t vqshls_u32(uint32_t __a, uint32_t __b) {
-  return (uint32_t)__builtin_neon_vqshls_u32(__a, __b); }
-__ai uint64_t vqshld_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vqshld_u64(__a, __b); }
-
-__ai int8_t vqsubb_s8(int8_t __a, int8_t __b) {
-  return (int8_t)__builtin_neon_vqsubb_s8(__a, __b); }
-__ai int16_t vqsubh_s16(int16_t __a, int16_t __b) {
-  return (int16_t)__builtin_neon_vqsubh_s16(__a, __b); }
-__ai int32_t vqsubs_s32(int32_t __a, int32_t __b) {
-  return (int32_t)__builtin_neon_vqsubs_s32(__a, __b); }
-__ai int64_t vqsubd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vqsubd_s64(__a, __b); }
-__ai uint8_t vqsubb_u8(uint8_t __a, uint8_t __b) {
-  return (uint8_t)__builtin_neon_vqsubb_u8(__a, __b); }
-__ai uint16_t vqsubh_u16(uint16_t __a, uint16_t __b) {
-  return (uint16_t)__builtin_neon_vqsubh_u16(__a, __b); }
-__ai uint32_t vqsubs_u32(uint32_t __a, uint32_t __b) {
-  return (uint32_t)__builtin_neon_vqsubs_u32(__a, __b); }
-__ai uint64_t vqsubd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vqsubd_u64(__a, __b); }
-
-__ai int64_t vrshld_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vrshld_s64(__a, __b); }
-__ai uint64_t vrshld_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vrshld_u64(__a, __b); }
-
-__ai float64_t vcvtd_f64_s64(int64_t __a) {
-  return (float64_t)__builtin_neon_vcvtd_f64_s64(__a); }
-
-__ai float32_t vcvts_f32_s32(int32_t __a) {
-  return (float32_t)__builtin_neon_vcvts_f32_s32(__a); }
-
-#define vcvts_n_f32_s32(a, __b) __extension__ ({ \
-  int32_t __a = (a); \
-  (float32_t)__builtin_neon_vcvts_n_f32_s32(__a, __b); })
-#define vcvts_n_f32_u32(a, __b) __extension__ ({ \
-  uint32_t __a = (a); \
-  (float32_t)__builtin_neon_vcvts_n_f32_u32(__a, __b); })
-
-#define vcvtd_n_f64_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (float64_t)__builtin_neon_vcvtd_n_f64_s64(__a, __b); })
-#define vcvtd_n_f64_u64(a, __b) __extension__ ({ \
-  uint64_t __a = (a); \
-  (float64_t)__builtin_neon_vcvtd_n_f64_u64(__a, __b); })
-
-#define vset_lane_f16(a, b, __c) __extension__ ({ \
-  float16_t __a = (a); float16x4_t __b = (b); \
-  int16_t __a1 = (int16_t)__a;\
-  int16x4_t __b1 = vreinterpret_s16_f16(b);\
-  int16x4_t __b2 = vset_lane_s16(__a1, __b1, __c);\
-  vreinterpret_f16_s16(__b2); })
-#define vsetq_lane_f16(a, b, __c) __extension__ ({ \
-  float16_t __a = (a); float16x8_t __b = (b); \
-  int16_t __a1 = (int16_t)__a;\
-  int16x8_t __b1 = vreinterpretq_s16_f16(b);\
-  int16x8_t __b2 = vsetq_lane_s16(__a1, __b1, __c);\
-  vreinterpretq_f16_s16(__b2); })
-
-__ai int64_t vshld_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vshld_s64(__a, __b); }
-__ai uint64_t vshld_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vshld_u64(__a, __b); }
-
-#define vshld_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int64_t)__builtin_neon_vshld_n_s64(__a, __b); })
-#define vshld_n_u64(a, __b) __extension__ ({ \
-  uint64_t __a = (a); \
-  (uint64_t)__builtin_neon_vshld_n_u64(__a, __b); })
-
-#define vslid_n_s64(a, b, __c) __extension__ ({ \
-  int64_t __a = (a); int64_t __b = (b); \
-  (int64_t)__builtin_neon_vslid_n_s64(__a, __b, __c); })
-#define vslid_n_u64(a, b, __c) __extension__ ({ \
-  uint64_t __a = (a); uint64_t __b = (b); \
-  (uint64_t)__builtin_neon_vslid_n_u64(__a, __b, __c); })
-
-__ai int8_t vqabsb_s8(int8_t __a) {
-  return (int8_t)__builtin_neon_vqabsb_s8(__a); }
-__ai int16_t vqabsh_s16(int16_t __a) {
-  return (int16_t)__builtin_neon_vqabsh_s16(__a); }
-__ai int32_t vqabss_s32(int32_t __a) {
-  return (int32_t)__builtin_neon_vqabss_s32(__a); }
-__ai int64_t vqabsd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vqabsd_s64(__a); }
-
-__ai int32_t vqdmlalh_s16(int32_t __a, int16_t __b, int16_t __c) {
-  return (int32_t)__builtin_neon_vqdmlalh_s16(__a, __b, __c); }
-__ai int64_t vqdmlals_s32(int64_t __a, int32_t __b, int32_t __c) {
-  return (int64_t)__builtin_neon_vqdmlals_s32(__a, __b, __c); }
-
-#define vqdmlalh_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32_t __a = (a); int16_t __b = (b); int16x4_t __c = (c); \
-  (int32_t)__builtin_neon_vqdmlalh_lane_s16(__a, __b, __c, __d); })
-#define vqdmlals_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64_t __a = (a); int32_t __b = (b); int32x2_t __c = (c); \
-  (int64_t)__builtin_neon_vqdmlals_lane_s32(__a, __b, __c, __d); })
-
-#define vqdmlalh_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32_t __a = (a); int16_t __b = (b); int16x8_t __c = (c); \
-  (int32_t)__builtin_neon_vqdmlalh_laneq_s16(__a, __b, __c, __d); })
-#define vqdmlals_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64_t __a = (a); int32_t __b = (b); int32x4_t __c = (c); \
-  (int64_t)__builtin_neon_vqdmlals_laneq_s32(__a, __b, __c, __d); })
-
-__ai int32_t vqdmlslh_s16(int32_t __a, int16_t __b, int16_t __c) {
-  return (int32_t)__builtin_neon_vqdmlslh_s16(__a, __b, __c); }
-__ai int64_t vqdmlsls_s32(int64_t __a, int32_t __b, int32_t __c) {
-  return (int64_t)__builtin_neon_vqdmlsls_s32(__a, __b, __c); }
-
-#define vqdmlslh_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32_t __a = (a); int16_t __b = (b); int16x4_t __c = (c); \
-  (int32_t)__builtin_neon_vqdmlslh_lane_s16(__a, __b, __c, __d); })
-#define vqdmlsls_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64_t __a = (a); int32_t __b = (b); int32x2_t __c = (c); \
-  (int64_t)__builtin_neon_vqdmlsls_lane_s32(__a, __b, __c, __d); })
-
-#define vqdmlslh_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32_t __a = (a); int16_t __b = (b); int16x8_t __c = (c); \
-  (int32_t)__builtin_neon_vqdmlslh_laneq_s16(__a, __b, __c, __d); })
-#define vqdmlsls_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64_t __a = (a); int32_t __b = (b); int32x4_t __c = (c); \
-  (int64_t)__builtin_neon_vqdmlsls_laneq_s32(__a, __b, __c, __d); })
-
-__ai int16_t vqdmulhh_s16(int16_t __a, int16_t __b) {
-  return (int16_t)__builtin_neon_vqdmulhh_s16(__a, __b); }
-__ai int32_t vqdmulhs_s32(int32_t __a, int32_t __b) {
-  return (int32_t)__builtin_neon_vqdmulhs_s32(__a, __b); }
-
-#define vqdmulhh_lane_s16(a, b, __c) __extension__ ({ \
-  int16_t __a = (a); int16x4_t __b = (b); \
-  vqdmulhh_s16(__a, vget_lane_s16(__b, __c)); })
-#define vqdmulhs_lane_s32(a, b, __c) __extension__ ({ \
-  int32_t __a = (a); int32x2_t __b = (b); \
-  vqdmulhs_s32(__a, vget_lane_s32(__b, __c)); })
-
-#define vqdmulhh_laneq_s16(a, b, __c) __extension__ ({ \
-  int16_t __a = (a); int16x8_t __b = (b); \
-  vqdmulhh_s16(__a, vgetq_lane_s16(__b, __c)); })
-#define vqdmulhs_laneq_s32(a, b, __c) __extension__ ({ \
-  int32_t __a = (a); int32x4_t __b = (b); \
-  vqdmulhs_s32(__a, vgetq_lane_s32(__b, __c)); })
-
-__ai int32_t vqdmullh_s16(int16_t __a, int16_t __b) {
-  return (int32_t)__builtin_neon_vqdmullh_s16(__a, __b); }
-__ai int64_t vqdmulls_s32(int32_t __a, int32_t __b) {
-  return (int64_t)__builtin_neon_vqdmulls_s32(__a, __b); }
-
-#define vqdmullh_lane_s16(a, b, __c) __extension__ ({ \
-  int16_t __a = (a); int16x4_t __b = (b); \
-  vqdmullh_s16(__a, vget_lane_s16(b, __c)); })
-#define vqdmulls_lane_s32(a, b, __c) __extension__ ({ \
-  int32_t __a = (a); int32x2_t __b = (b); \
-  vqdmulls_s32(__a, vget_lane_s32(b, __c)); })
-
-#define vqdmullh_laneq_s16(a, b, __c) __extension__ ({ \
-  int16_t __a = (a); int16x8_t __b = (b); \
-  vqdmullh_s16(__a, vgetq_lane_s16(b, __c)); })
-#define vqdmulls_laneq_s32(a, b, __c) __extension__ ({ \
-  int32_t __a = (a); int32x4_t __b = (b); \
-  vqdmulls_s32(__a, vgetq_lane_s32(b, __c)); })
-
-__ai int8_t vqnegb_s8(int8_t __a) {
-  return (int8_t)__builtin_neon_vqnegb_s8(__a); }
-__ai int16_t vqnegh_s16(int16_t __a) {
-  return (int16_t)__builtin_neon_vqnegh_s16(__a); }
-__ai int32_t vqnegs_s32(int32_t __a) {
-  return (int32_t)__builtin_neon_vqnegs_s32(__a); }
-__ai int64_t vqnegd_s64(int64_t __a) {
-  return (int64_t)__builtin_neon_vqnegd_s64(__a); }
-
-__ai int16_t vqrdmulhh_s16(int16_t __a, int16_t __b) {
-  return (int16_t)__builtin_neon_vqrdmulhh_s16(__a, __b); }
-__ai int32_t vqrdmulhs_s32(int32_t __a, int32_t __b) {
-  return (int32_t)__builtin_neon_vqrdmulhs_s32(__a, __b); }
-
-#define vqrdmulhh_lane_s16(a, b, __c) __extension__ ({ \
-  int16_t __a = (a); int16x4_t __b = (b); \
-  vqrdmulhh_s16(__a, vget_lane_s16(__b, __c)); })
-#define vqrdmulhs_lane_s32(a, b, __c) __extension__ ({ \
-  int32_t __a = (a); int32x2_t __b = (b); \
-  vqrdmulhs_s32(__a, vget_lane_s32(__b, __c)); })
-
-#define vqrdmulhh_laneq_s16(a, b, __c) __extension__ ({ \
-  int16_t __a = (a); int16x8_t __b = (b); \
-  vqrdmulhh_s16(__a, vgetq_lane_s16(__b, __c)); })
-#define vqrdmulhs_laneq_s32(a, b, __c) __extension__ ({ \
-  int32_t __a = (a); int32x4_t __b = (b); \
-  vqrdmulhs_s32(__a, vgetq_lane_s32(__b, __c)); })
-
-#define vqrshrnh_n_s16(a, __b) __extension__ ({ \
-  int16_t __a = (a); \
-  (int8_t)__builtin_neon_vqrshrnh_n_s16(__a, __b); })
-#define vqrshrns_n_s32(a, __b) __extension__ ({ \
-  int32_t __a = (a); \
-  (int16_t)__builtin_neon_vqrshrns_n_s32(__a, __b); })
-#define vqrshrnd_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int32_t)__builtin_neon_vqrshrnd_n_s64(__a, __b); })
-#define vqrshrnh_n_u16(a, __b) __extension__ ({ \
-  uint16_t __a = (a); \
-  (uint8_t)__builtin_neon_vqrshrnh_n_u16(__a, __b); })
-#define vqrshrns_n_u32(a, __b) __extension__ ({ \
-  uint32_t __a = (a); \
-  (uint16_t)__builtin_neon_vqrshrns_n_u32(__a, __b); })
-#define vqrshrnd_n_u64(a, __b) __extension__ ({ \
-  uint64_t __a = (a); \
-  (uint32_t)__builtin_neon_vqrshrnd_n_u64(__a, __b); })
-
-#define vqrshrunh_n_s16(a, __b) __extension__ ({ \
-  int16_t __a = (a); \
-  (int8_t)__builtin_neon_vqrshrunh_n_s16(__a, __b); })
-#define vqrshruns_n_s32(a, __b) __extension__ ({ \
-  int32_t __a = (a); \
-  (int16_t)__builtin_neon_vqrshruns_n_s32(__a, __b); })
-#define vqrshrund_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int32_t)__builtin_neon_vqrshrund_n_s64(__a, __b); })
-
-#define vqshlub_n_s8(a, __b) __extension__ ({ \
-  int8_t __a = (a); \
-  (int8_t)__builtin_neon_vqshlub_n_s8(__a, __b); })
-#define vqshluh_n_s16(a, __b) __extension__ ({ \
-  int16_t __a = (a); \
-  (int16_t)__builtin_neon_vqshluh_n_s16(__a, __b); })
-#define vqshlus_n_s32(a, __b) __extension__ ({ \
-  int32_t __a = (a); \
-  (int32_t)__builtin_neon_vqshlus_n_s32(__a, __b); })
-#define vqshlud_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int64_t)__builtin_neon_vqshlud_n_s64(__a, __b); })
-
-#define vqshlb_n_s8(a, __b) __extension__ ({ \
-  int8_t __a = (a); \
-  (int8_t)__builtin_neon_vqshlb_n_s8(__a, __b); })
-#define vqshlh_n_s16(a, __b) __extension__ ({ \
-  int16_t __a = (a); \
-  (int16_t)__builtin_neon_vqshlh_n_s16(__a, __b); })
-#define vqshls_n_s32(a, __b) __extension__ ({ \
-  int32_t __a = (a); \
-  (int32_t)__builtin_neon_vqshls_n_s32(__a, __b); })
-#define vqshld_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int64_t)__builtin_neon_vqshld_n_s64(__a, __b); })
-#define vqshlb_n_u8(a, __b) __extension__ ({ \
-  uint8_t __a = (a); \
-  (uint8_t)__builtin_neon_vqshlb_n_u8(__a, __b); })
-#define vqshlh_n_u16(a, __b) __extension__ ({ \
-  uint16_t __a = (a); \
-  (uint16_t)__builtin_neon_vqshlh_n_u16(__a, __b); })
-#define vqshls_n_u32(a, __b) __extension__ ({ \
-  uint32_t __a = (a); \
-  (uint32_t)__builtin_neon_vqshls_n_u32(__a, __b); })
-#define vqshld_n_u64(a, __b) __extension__ ({ \
-  uint64_t __a = (a); \
-  (uint64_t)__builtin_neon_vqshld_n_u64(__a, __b); })
-
-#define vqshrnh_n_s16(a, __b) __extension__ ({ \
-  int16_t __a = (a); \
-  (int8_t)__builtin_neon_vqshrnh_n_s16(__a, __b); })
-#define vqshrns_n_s32(a, __b) __extension__ ({ \
-  int32_t __a = (a); \
-  (int16_t)__builtin_neon_vqshrns_n_s32(__a, __b); })
-#define vqshrnd_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int32_t)__builtin_neon_vqshrnd_n_s64(__a, __b); })
-#define vqshrnh_n_u16(a, __b) __extension__ ({ \
-  uint16_t __a = (a); \
-  (uint8_t)__builtin_neon_vqshrnh_n_u16(__a, __b); })
-#define vqshrns_n_u32(a, __b) __extension__ ({ \
-  uint32_t __a = (a); \
-  (uint16_t)__builtin_neon_vqshrns_n_u32(__a, __b); })
-#define vqshrnd_n_u64(a, __b) __extension__ ({ \
-  uint64_t __a = (a); \
-  (uint32_t)__builtin_neon_vqshrnd_n_u64(__a, __b); })
-
-#define vqshrunh_n_s16(a, __b) __extension__ ({ \
-  int16_t __a = (a); \
-  (int8_t)__builtin_neon_vqshrunh_n_s16(__a, __b); })
-#define vqshruns_n_s32(a, __b) __extension__ ({ \
-  int32_t __a = (a); \
-  (int16_t)__builtin_neon_vqshruns_n_s32(__a, __b); })
-#define vqshrund_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int32_t)__builtin_neon_vqshrund_n_s64(__a, __b); })
-
-__ai int8_t vqmovnh_s16(int16_t __a) {
-  return (int8_t)__builtin_neon_vqmovnh_s16(__a); }
-__ai int16_t vqmovns_s32(int32_t __a) {
-  return (int16_t)__builtin_neon_vqmovns_s32(__a); }
-__ai int32_t vqmovnd_s64(int64_t __a) {
-  return (int32_t)__builtin_neon_vqmovnd_s64(__a); }
-
-__ai int8_t vqmovunh_s16(int16_t __a) {
-  return (int8_t)__builtin_neon_vqmovunh_s16(__a); }
-__ai int16_t vqmovuns_s32(int32_t __a) {
-  return (int16_t)__builtin_neon_vqmovuns_s32(__a); }
-__ai int32_t vqmovund_s64(int64_t __a) {
-  return (int32_t)__builtin_neon_vqmovund_s64(__a); }
-
-#define vsrid_n_s64(a, b, __c) __extension__ ({ \
-  int64_t __a = (a); int64_t __b = (b); \
-  (int64_t)__builtin_neon_vsrid_n_s64(__a, __b, __c); })
-#define vsrid_n_u64(a, b, __c) __extension__ ({ \
-  uint64_t __a = (a); uint64_t __b = (b); \
-  (uint64_t)__builtin_neon_vsrid_n_u64(__a, __b, __c); })
-
-#define vrshrd_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int64_t)__builtin_neon_vrshrd_n_s64(__a, __b); })
-#define vrshrd_n_u64(a, __b) __extension__ ({ \
-  uint64_t __a = (a); \
-  (uint64_t)__builtin_neon_vrshrd_n_u64(__a, __b); })
-
-#define vrsrad_n_s64(a, b, __c) __extension__ ({ \
-  int64_t __a = (a); int64_t __b = (b); \
-  (int64_t)__builtin_neon_vrsrad_n_s64(__a, __b, __c); })
-#define vrsrad_n_u64(a, b, __c) __extension__ ({ \
-  uint64_t __a = (a); uint64_t __b = (b); \
-  (uint64_t)__builtin_neon_vrsrad_n_u64(__a, __b, __c); })
-
-#define vshrd_n_s64(a, __b) __extension__ ({ \
-  int64_t __a = (a); \
-  (int64_t)__builtin_neon_vshrd_n_s64(__a, __b); })
-#define vshrd_n_u64(a, __b) __extension__ ({ \
-  uint64_t __a = (a); \
-  (uint64_t)__builtin_neon_vshrd_n_u64(__a, __b); })
-
-#define vsrad_n_s64(a, b, __c) __extension__ ({ \
-  int64_t __a = (a); int64_t __b = (b); \
-  (int64_t)__builtin_neon_vsrad_n_s64(__a, __b, __c); })
-#define vsrad_n_u64(a, b, __c) __extension__ ({ \
-  uint64_t __a = (a); uint64_t __b = (b); \
-  (uint64_t)__builtin_neon_vsrad_n_u64(__a, __b, __c); })
-
-__ai int64_t vsubd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vsubd_s64(__a, __b); }
-__ai uint64_t vsubd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vsubd_u64(__a, __b); }
-
-__ai int8_t vuqaddb_s8(int8_t __a, int8_t __b) {
-  return (int8_t)__builtin_neon_vuqaddb_s8(__a, __b); }
-__ai int16_t vuqaddh_s16(int16_t __a, int16_t __b) {
-  return (int16_t)__builtin_neon_vuqaddh_s16(__a, __b); }
-__ai int32_t vuqadds_s32(int32_t __a, int32_t __b) {
-  return (int32_t)__builtin_neon_vuqadds_s32(__a, __b); }
-__ai int64_t vuqaddd_s64(int64_t __a, int64_t __b) {
-  return (int64_t)__builtin_neon_vuqaddd_s64(__a, __b); }
-
-__ai float64_t vcvtd_f64_u64(uint64_t __a) {
-  return (float64_t)__builtin_neon_vcvtd_f64_u64(__a); }
-
-__ai float32_t vcvts_f32_u32(uint32_t __a) {
-  return (float32_t)__builtin_neon_vcvts_f32_u32(__a); }
-
-__ai uint8_t vqmovnh_u16(uint16_t __a) {
-  return (uint8_t)__builtin_neon_vqmovnh_u16(__a); }
-__ai uint16_t vqmovns_u32(uint32_t __a) {
-  return (uint16_t)__builtin_neon_vqmovns_u32(__a); }
-__ai uint32_t vqmovnd_u64(uint64_t __a) {
-  return (uint32_t)__builtin_neon_vqmovnd_u64(__a); }
-
-__ai uint8_t vsqaddb_u8(uint8_t __a, uint8_t __b) {
-  return (uint8_t)__builtin_neon_vsqaddb_u8(__a, __b); }
-__ai uint16_t vsqaddh_u16(uint16_t __a, uint16_t __b) {
-  return (uint16_t)__builtin_neon_vsqaddh_u16(__a, __b); }
-__ai uint32_t vsqadds_u32(uint32_t __a, uint32_t __b) {
-  return (uint32_t)__builtin_neon_vsqadds_u32(__a, __b); }
-__ai uint64_t vsqaddd_u64(uint64_t __a, uint64_t __b) {
-  return (uint64_t)__builtin_neon_vsqaddd_u64(__a, __b); }
-
-#define vdupb_lane_s8(a, __b) __extension__ ({ \
-  int8x8_t __a = (a); \
-  (int8_t)__builtin_neon_vdupb_lane_i8(__a, __b); })
-#define vduph_lane_s16(a, __b) __extension__ ({ \
-  int16x4_t __a = (a); \
-  (int16_t)__builtin_neon_vduph_lane_i16(__a, __b); })
-#define vdups_lane_s32(a, __b) __extension__ ({ \
-  int32x2_t __a = (a); \
-  (int32_t)__builtin_neon_vdups_lane_i32(__a, __b); })
-#define vdupd_lane_s64(a, __b) __extension__ ({ \
-  int64x1_t __a = (a); \
-  (int64_t)__builtin_neon_vdupd_lane_i64(__a, __b); })
-#define vdups_lane_f32(a, __b) __extension__ ({ \
-  float32x2_t __a = (a); \
-  (float32_t)__builtin_neon_vdups_lane_f32(__a, __b); })
-#define vdupd_lane_f64(a, __b) __extension__ ({ \
-  float64x1_t __a = (a); \
-  (float64_t)__builtin_neon_vdupd_lane_f64(__a, __b); })
-#define vdupb_lane_u8(a, __b) __extension__ ({ \
-  uint8x8_t __a = (a); \
-  (uint8_t)__builtin_neon_vdupb_lane_i8((int8x8_t)__a, __b); })
-#define vduph_lane_u16(a, __b) __extension__ ({ \
-  uint16x4_t __a = (a); \
-  (uint16_t)__builtin_neon_vduph_lane_i16((int16x4_t)__a, __b); })
-#define vdups_lane_u32(a, __b) __extension__ ({ \
-  uint32x2_t __a = (a); \
-  (uint32_t)__builtin_neon_vdups_lane_i32((int32x2_t)__a, __b); })
-#define vdupd_lane_u64(a, __b) __extension__ ({ \
-  uint64x1_t __a = (a); \
-  (uint64_t)__builtin_neon_vdupd_lane_i64((int64x1_t)__a, __b); })
-#define vdupb_lane_p8(a, __b) __extension__ ({ \
-  poly8x8_t __a = (a); \
-  (poly8_t)__builtin_neon_vdupb_lane_i8((int8x8_t)__a, __b); })
-#define vduph_lane_p16(a, __b) __extension__ ({ \
-  poly16x4_t __a = (a); \
-  (poly16_t)__builtin_neon_vduph_lane_i16((int16x4_t)__a, __b); })
-
-#define vdupb_laneq_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  (int8_t)__builtin_neon_vdupb_laneq_i8(__a, __b); })
-#define vduph_laneq_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  (int16_t)__builtin_neon_vduph_laneq_i16(__a, __b); })
-#define vdups_laneq_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  (int32_t)__builtin_neon_vdups_laneq_i32(__a, __b); })
-#define vdupd_laneq_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  (int64_t)__builtin_neon_vdupd_laneq_i64(__a, __b); })
-#define vdups_laneq_f32(a, __b) __extension__ ({ \
-  float32x4_t __a = (a); \
-  (float32_t)__builtin_neon_vdups_laneq_f32(__a, __b); })
-#define vdupd_laneq_f64(a, __b) __extension__ ({ \
-  float64x2_t __a = (a); \
-  (float64_t)__builtin_neon_vdupd_laneq_f64(__a, __b); })
-#define vdupb_laneq_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  (uint8_t)__builtin_neon_vdupb_laneq_i8((int8x16_t)__a, __b); })
-#define vduph_laneq_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  (uint16_t)__builtin_neon_vduph_laneq_i16((int16x8_t)__a, __b); })
-#define vdups_laneq_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  (uint32_t)__builtin_neon_vdups_laneq_i32((int32x4_t)__a, __b); })
-#define vdupd_laneq_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  (uint64_t)__builtin_neon_vdupd_laneq_i64((int64x2_t)__a, __b); })
-#define vdupb_laneq_p8(a, __b) __extension__ ({ \
-  poly8x16_t __a = (a); \
-  (poly8_t)__builtin_neon_vdupb_laneq_i8((int8x16_t)__a, __b); })
-#define vduph_laneq_p16(a, __b) __extension__ ({ \
-  poly16x8_t __a = (a); \
-  (poly16_t)__builtin_neon_vduph_laneq_i16((int16x8_t)__a, __b); })
-
-#define vmulx_lane_f64(a, b, __c) __extension__ ({ \
-  float64x1_t __a = (a); float64x1_t __b = (b); \
-  float64_t __d1 = vget_lane_f64(__a, 0);\
-  float64_t __e1 = vget_lane_f64(__b, __c);\
-  float64_t __f1 = vmulxd_f64(__d1, __e1);\
-  float64x1_t __g1;\
-  vset_lane_f64(__f1, __g1, __c); })
-
-#define vmulx_laneq_f64(a, b, __c) __extension__ ({ \
-  float64x1_t __a = (a); float64x2_t __b = (b); \
-  float64_t __d1 = vget_lane_f64(__a, 0);\
-  float64_t __e1 = vgetq_lane_f64(__b, __c);\
-  float64_t __f1 = vmulxd_f64(__d1, __e1);\
-  float64x1_t __g1;\
-  vset_lane_f64(__f1, __g1, 0); })
-
-#define vmul_lane_f64(a, b, __c) __extension__ ({ \
-  float64x1_t __a = (a); float64x1_t __b = (b); \
-  (float64x1_t)__builtin_neon_vmul_lane_v((int8x8_t)__a, (int8x8_t)__b, __c, 9); })
-
-#define vmul_laneq_f64(a, b, __c) __extension__ ({ \
-  float64x1_t __a = (a); float64x2_t __b = (b); \
-  (float64x1_t)__builtin_neon_vmul_laneq_v((int8x8_t)__a, (int8x16_t)__b, __c, 9); })
-
-__ai float64x1_t vmul_n_f64(float64x1_t __a, float64_t __b) {
-  return (float64x1_t)__builtin_neon_vmul_n_f64(__a, __b); }
-
-#define vset_lane_f64(a, b, __c) __extension__ ({ \
-  float64_t __a = (a); float64x1_t __b = (b); \
-  (float64x1_t)__builtin_neon_vset_lane_f64(__a, __b, __c); })
-#define vsetq_lane_f64(a, b, __c) __extension__ ({ \
-  float64_t __a = (a); float64x2_t __b = (b); \
-  (float64x2_t)__builtin_neon_vsetq_lane_f64(__a, __b, __c); })
-#define vset_lane_p64(a, b, __c) __extension__ ({ \
-  poly64_t __a = (a); poly64x1_t __b = (b); \
-  (poly64x1_t)__builtin_neon_vset_lane_i64(__a, (int64x1_t)__b, __c); })
-#define vsetq_lane_p64(a, b, __c) __extension__ ({ \
-  poly64_t __a = (a); poly64x2_t __b = (b); \
-  (poly64x2_t)__builtin_neon_vsetq_lane_i64(__a, (int64x2_t)__b, __c); })
-
-#define vshll_high_n_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  int8x8_t __a1 = vget_high_s8(__a); \
-  (int16x8_t)vshll_n_s8(__a1, __b); })
-#define vshll_high_n_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  int16x4_t __a1 = vget_high_s16(__a); \
-  (int32x4_t)vshll_n_s16(__a1, __b); })
-#define vshll_high_n_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  int32x2_t __a1 = vget_high_s32(__a); \
-  (int64x2_t)vshll_n_s32(__a1, __b); })
-#define vshll_high_n_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  uint8x8_t __a1 = vget_high_u8(__a); \
-  (uint16x8_t)vshll_n_u8(__a1, __b); })
-#define vshll_high_n_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  uint16x4_t __a1 = vget_high_u16(__a); \
-  (uint32x4_t)vshll_n_u16(__a1, __b); })
-#define vshll_high_n_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  uint32x2_t __a1 = vget_high_u32(__a); \
-  (uint64x2_t)vshll_n_u32(__a1, __b); })
-
-#define vshrn_high_n_s16(a, b, __c) __extension__ ({ \
-  int8x8_t __a = (a); int16x8_t __b = (b); \
-  (int8x16_t)vcombine_s16(__a, vshrn_n_s16(__b, __c)); })
-#define vshrn_high_n_s32(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int32x4_t __b = (b); \
-  (int16x8_t)vcombine_s32(__a, vshrn_n_s32(__b, __c)); })
-#define vshrn_high_n_s64(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int64x2_t __b = (b); \
-  (int32x4_t)vcombine_s64(__a, vshrn_n_s64(__b, __c)); })
-#define vshrn_high_n_u16(a, b, __c) __extension__ ({ \
-  uint8x8_t __a = (a); uint16x8_t __b = (b); \
-  (uint8x16_t)vcombine_u16(__a, vshrn_n_u16(__b, __c)); })
-#define vshrn_high_n_u32(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint32x4_t __b = (b); \
-  (uint16x8_t)vcombine_u32(__a, vshrn_n_u32(__b, __c)); })
-#define vshrn_high_n_u64(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint64x2_t __b = (b); \
-  (uint32x4_t)vcombine_u64(__a, vshrn_n_u64(__b, __c)); })
-
-#define vsli_n_p64(a, b, __c) __extension__ ({ \
-  poly64x1_t __a = (a); poly64x1_t __b = (b); \
-  (poly64x1_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 6); })
-#define vsliq_n_p64(a, b, __c) __extension__ ({ \
-  poly64x2_t __a = (a); poly64x2_t __b = (b); \
-  (poly64x2_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 38); })
-
-__ai int8x16_t vqmovun_high_s16(int8x8_t __a, int16x8_t __b) {
-  int8x8_t __a1 = vqmovun_s16(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
-__ai int16x8_t vqmovun_high_s32(int16x4_t __a, int32x4_t __b) {
-  int16x4_t __a1 = vqmovun_s32(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
-__ai int32x4_t vqmovun_high_s64(int32x2_t __a, int64x2_t __b) {
-  int32x2_t __a1 = vqmovun_s64(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
-
-#define vsri_n_p64(a, b, __c) __extension__ ({ \
-  poly64x1_t __a = (a); poly64x1_t __b = (b); \
-  (poly64x1_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 6); })
-#define vsriq_n_p64(a, b, __c) __extension__ ({ \
-  poly64x2_t __a = (a); poly64x2_t __b = (b); \
-  (poly64x2_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 38); })
-
-#define vst1q_f64(__a, b) __extension__ ({ \
-  float64x2_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 41); })
-#define vst1_f64(__a, b) __extension__ ({ \
-  float64x1_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 9); })
-#define vst1_p64(__a, b) __extension__ ({ \
-  poly64x1_t __b = (b); \
-  __builtin_neon_vst1_v(__a, (int8x8_t)__b, 6); })
-#define vst1q_p64(__a, b) __extension__ ({ \
-  poly64x2_t __b = (b); \
-  __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 38); })
-
-#define vst1q_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x2_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 41); })
-#define vst1q_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x2_t __b = (b); \
-  __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 38); })
-#define vst1_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x1_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 9); })
-#define vst1_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x1_t __b = (b); \
-  __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 6); })
-
-#define vst1q_u8_x2(__a, b) __extension__ ({ \
-  uint8x16x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 48); })
-#define vst1q_u16_x2(__a, b) __extension__ ({ \
-  uint16x8x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 49); })
-#define vst1q_u32_x2(__a, b) __extension__ ({ \
-  uint32x4x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 50); })
-#define vst1q_u64_x2(__a, b) __extension__ ({ \
-  uint64x2x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 51); })
-#define vst1q_s8_x2(__a, b) __extension__ ({ \
-  int8x16x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, __b.val[0], __b.val[1], 32); })
-#define vst1q_s16_x2(__a, b) __extension__ ({ \
-  int16x8x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 33); })
-#define vst1q_s32_x2(__a, b) __extension__ ({ \
-  int32x4x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 34); })
-#define vst1q_s64_x2(__a, b) __extension__ ({ \
-  int64x2x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 35); })
-#define vst1q_f16_x2(__a, b) __extension__ ({ \
-  float16x8x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 39); })
-#define vst1q_f32_x2(__a, b) __extension__ ({ \
-  float32x4x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 40); })
-#define vst1q_f64_x2(__a, b) __extension__ ({ \
-  float64x2x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 41); })
-#define vst1q_p8_x2(__a, b) __extension__ ({ \
-  poly8x16x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 36); })
-#define vst1q_p16_x2(__a, b) __extension__ ({ \
-  poly16x8x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 37); })
-#define vst1q_p64_x2(__a, b) __extension__ ({ \
-  poly64x2x2_t __b = (b); \
-  __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 38); })
-#define vst1_u8_x2(__a, b) __extension__ ({ \
-  uint8x8x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 16); })
-#define vst1_u16_x2(__a, b) __extension__ ({ \
-  uint16x4x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 17); })
-#define vst1_u32_x2(__a, b) __extension__ ({ \
-  uint32x2x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 18); })
-#define vst1_u64_x2(__a, b) __extension__ ({ \
-  uint64x1x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 19); })
-#define vst1_s8_x2(__a, b) __extension__ ({ \
-  int8x8x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, __b.val[0], __b.val[1], 0); })
-#define vst1_s16_x2(__a, b) __extension__ ({ \
-  int16x4x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 1); })
-#define vst1_s32_x2(__a, b) __extension__ ({ \
-  int32x2x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 2); })
-#define vst1_s64_x2(__a, b) __extension__ ({ \
-  int64x1x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 3); })
-#define vst1_f16_x2(__a, b) __extension__ ({ \
-  float16x4x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 7); })
-#define vst1_f32_x2(__a, b) __extension__ ({ \
-  float32x2x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 8); })
-#define vst1_f64_x2(__a, b) __extension__ ({ \
-  float64x1x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 9); })
-#define vst1_p8_x2(__a, b) __extension__ ({ \
-  poly8x8x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 4); })
-#define vst1_p16_x2(__a, b) __extension__ ({ \
-  poly16x4x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 5); })
-#define vst1_p64_x2(__a, b) __extension__ ({ \
-  poly64x1x2_t __b = (b); \
-  __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 6); })
-
-#define vst1q_u8_x3(__a, b) __extension__ ({ \
-  uint8x16x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 48); })
-#define vst1q_u16_x3(__a, b) __extension__ ({ \
-  uint16x8x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 49); })
-#define vst1q_u32_x3(__a, b) __extension__ ({ \
-  uint32x4x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 50); })
-#define vst1q_u64_x3(__a, b) __extension__ ({ \
-  uint64x2x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 51); })
-#define vst1q_s8_x3(__a, b) __extension__ ({ \
-  int8x16x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, __b.val[0], __b.val[1], __b.val[2], 32); })
-#define vst1q_s16_x3(__a, b) __extension__ ({ \
-  int16x8x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 33); })
-#define vst1q_s32_x3(__a, b) __extension__ ({ \
-  int32x4x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 34); })
-#define vst1q_s64_x3(__a, b) __extension__ ({ \
-  int64x2x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 35); })
-#define vst1q_f16_x3(__a, b) __extension__ ({ \
-  float16x8x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 39); })
-#define vst1q_f32_x3(__a, b) __extension__ ({ \
-  float32x4x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 40); })
-#define vst1q_f64_x3(__a, b) __extension__ ({ \
-  float64x2x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 41); })
-#define vst1q_p8_x3(__a, b) __extension__ ({ \
-  poly8x16x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 36); })
-#define vst1q_p16_x3(__a, b) __extension__ ({ \
-  poly16x8x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 37); })
-#define vst1q_p64_x3(__a, b) __extension__ ({ \
-  poly64x2x3_t __b = (b); \
-  __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 38); })
-#define vst1_u8_x3(__a, b) __extension__ ({ \
-  uint8x8x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 16); })
-#define vst1_u16_x3(__a, b) __extension__ ({ \
-  uint16x4x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 17); })
-#define vst1_u32_x3(__a, b) __extension__ ({ \
-  uint32x2x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 18); })
-#define vst1_u64_x3(__a, b) __extension__ ({ \
-  uint64x1x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 19); })
-#define vst1_s8_x3(__a, b) __extension__ ({ \
-  int8x8x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, __b.val[0], __b.val[1], __b.val[2], 0); })
-#define vst1_s16_x3(__a, b) __extension__ ({ \
-  int16x4x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 1); })
-#define vst1_s32_x3(__a, b) __extension__ ({ \
-  int32x2x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 2); })
-#define vst1_s64_x3(__a, b) __extension__ ({ \
-  int64x1x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 3); })
-#define vst1_f16_x3(__a, b) __extension__ ({ \
-  float16x4x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 7); })
-#define vst1_f32_x3(__a, b) __extension__ ({ \
-  float32x2x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 8); })
-#define vst1_f64_x3(__a, b) __extension__ ({ \
-  float64x1x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 9); })
-#define vst1_p8_x3(__a, b) __extension__ ({ \
-  poly8x8x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 4); })
-#define vst1_p16_x3(__a, b) __extension__ ({ \
-  poly16x4x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 5); })
-#define vst1_p64_x3(__a, b) __extension__ ({ \
-  poly64x1x3_t __b = (b); \
-  __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 6); })
-
-#define vst1q_u8_x4(__a, b) __extension__ ({ \
-  uint8x16x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 48); })
-#define vst1q_u16_x4(__a, b) __extension__ ({ \
-  uint16x8x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 49); })
-#define vst1q_u32_x4(__a, b) __extension__ ({ \
-  uint32x4x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 50); })
-#define vst1q_u64_x4(__a, b) __extension__ ({ \
-  uint64x2x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 51); })
-#define vst1q_s8_x4(__a, b) __extension__ ({ \
-  int8x16x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], 32); })
-#define vst1q_s16_x4(__a, b) __extension__ ({ \
-  int16x8x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 33); })
-#define vst1q_s32_x4(__a, b) __extension__ ({ \
-  int32x4x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 34); })
-#define vst1q_s64_x4(__a, b) __extension__ ({ \
-  int64x2x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 35); })
-#define vst1q_f16_x4(__a, b) __extension__ ({ \
-  float16x8x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 39); })
-#define vst1q_f32_x4(__a, b) __extension__ ({ \
-  float32x4x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 40); })
-#define vst1q_f64_x4(__a, b) __extension__ ({ \
-  float64x2x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 41); })
-#define vst1q_p8_x4(__a, b) __extension__ ({ \
-  poly8x16x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 36); })
-#define vst1q_p16_x4(__a, b) __extension__ ({ \
-  poly16x8x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 37); })
-#define vst1q_p64_x4(__a, b) __extension__ ({ \
-  poly64x2x4_t __b = (b); \
-  __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 38); })
-#define vst1_u8_x4(__a, b) __extension__ ({ \
-  uint8x8x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 16); })
-#define vst1_u16_x4(__a, b) __extension__ ({ \
-  uint16x4x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 17); })
-#define vst1_u32_x4(__a, b) __extension__ ({ \
-  uint32x2x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 18); })
-#define vst1_u64_x4(__a, b) __extension__ ({ \
-  uint64x1x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 19); })
-#define vst1_s8_x4(__a, b) __extension__ ({ \
-  int8x8x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], 0); })
-#define vst1_s16_x4(__a, b) __extension__ ({ \
-  int16x4x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 1); })
-#define vst1_s32_x4(__a, b) __extension__ ({ \
-  int32x2x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 2); })
-#define vst1_s64_x4(__a, b) __extension__ ({ \
-  int64x1x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 3); })
-#define vst1_f16_x4(__a, b) __extension__ ({ \
-  float16x4x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 7); })
-#define vst1_f32_x4(__a, b) __extension__ ({ \
-  float32x2x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 8); })
-#define vst1_f64_x4(__a, b) __extension__ ({ \
-  float64x1x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 9); })
-#define vst1_p8_x4(__a, b) __extension__ ({ \
-  poly8x8x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 4); })
-#define vst1_p16_x4(__a, b) __extension__ ({ \
-  poly16x4x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 5); })
-#define vst1_p64_x4(__a, b) __extension__ ({ \
-  poly64x1x4_t __b = (b); \
-  __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 6); })
-
-#define vst2q_u64(__a, b) __extension__ ({ \
-  uint64x2x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 51); })
-#define vst2q_s64(__a, b) __extension__ ({ \
-  int64x2x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 35); })
-#define vst2q_f64(__a, b) __extension__ ({ \
-  float64x2x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 41); })
-#define vst2_f64(__a, b) __extension__ ({ \
-  float64x1x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 9); })
-#define vst2_p64(__a, b) __extension__ ({ \
-  poly64x1x2_t __b = (b); \
-  __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 6); })
-#define vst2q_p64(__a, b) __extension__ ({ \
-  poly64x2x2_t __b = (b); \
-  __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 38); })
-
-#define vst2q_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x16x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 48); })
-#define vst2q_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x2x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 51); })
-#define vst2q_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x16x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, __b.val[0], __b.val[1], __c, 32); })
-#define vst2q_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x2x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 35); })
-#define vst2q_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x2x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 41); })
-#define vst2q_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x16x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 36); })
-#define vst2q_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x2x2_t __b = (b); \
-  __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 38); })
-#define vst2_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x1x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 19); })
-#define vst2_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x1x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 3); })
-#define vst2_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x1x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 9); })
-#define vst2_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x1x2_t __b = (b); \
-  __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 6); })
-
-#define vst3q_u64(__a, b) __extension__ ({ \
-  uint64x2x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 51); })
-#define vst3q_s64(__a, b) __extension__ ({ \
-  int64x2x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 35); })
-#define vst3q_f64(__a, b) __extension__ ({ \
-  float64x2x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 41); })
-#define vst3_f64(__a, b) __extension__ ({ \
-  float64x1x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 9); })
-#define vst3_p64(__a, b) __extension__ ({ \
-  poly64x1x3_t __b = (b); \
-  __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 6); })
-#define vst3q_p64(__a, b) __extension__ ({ \
-  poly64x2x3_t __b = (b); \
-  __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 38); })
-
-#define vst3q_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x16x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 48); })
-#define vst3q_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x2x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 51); })
-#define vst3q_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x16x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 32); })
-#define vst3q_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x2x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 35); })
-#define vst3q_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x2x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 41); })
-#define vst3q_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x16x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 36); })
-#define vst3q_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x2x3_t __b = (b); \
-  __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 38); })
-#define vst3_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x1x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 19); })
-#define vst3_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x1x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 3); })
-#define vst3_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x1x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 9); })
-#define vst3_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x1x3_t __b = (b); \
-  __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 6); })
-
-#define vst4q_u64(__a, b) __extension__ ({ \
-  uint64x2x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 51); })
-#define vst4q_s64(__a, b) __extension__ ({ \
-  int64x2x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 35); })
-#define vst4q_f64(__a, b) __extension__ ({ \
-  float64x2x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 41); })
-#define vst4_f64(__a, b) __extension__ ({ \
-  float64x1x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 9); })
-#define vst4_p64(__a, b) __extension__ ({ \
-  poly64x1x4_t __b = (b); \
-  __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 6); })
-#define vst4q_p64(__a, b) __extension__ ({ \
-  poly64x2x4_t __b = (b); \
-  __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 38); })
-
-#define vst4q_lane_u8(__a, b, __c) __extension__ ({ \
-  uint8x16x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 48); })
-#define vst4q_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x2x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 51); })
-#define vst4q_lane_s8(__a, b, __c) __extension__ ({ \
-  int8x16x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 32); })
-#define vst4q_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x2x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 35); })
-#define vst4q_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x2x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 41); })
-#define vst4q_lane_p8(__a, b, __c) __extension__ ({ \
-  poly8x16x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 36); })
-#define vst4q_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x2x4_t __b = (b); \
-  __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 38); })
-#define vst4_lane_u64(__a, b, __c) __extension__ ({ \
-  uint64x1x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 19); })
-#define vst4_lane_s64(__a, b, __c) __extension__ ({ \
-  int64x1x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 3); })
-#define vst4_lane_f64(__a, b, __c) __extension__ ({ \
-  float64x1x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 9); })
-#define vst4_lane_p64(__a, b, __c) __extension__ ({ \
-  poly64x1x4_t __b = (b); \
-  __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 6); })
-
-__ai float64x1_t vsub_f64(float64x1_t __a, float64x1_t __b) {
-  return __a - __b; }
-__ai float64x2_t vsubq_f64(float64x2_t __a, float64x2_t __b) {
-  return __a - __b; }
-
-__ai int8x8_t vuqadd_s8(int8x8_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vuqadd_v(__a, __b, 0); }
-__ai int16x4_t vuqadd_s16(int16x4_t __a, int16x4_t __b) {
-  return (int16x4_t)__builtin_neon_vuqadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
-__ai int32x2_t vuqadd_s32(int32x2_t __a, int32x2_t __b) {
-  return (int32x2_t)__builtin_neon_vuqadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
-__ai int64x1_t vuqadd_s64(int64x1_t __a, int64x1_t __b) {
-  return (int64x1_t)__builtin_neon_vuqadd_v((int8x8_t)__a, (int8x8_t)__b, 3); }
-__ai int8x16_t vuqaddq_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vuqaddq_v(__a, __b, 32); }
-__ai int16x8_t vuqaddq_s16(int16x8_t __a, int16x8_t __b) {
-  return (int16x8_t)__builtin_neon_vuqaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
-__ai int32x4_t vuqaddq_s32(int32x4_t __a, int32x4_t __b) {
-  return (int32x4_t)__builtin_neon_vuqaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
-__ai int64x2_t vuqaddq_s64(int64x2_t __a, int64x2_t __b) {
-  return (int64x2_t)__builtin_neon_vuqaddq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
-
-__ai uint8x8_t vsqadd_u8(uint8x8_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vsqadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
-__ai uint16x4_t vsqadd_u16(uint16x4_t __a, uint16x4_t __b) {
-  return (uint16x4_t)__builtin_neon_vsqadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
-__ai uint32x2_t vsqadd_u32(uint32x2_t __a, uint32x2_t __b) {
-  return (uint32x2_t)__builtin_neon_vsqadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
-__ai uint64x1_t vsqadd_u64(uint64x1_t __a, uint64x1_t __b) {
-  return (uint64x1_t)__builtin_neon_vsqadd_v((int8x8_t)__a, (int8x8_t)__b, 19); }
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vsqaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __a, uint16x8_t __b) {
-  return (uint16x8_t)__builtin_neon_vsqaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vsqaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __a, uint64x2_t __b) {
-  return (uint64x2_t)__builtin_neon_vsqaddq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
-
-__ai int16x8_t vabal_high_s8(int16x8_t __a, int8x16_t __b, int8x16_t __c) {
-  return vabal_s8(__a, vget_high_s8(__b), vget_high_s8(__c)); }
-__ai int32x4_t vabal_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
-  return vabal_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
-__ai int64x2_t vabal_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
-  return vabal_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
-__ai uint16x8_t vabal_high_u8(uint16x8_t __a, uint8x16_t __b, uint8x16_t __c) {
-  return vabal_u8(__a, vget_high_u8(__b), vget_high_u8(__c)); }
-__ai uint32x4_t vabal_high_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return vabal_u16(__a, vget_high_u16(__b), vget_high_u16(__c)); }
-__ai uint64x2_t vabal_high_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return vabal_u32(__a, vget_high_u32(__b), vget_high_u32(__c)); }
-
-
-__ai int8x16_t vaddhn_high_s16(int8x8_t __a, int16x8_t __b, int16x8_t __c) {
-  return vcombine_s8(__a, vaddhn_s16(__b, __c)); }
-__ai int16x8_t vaddhn_high_s32(int16x4_t __a, int32x4_t __b, int32x4_t __c) {
-  return vcombine_s16(__a, vaddhn_s32(__b, __c)); }
-__ai int32x4_t vaddhn_high_s64(int32x2_t __a, int64x2_t __b, int64x2_t __c) {
-  return vcombine_s32(__a, vaddhn_s64(__b, __c)); }
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return vcombine_u8(__a, vaddhn_u16(__b, __c)); }
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return vcombine_u16(__a, vaddhn_u32(__b, __c)); }
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) {
-  return vcombine_u32(__a, vaddhn_u64(__b, __c)); }
-
-__ai int16_t vaddlv_s8(int8x8_t __a) {
-  return (int16_t)__builtin_neon_vaddlv_s8(__a); }
-__ai int32_t vaddlv_s16(int16x4_t __a) {
-  return (int32_t)__builtin_neon_vaddlv_s16(__a); }
-__ai int64_t vaddlv_s32(int32x2_t __a) {
-  return (int64_t)__builtin_neon_vaddlv_s32(__a); }
-__ai uint16_t vaddlv_u8(uint8x8_t __a) {
-  return (uint16_t)__builtin_neon_vaddlv_u8((int8x8_t)__a); }
-__ai uint32_t vaddlv_u16(uint16x4_t __a) {
-  return (uint32_t)__builtin_neon_vaddlv_u16((int16x4_t)__a); }
-__ai uint64_t vaddlv_u32(uint32x2_t __a) {
-  return (uint64_t)__builtin_neon_vaddlv_u32((int32x2_t)__a); }
-__ai int16_t vaddlvq_s8(int8x16_t __a) {
-  return (int16_t)__builtin_neon_vaddlvq_s8(__a); }
-__ai int32_t vaddlvq_s16(int16x8_t __a) {
-  return (int32_t)__builtin_neon_vaddlvq_s16(__a); }
-__ai int64_t vaddlvq_s32(int32x4_t __a) {
-  return (int64_t)__builtin_neon_vaddlvq_s32(__a); }
-__ai uint16_t vaddlvq_u8(uint8x16_t __a) {
-  return (uint16_t)__builtin_neon_vaddlvq_u8((int8x16_t)__a); }
-__ai uint32_t vaddlvq_u16(uint16x8_t __a) {
-  return (uint32_t)__builtin_neon_vaddlvq_u16((int16x8_t)__a); }
-__ai uint64_t vaddlvq_u32(uint32x4_t __a) {
-  return (uint64_t)__builtin_neon_vaddlvq_u32((int32x4_t)__a); }
-
-__ai int16x8_t vaddl_high_s8(int8x16_t __a, int8x16_t __b) {
-  return vmovl_high_s8(__a) + vmovl_high_s8(__b); }
-__ai int32x4_t vaddl_high_s16(int16x8_t __a, int16x8_t __b) {
-  return vmovl_high_s16(__a) + vmovl_high_s16(__b); }
-__ai int64x2_t vaddl_high_s32(int32x4_t __a, int32x4_t __b) {
-  return vmovl_high_s32(__a) + vmovl_high_s32(__b); }
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __a, uint8x16_t __b) {
-  return vmovl_high_u8(__a) + vmovl_high_u8(__b); }
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __a, uint16x8_t __b) {
-  return vmovl_high_u16(__a) + vmovl_high_u16(__b); }
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __a, uint32x4_t __b) {
-  return vmovl_high_u32(__a) + vmovl_high_u32(__b); }
-
-__ai int8_t vaddv_s8(int8x8_t __a) {
-  return (int8_t)__builtin_neon_vaddv_s8(__a); }
-__ai int16_t vaddv_s16(int16x4_t __a) {
-  return (int16_t)__builtin_neon_vaddv_s16(__a); }
-__ai int32_t vaddv_s32(int32x2_t __a) {
-  return (int32_t)__builtin_neon_vaddv_s32(__a); }
-__ai float32_t vaddv_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vaddv_f32(__a); }
-__ai uint8_t vaddv_u8(uint8x8_t __a) {
-  return (uint8_t)__builtin_neon_vaddv_u8((int8x8_t)__a); }
-__ai uint16_t vaddv_u16(uint16x4_t __a) {
-  return (uint16_t)__builtin_neon_vaddv_u16((int16x4_t)__a); }
-__ai uint32_t vaddv_u32(uint32x2_t __a) {
-  return (uint32_t)__builtin_neon_vaddv_u32((int32x2_t)__a); }
-__ai int8_t vaddvq_s8(int8x16_t __a) {
-  return (int8_t)__builtin_neon_vaddvq_s8(__a); }
-__ai int16_t vaddvq_s16(int16x8_t __a) {
-  return (int16_t)__builtin_neon_vaddvq_s16(__a); }
-__ai int32_t vaddvq_s32(int32x4_t __a) {
-  return (int32_t)__builtin_neon_vaddvq_s32(__a); }
-__ai uint8_t vaddvq_u8(uint8x16_t __a) {
-  return (uint8_t)__builtin_neon_vaddvq_u8((int8x16_t)__a); }
-__ai uint16_t vaddvq_u16(uint16x8_t __a) {
-  return (uint16_t)__builtin_neon_vaddvq_u16((int16x8_t)__a); }
-__ai uint32_t vaddvq_u32(uint32x4_t __a) {
-  return (uint32_t)__builtin_neon_vaddvq_u32((int32x4_t)__a); }
-__ai float32_t vaddvq_f32(float32x4_t __a) {
-  return (float32_t)__builtin_neon_vaddvq_f32(__a); }
-__ai float64_t vaddvq_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vaddvq_f64(__a); }
-__ai int64_t vaddvq_s64(int64x2_t __a) {
-  return (int64_t)__builtin_neon_vaddvq_s64(__a); }
-__ai uint64_t vaddvq_u64(uint64x2_t __a) {
-  return (uint64_t)__builtin_neon_vaddvq_u64((int64x2_t)__a); }
-
-__ai int16x8_t vaddw_high_s8(int16x8_t __a, int8x16_t __b) {
-  return __a + vmovl_high_s8(__b); }
-__ai int32x4_t vaddw_high_s16(int32x4_t __a, int16x8_t __b) {
-  return __a + vmovl_high_s16(__b); }
-__ai int64x2_t vaddw_high_s32(int64x2_t __a, int32x4_t __b) {
-  return __a + vmovl_high_s32(__b); }
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __a, uint8x16_t __b) {
-  return __a + vmovl_high_u8(__b); }
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __a, uint16x8_t __b) {
-  return __a + vmovl_high_u16(__b); }
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __a, uint32x4_t __b) {
-  return __a + vmovl_high_u32(__b); }
-
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __a) {
-  return (float32x2_t)__builtin_neon_vcvtx_f32_v((int8x16_t)__a, 41); }
-
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __a, float64x2_t __b) {
-  float32x2_t __a1 = vcvtx_f32_f64(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
-
-__ai float32x2_t vcvt_f32_f64(float64x2_t __a) {
-  return (float32x2_t)__builtin_neon_vcvt_f32_f64((int8x16_t)__a, 41); }
-
-__ai float64x1_t vcvt_f64_s64(int64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vcvt_f64_v((int8x8_t)__a, 3); }
-__ai float64x1_t vcvt_f64_u64(uint64x1_t __a) {
-  return (float64x1_t)__builtin_neon_vcvt_f64_v((int8x8_t)__a, 19); }
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vcvtq_f64_v((int8x16_t)__a, 35); }
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __a) {
-  return (float64x2_t)__builtin_neon_vcvtq_f64_v((int8x16_t)__a, 51); }
-
-__ai float64x2_t vcvt_f64_f32(float32x2_t __a) {
-  return (float64x2_t)__builtin_neon_vcvt_f64_f32((int8x8_t)__a, 41); }
-
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __a, float32x4_t __b) {
-  float16x4_t __a1 = vcvt_f16_f32(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
-
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __a) {
-  float16x4_t __a1 = vget_high_f16(__a);
-  return vcvt_f32_f16(__a1); }
-
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __a, float64x2_t __b) {
-  float32x2_t __a1 = vcvt_f32_f64(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
-
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __a) {
-  float32x2_t __a1 = vget_high_f32(__a);
-  return vcvt_f64_f32(__a1); }
-
-__ai int64x1_t vcvt_s64_f64(float64x1_t __a) {
-  return (int64x1_t)__builtin_neon_vcvt_s64_v((int8x8_t)__a, 3); }
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __a) {
-  return (int64x2_t)__builtin_neon_vcvtq_s64_v((int8x16_t)__a, 35); }
-
-__ai uint64x1_t vcvt_u64_f64(float64x1_t __a) {
-  return (uint64x1_t)__builtin_neon_vcvt_u64_v((int8x8_t)__a, 19); }
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __a) {
-  return (uint64x2_t)__builtin_neon_vcvtq_u64_v((int8x16_t)__a, 51); }
-
-#define vdup_lane_f16(a, __b) __extension__ ({ \
-  float16x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_lane_f64(a, __b) __extension__ ({ \
-  float64x1_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b); })
-#define vdupq_lane_f16(a, __b) __extension__ ({ \
-  float16x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_lane_f64(a, __b) __extension__ ({ \
-  float64x1_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdup_lane_p64(a, __b) __extension__ ({ \
-  poly64x1_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b); })
-#define vdupq_lane_p64(a, __b) __extension__ ({ \
-  poly64x1_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-
-#define vdup_laneq_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdup_laneq_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_laneq_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdup_laneq_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b); })
-#define vdup_laneq_p8(a, __b) __extension__ ({ \
-  poly8x16_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdup_laneq_p16(a, __b) __extension__ ({ \
-  poly16x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_laneq_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdup_laneq_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_laneq_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdup_laneq_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b); })
-#define vdup_laneq_f16(a, __b) __extension__ ({ \
-  float16x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdup_laneq_f32(a, __b) __extension__ ({ \
-  float32x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdup_laneq_f64(a, __b) __extension__ ({ \
-  float64x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b); })
-#define vdupq_laneq_s8(a, __b) __extension__ ({ \
-  int8x16_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_laneq_s16(a, __b) __extension__ ({ \
-  int16x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_laneq_s32(a, __b) __extension__ ({ \
-  int32x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdupq_laneq_s64(a, __b) __extension__ ({ \
-  int64x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdupq_laneq_p8(a, __b) __extension__ ({ \
-  poly8x16_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_laneq_p16(a, __b) __extension__ ({ \
-  poly16x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_laneq_u8(a, __b) __extension__ ({ \
-  uint8x16_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_laneq_u16(a, __b) __extension__ ({ \
-  uint16x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_laneq_u32(a, __b) __extension__ ({ \
-  uint32x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdupq_laneq_u64(a, __b) __extension__ ({ \
-  uint64x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdupq_laneq_f16(a, __b) __extension__ ({ \
-  float16x8_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
-#define vdupq_laneq_f32(a, __b) __extension__ ({ \
-  float32x4_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
-#define vdupq_laneq_f64(a, __b) __extension__ ({ \
-  float64x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-#define vdup_laneq_p64(a, __b) __extension__ ({ \
-  poly64x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b); })
-#define vdupq_laneq_p64(a, __b) __extension__ ({ \
-  poly64x2_t __a = (a); \
-  __builtin_shufflevector(__a, __a, __b, __b); })
-
-#define vext_f64(a, b, __c) __extension__ ({ \
-  float64x1_t __a = (a); float64x1_t __b = (b); \
-  (float64x1_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 9); })
-#define vextq_f64(a, b, __c) __extension__ ({ \
-  float64x2_t __a = (a); float64x2_t __b = (b); \
-  (float64x2_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 41); })
-#define vext_p64(a, b, __c) __extension__ ({ \
-  poly64x1_t __a = (a); poly64x1_t __b = (b); \
-  (poly64x1_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 6); })
-#define vextq_p64(a, b, __c) __extension__ ({ \
-  poly64x2_t __a = (a); poly64x2_t __b = (b); \
-  (poly64x2_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 38); })
-
-#define vfma_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); float32x2_t __c = (c); \
-  (float32x2_t)__builtin_neon_vfma_lane_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, __d, 8); })
-#define vfma_lane_f64(a, b, c, __d) __extension__ ({ \
-  float64x1_t __a = (a); float64x1_t __b = (b); float64x1_t __c = (c); \
-  (float64x1_t)__builtin_neon_vfma_lane_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, __d, 9); })
-#define vfmaq_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); float32x2_t __c = (c); \
-  (float32x4_t)__builtin_neon_vfmaq_lane_v((int8x16_t)__a, (int8x16_t)__b, (int8x8_t)__c, __d, 40); })
-#define vfmaq_lane_f64(a, b, c, __d) __extension__ ({ \
-  float64x2_t __a = (a); float64x2_t __b = (b); float64x1_t __c = (c); \
-  (float64x2_t)__builtin_neon_vfmaq_lane_v((int8x16_t)__a, (int8x16_t)__b, (int8x8_t)__c, __d, 41); })
-
-#define vfma_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); float32x4_t __c = (c); \
-  (float32x2_t)__builtin_neon_vfma_laneq_v((int8x8_t)__a, (int8x8_t)__b, (int8x16_t)__c, __d, 8); })
-#define vfma_laneq_f64(a, b, c, __d) __extension__ ({ \
-  float64x1_t __a = (a); float64x1_t __b = (b); float64x2_t __c = (c); \
-  (float64x1_t)__builtin_neon_vfma_laneq_v((int8x8_t)__a, (int8x8_t)__b, (int8x16_t)__c, __d, 9); })
-#define vfmaq_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); float32x4_t __c = (c); \
-  (float32x4_t)__builtin_neon_vfmaq_laneq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, __d, 40); })
-#define vfmaq_laneq_f64(a, b, c, __d) __extension__ ({ \
-  float64x2_t __a = (a); float64x2_t __b = (b); float64x2_t __c = (c); \
-  (float64x2_t)__builtin_neon_vfmaq_laneq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, __d, 41); })
-
-#define vfms_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); float32x2_t __c = (c); \
-  float32x2_t __a1 = __a; \
-  float32x2_t __b1 = __b; \
-  float32x2_t __c1 = __c; \
-  vfma_lane_f32(__a1, __b1, -__c1, __d); })
-#define vfms_lane_f64(a, b, c, __d) __extension__ ({ \
-  float64x1_t __a = (a); float64x1_t __b = (b); float64x1_t __c = (c); \
-  float64x1_t __a1 = __a; \
-  float64x1_t __b1 = __b; \
-  float64x1_t __c1 = __c; \
-  vfma_lane_f64(__a1, __b1, -__c1, __d); })
-#define vfmsq_lane_f32(a, b, c, __d) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); float32x2_t __c = (c); \
-  float32x4_t __a1 = __a; \
-  float32x4_t __b1 = __b; \
-  float32x2_t __c1 = __c; \
-  vfmaq_lane_f32(__a1, __b1, -__c1, __d); })
-#define vfmsq_lane_f64(a, b, c, __d) __extension__ ({ \
-  float64x2_t __a = (a); float64x2_t __b = (b); float64x1_t __c = (c); \
-  float64x2_t __a1 = __a; \
-  float64x2_t __b1 = __b; \
-  float64x1_t __c1 = __c; \
-  vfmaq_lane_f64(__a1, __b1, -__c1, __d); })
-
-#define vfms_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); float32x4_t __c = (c); \
-  float32x2_t __a1 = __a; \
-  float32x2_t __b1 = __b; \
-  float32x4_t __c1 = __c; \
-  vfma_laneq_f32(__a1, __b1, -__c1, __d); })
-#define vfms_laneq_f64(a, b, c, __d) __extension__ ({ \
-  float64x1_t __a = (a); float64x1_t __b = (b); float64x2_t __c = (c); \
-  float64x1_t __a1 = __a; \
-  float64x1_t __b1 = __b; \
-  float64x2_t __c1 = __c; \
-  vfma_laneq_f64(__a1, __b1, -__c1, __d); })
-#define vfmsq_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); float32x4_t __c = (c); \
-  float32x4_t __a1 = __a; \
-  float32x4_t __b1 = __b; \
-  float32x4_t __c1 = __c; \
-  vfmaq_laneq_f32(__a1, __b1, -__c1, __d); })
-#define vfmsq_laneq_f64(a, b, c, __d) __extension__ ({ \
-  float64x2_t __a = (a); float64x2_t __b = (b); float64x2_t __c = (c); \
-  float64x2_t __a1 = __a; \
-  float64x2_t __b1 = __b; \
-  float64x2_t __c1 = __c; \
-  vfmaq_laneq_f64(__a1, __b1, -__c1, __d); })
-
-__ai float64x1_t vget_high_f64(float64x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 1); }
-__ai poly64x1_t vget_high_p64(poly64x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 1); }
-
-__ai float64x1_t vget_low_f64(float64x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 0); }
-__ai poly64x1_t vget_low_p64(poly64x2_t __a) {
-  return __builtin_shufflevector(__a, __a, 0); }
-
-__ai int8_t vmaxv_s8(int8x8_t __a) {
-  return (int8_t)__builtin_neon_vmaxv_s8(__a); }
-__ai int16_t vmaxv_s16(int16x4_t __a) {
-  return (int16_t)__builtin_neon_vmaxv_s16(__a); }
-__ai int32_t vmaxv_s32(int32x2_t __a) {
-  return (int32_t)__builtin_neon_vmaxv_s32(__a); }
-__ai float32_t vmaxv_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vmaxv_f32(__a); }
-__ai uint8_t vmaxv_u8(uint8x8_t __a) {
-  return (uint8_t)__builtin_neon_vmaxv_u8((int8x8_t)__a); }
-__ai uint16_t vmaxv_u16(uint16x4_t __a) {
-  return (uint16_t)__builtin_neon_vmaxv_u16((int16x4_t)__a); }
-__ai uint32_t vmaxv_u32(uint32x2_t __a) {
-  return (uint32_t)__builtin_neon_vmaxv_u32((int32x2_t)__a); }
-__ai int8_t vmaxvq_s8(int8x16_t __a) {
-  return (int8_t)__builtin_neon_vmaxvq_s8(__a); }
-__ai int16_t vmaxvq_s16(int16x8_t __a) {
-  return (int16_t)__builtin_neon_vmaxvq_s16(__a); }
-__ai int32_t vmaxvq_s32(int32x4_t __a) {
-  return (int32_t)__builtin_neon_vmaxvq_s32(__a); }
-__ai uint8_t vmaxvq_u8(uint8x16_t __a) {
-  return (uint8_t)__builtin_neon_vmaxvq_u8((int8x16_t)__a); }
-__ai uint16_t vmaxvq_u16(uint16x8_t __a) {
-  return (uint16_t)__builtin_neon_vmaxvq_u16((int16x8_t)__a); }
-__ai uint32_t vmaxvq_u32(uint32x4_t __a) {
-  return (uint32_t)__builtin_neon_vmaxvq_u32((int32x4_t)__a); }
-__ai float32_t vmaxvq_f32(float32x4_t __a) {
-  return (float32_t)__builtin_neon_vmaxvq_f32(__a); }
-__ai float64_t vmaxvq_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vmaxvq_f64(__a); }
-
-__ai int8_t vminv_s8(int8x8_t __a) {
-  return (int8_t)__builtin_neon_vminv_s8(__a); }
-__ai int16_t vminv_s16(int16x4_t __a) {
-  return (int16_t)__builtin_neon_vminv_s16(__a); }
-__ai int32_t vminv_s32(int32x2_t __a) {
-  return (int32_t)__builtin_neon_vminv_s32(__a); }
-__ai float32_t vminv_f32(float32x2_t __a) {
-  return (float32_t)__builtin_neon_vminv_f32(__a); }
-__ai uint8_t vminv_u8(uint8x8_t __a) {
-  return (uint8_t)__builtin_neon_vminv_u8((int8x8_t)__a); }
-__ai uint16_t vminv_u16(uint16x4_t __a) {
-  return (uint16_t)__builtin_neon_vminv_u16((int16x4_t)__a); }
-__ai uint32_t vminv_u32(uint32x2_t __a) {
-  return (uint32_t)__builtin_neon_vminv_u32((int32x2_t)__a); }
-__ai int8_t vminvq_s8(int8x16_t __a) {
-  return (int8_t)__builtin_neon_vminvq_s8(__a); }
-__ai int16_t vminvq_s16(int16x8_t __a) {
-  return (int16_t)__builtin_neon_vminvq_s16(__a); }
-__ai int32_t vminvq_s32(int32x4_t __a) {
-  return (int32_t)__builtin_neon_vminvq_s32(__a); }
-__ai uint8_t vminvq_u8(uint8x16_t __a) {
-  return (uint8_t)__builtin_neon_vminvq_u8((int8x16_t)__a); }
-__ai uint16_t vminvq_u16(uint16x8_t __a) {
-  return (uint16_t)__builtin_neon_vminvq_u16((int16x8_t)__a); }
-__ai uint32_t vminvq_u32(uint32x4_t __a) {
-  return (uint32_t)__builtin_neon_vminvq_u32((int32x4_t)__a); }
-__ai float32_t vminvq_f32(float32x4_t __a) {
-  return (float32_t)__builtin_neon_vminvq_f32(__a); }
-__ai float64_t vminvq_f64(float64x2_t __a) {
-  return (float64_t)__builtin_neon_vminvq_f64(__a); }
-
-__ai int16x8_t vmlal_high_s8(int16x8_t __a, int8x16_t __b, int8x16_t __c) {
-  return vmlal_s8(__a, vget_high_s8(__b), vget_high_s8(__c)); }
-__ai int32x4_t vmlal_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
-  return vmlal_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
-__ai int64x2_t vmlal_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
-  return vmlal_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __a, uint8x16_t __b, uint8x16_t __c) {
-  return vmlal_u8(__a, vget_high_u8(__b), vget_high_u8(__c)); }
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return vmlal_u16(__a, vget_high_u16(__b), vget_high_u16(__c)); }
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return vmlal_u32(__a, vget_high_u32(__b), vget_high_u32(__c)); }
-
-#define vmlal_high_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
-  __a + vmull_s16(vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlal_high_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
-  __a + vmull_s32(vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlal_high_lane_u16(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint16x8_t __b = (b); uint16x4_t __c = (c); \
-  __a + vmull_u16(vget_high_u16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlal_high_lane_u32(a, b, c, __d) __extension__ ({ \
-  uint64x2_t __a = (a); uint32x4_t __b = (b); uint32x2_t __c = (c); \
-  __a + vmull_u32(vget_high_u32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-
-#define vmlal_high_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
-  __a + vmull_s16(vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlal_high_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
-  __a + vmull_s32(vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlal_high_laneq_u16(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint16x8_t __b = (b); uint16x8_t __c = (c); \
-  __a + vmull_u16(vget_high_u16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlal_high_laneq_u32(a, b, c, __d) __extension__ ({ \
-  uint64x2_t __a = (a); uint32x4_t __b = (b); uint32x4_t __c = (c); \
-  __a + vmull_u32(vget_high_u32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __a, int16x8_t __b, int16_t __c) {
-  return vmlal_n_s16(__a, vget_high_s16(__b), __c); }
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __a, int32x4_t __b, int32_t __c) {
-  return vmlal_n_s32(__a, vget_high_s32(__b), __c); }
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __a, uint16x8_t __b, uint16_t __c) {
-  return vmlal_n_u16(__a, vget_high_u16(__b), __c); }
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __a, uint32x4_t __b, uint32_t __c) {
-  return vmlal_n_u32(__a, vget_high_u32(__b), __c); }
-
-#define vmlal_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
-  __a + vmull_s16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlal_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
-  __a + vmull_s32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlal_laneq_u16(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint16x4_t __b = (b); uint16x8_t __c = (c); \
-  __a + vmull_u16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlal_laneq_u32(a, b, c, __d) __extension__ ({ \
-  uint64x2_t __a = (a); uint32x2_t __b = (b); uint32x4_t __c = (c); \
-  __a + vmull_u32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
-
-#define vmla_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmla_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmla_laneq_u16(a, b, c, __d) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); uint16x8_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmla_laneq_u32(a, b, c, __d) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); uint32x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmla_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); float32x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlaq_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
-#define vmlaq_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlaq_laneq_u16(a, b, c, __d) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); uint16x8_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
-#define vmlaq_laneq_u32(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); uint32x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlaq_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); float32x4_t __c = (c); \
-  __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-
-__ai int16x8_t vmlsl_high_s8(int16x8_t __a, int8x16_t __b, int8x16_t __c) {
-  return vmlsl_s8(__a, vget_high_s8(__b), vget_high_s8(__c)); }
-__ai int32x4_t vmlsl_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
-  return vmlsl_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
-__ai int64x2_t vmlsl_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
-  return vmlsl_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __a, uint8x16_t __b, uint8x16_t __c) {
-  return vmlsl_u8(__a, vget_high_u8(__b), vget_high_u8(__c)); }
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return vmlsl_u16(__a, vget_high_u16(__b), vget_high_u16(__c)); }
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return vmlsl_u32(__a, vget_high_u32(__b), vget_high_u32(__c)); }
-
-#define vmlsl_high_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
-  __a - vmull_s16(vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsl_high_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
-  __a - vmull_s32(vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlsl_high_lane_u16(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint16x8_t __b = (b); uint16x4_t __c = (c); \
-  __a - vmull_u16(vget_high_u16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsl_high_lane_u32(a, b, c, __d) __extension__ ({ \
-  uint64x2_t __a = (a); uint32x4_t __b = (b); uint32x2_t __c = (c); \
-  __a - vmull_u32(vget_high_u32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-
-#define vmlsl_high_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
-  __a - vmull_s16(vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsl_high_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
-  __a - vmull_s32(vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlsl_high_laneq_u16(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint16x8_t __b = (b); uint16x8_t __c = (c); \
-  __a - vmull_u16(vget_high_u16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsl_high_laneq_u32(a, b, c, __d) __extension__ ({ \
-  uint64x2_t __a = (a); uint32x4_t __b = (b); uint32x4_t __c = (c); \
-  __a - vmull_u32(vget_high_u32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __a, int16x8_t __b, int16_t __c) {
-  return vmlsl_n_s16(__a, vget_high_s16(__b), __c); }
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __a, int32x4_t __b, int32_t __c) {
-  return vmlsl_n_s32(__a, vget_high_s32(__b), __c); }
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __a, uint16x8_t __b, uint16_t __c) {
-  return vmlsl_n_u16(__a, vget_high_u16(__b), __c); }
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __a, uint32x4_t __b, uint32_t __c) {
-  return vmlsl_n_u32(__a, vget_high_u32(__b), __c); }
-
-#define vmlsl_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
-  __a - vmull_s16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsl_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
-  __a - vmull_s32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlsl_laneq_u16(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint16x4_t __b = (b); uint16x8_t __c = (c); \
-  __a - vmull_u16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsl_laneq_u32(a, b, c, __d) __extension__ ({ \
-  uint64x2_t __a = (a); uint32x2_t __b = (b); uint32x4_t __c = (c); \
-  __a - vmull_u32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
-
-#define vmls_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int16x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmls_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int32x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmls_laneq_u16(a, b, c, __d) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x4_t __b = (b); uint16x8_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmls_laneq_u32(a, b, c, __d) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x2_t __b = (b); uint32x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmls_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); float32x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
-#define vmlsq_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
-#define vmlsq_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsq_laneq_u16(a, b, c, __d) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); uint16x8_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
-#define vmlsq_laneq_u32(a, b, c, __d) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); uint32x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vmlsq_laneq_f32(a, b, c, __d) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); float32x4_t __c = (c); \
-  __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-
-
-
-#define vmull_high_lane_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x4_t __b = (b); \
-  vmull_s16(vget_high_s16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmull_high_lane_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x2_t __b = (b); \
-  vmull_s32(vget_high_s32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vmull_high_lane_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x4_t __b = (b); \
-  vmull_u16(vget_high_u16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmull_high_lane_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x2_t __b = (b); \
-  vmull_u32(vget_high_u32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
-
-#define vmull_high_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  vmull_s16(vget_high_s16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmull_high_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  vmull_s32(vget_high_s32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vmull_high_laneq_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); \
-  vmull_u16(vget_high_u16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmull_high_laneq_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); \
-  vmull_u32(vget_high_u32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
-
-__ai int32x4_t vmull_high_n_s16(int16x8_t __a, int16_t __b) {
-  return vmull_n_s16(vget_high_s16(__a), __b); }
-__ai int64x2_t vmull_high_n_s32(int32x4_t __a, int32_t __b) {
-  return vmull_n_s32(vget_high_s32(__a), __b); }
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __a, uint16_t __b) {
-  return vmull_n_u16(vget_high_u16(__a), __b); }
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __a, uint32_t __b) {
-  return vmull_n_u32(vget_high_u32(__a), __b); }
-
-#define vmull_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x8_t __b = (b); \
-  vmull_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmull_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x4_t __b = (b); \
-  vmull_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vmull_laneq_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x8_t __b = (b); \
-  vmull_u16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmull_laneq_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x4_t __b = (b); \
-  vmull_u32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-
-#define vmulx_lane_f32(a, b, __c) __extension__ ({ \
-  float32x2_t __a = (a); float32x2_t __b = (b); \
-  vmulx_f32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vmulxq_lane_f32(a, b, __c) __extension__ ({ \
-  float32x4_t __a = (a); float32x2_t __b = (b); \
-  vmulxq_f32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmulxq_lane_f64(a, b, __c) __extension__ ({ \
-  float64x2_t __a = (a); float64x1_t __b = (b); \
-  vmulxq_f64(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-
-#define vmulx_laneq_f32(a, b, __c) __extension__ ({ \
-  float32x2_t __a = (a); float32x4_t __b = (b); \
-  vmulx_f32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vmulxq_laneq_f32(a, b, __c) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); \
-  vmulxq_f32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vmulxq_laneq_f64(a, b, __c) __extension__ ({ \
-  float64x2_t __a = (a); float64x2_t __b = (b); \
-  vmulxq_f64(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-
-#define vmul_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x8_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmul_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c); })
-#define vmul_laneq_f32(a, b, __c) __extension__ ({ \
-  float32x2_t __a = (a); float32x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c); })
-#define vmul_laneq_u16(a, b, __c) __extension__ ({ \
-  uint16x4_t __a = (a); uint16x8_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmul_laneq_u32(a, b, __c) __extension__ ({ \
-  uint32x2_t __a = (a); uint32x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c); })
-#define vmulq_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c); })
-#define vmulq_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmulq_laneq_f32(a, b, __c) __extension__ ({ \
-  float32x4_t __a = (a); float32x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmulq_laneq_u16(a, b, __c) __extension__ ({ \
-  uint16x8_t __a = (a); uint16x8_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c); })
-#define vmulq_laneq_u32(a, b, __c) __extension__ ({ \
-  uint32x4_t __a = (a); uint32x4_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
-#define vmulq_laneq_f64(a, b, __c) __extension__ ({ \
-  float64x2_t __a = (a); float64x2_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c); })
-
-#define vmulq_lane_f64(a, b, __c) __extension__ ({ \
-  float64x2_t __a = (a); float64x1_t __b = (b); \
-  __a * __builtin_shufflevector(__b, __b, __c, __c); })
-
-__ai float64x2_t vmulq_n_f64(float64x2_t __a, float64_t __b) {
-  return __a * (float64x2_t){ __b, __b }; }
-
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
-  return vqdmlal_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
-  return vqdmlal_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
-
-#define vqdmlal_high_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
-  vqdmlal_s16(__a, vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vqdmlal_high_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
-  vqdmlal_s32(__a, vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-
-#define vqdmlal_high_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
-  vqdmlal_s16(__a, vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vqdmlal_high_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
-  vqdmlal_s32(__a, vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __a, int16x8_t __b, int16_t __c) {
-  return vqdmlal_n_s16(__a, vget_high_s16(__b), __c); }
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __a, int32x4_t __b, int32_t __c) {
-  return vqdmlal_n_s32(__a, vget_high_s32(__b), __c); }
-
-#define vqdmlal_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
-  vqdmlal_s16(__a, __b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vqdmlal_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
-  vqdmlal_s32(__a, __b, __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
-  return vqdmlsl_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
-  return vqdmlsl_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
-
-#define vqdmlsl_high_lane_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
-  vqdmlsl_s16(__a, vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vqdmlsl_high_lane_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
-  vqdmlsl_s32(__a, vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-
-#define vqdmlsl_high_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
-  vqdmlsl_s16(__a, vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vqdmlsl_high_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
-  vqdmlsl_s32(__a, vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
-
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __a, int16x8_t __b, int16_t __c) {
-  return vqdmlsl_n_s16(__a, vget_high_s16(__b), __c); }
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __a, int32x4_t __b, int32_t __c) {
-  return vqdmlsl_n_s32(__a, vget_high_s32(__b), __c); }
-
-#define vqdmlsl_laneq_s16(a, b, c, __d) __extension__ ({ \
-  int32x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
-  vqdmlsl_s16(__a, __b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
-#define vqdmlsl_laneq_s32(a, b, c, __d) __extension__ ({ \
-  int64x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
-  vqdmlsl_s32(__a, __b, __builtin_shufflevector(__c, __c, __d, __d)); })
-
-#define vqdmulh_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x8_t __b = (b); \
-  vqdmulh_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vqdmulh_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x4_t __b = (b); \
-  vqdmulh_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vqdmulhq_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  vqdmulhq_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c)); })
-#define vqdmulhq_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  vqdmulhq_s32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-
-__ai int32x4_t vqdmull_high_s16(int16x8_t __a, int16x8_t __b) {
-  return vqdmull_s16(vget_high_s16(__a), vget_high_s16(__b)); }
-__ai int64x2_t vqdmull_high_s32(int32x4_t __a, int32x4_t __b) {
-  return vqdmull_s32(vget_high_s32(__a), vget_high_s32(__b)); }
-
-#define vqdmull_high_lane_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x4_t __b = (b); \
-  vqdmull_s16(vget_high_s16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vqdmull_high_lane_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x2_t __b = (b); \
-  vqdmull_s32(vget_high_s32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
-
-#define vqdmull_high_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  vqdmull_s16(vget_high_s16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vqdmull_high_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  vqdmull_s32(vget_high_s32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
-
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __a, int16_t __b) {
-  return vqdmull_n_s16(vget_high_s16(__a), __b); }
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __a, int32_t __b) {
-  return vqdmull_n_s32(vget_high_s32(__a), __b); }
-
-#define vqdmull_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x8_t __b = (b); \
-  vqdmull_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vqdmull_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x4_t __b = (b); \
-  vqdmull_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-
-#define vqrdmulh_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x4_t __a = (a); int16x8_t __b = (b); \
-  vqrdmulh_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-#define vqrdmulh_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x2_t __a = (a); int32x4_t __b = (b); \
-  vqrdmulh_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
-#define vqrdmulhq_laneq_s16(a, b, __c) __extension__ ({ \
-  int16x8_t __a = (a); int16x8_t __b = (b); \
-  vqrdmulhq_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c)); })
-#define vqrdmulhq_laneq_s32(a, b, __c) __extension__ ({ \
-  int32x4_t __a = (a); int32x4_t __b = (b); \
-  vqrdmulhq_s32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
-
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vqtbl1_v((int8x16_t)__a, (int8x8_t)__b, 16); }
-__ai int8x8_t vqtbl1_s8(int8x16_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vqtbl1_v(__a, __b, 0); }
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __a, uint8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vqtbl1_v((int8x16_t)__a, (int8x8_t)__b, 4); }
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vqtbl1q_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-__ai int8x16_t vqtbl1q_s8(int8x16_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vqtbl1q_v(__a, __b, 32); }
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __a, uint8x16_t __b) {
-  return (poly8x16_t)__builtin_neon_vqtbl1q_v((int8x16_t)__a, (int8x16_t)__b, 36); }
-
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vqtbl2_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x8_t)__b, 16); }
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vqtbl2_v(__a.val[0], __a.val[1], __b, 0); }
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __a, uint8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vqtbl2_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x8_t)__b, 4); }
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vqtbl2q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__b, 48); }
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vqtbl2q_v(__a.val[0], __a.val[1], __b, 32); }
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __a, uint8x16_t __b) {
-  return (poly8x16_t)__builtin_neon_vqtbl2q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__b, 36); }
-
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vqtbl3_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x8_t)__b, 16); }
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vqtbl3_v(__a.val[0], __a.val[1], __a.val[2], __b, 0); }
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __a, uint8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vqtbl3_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x8_t)__b, 4); }
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vqtbl3q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__b, 48); }
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vqtbl3q_v(__a.val[0], __a.val[1], __a.val[2], __b, 32); }
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __a, uint8x16_t __b) {
-  return (poly8x16_t)__builtin_neon_vqtbl3q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__b, 36); }
-
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __a, uint8x8_t __b) {
-  return (uint8x8_t)__builtin_neon_vqtbl4_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__a.val[3], (int8x8_t)__b, 16); }
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __a, int8x8_t __b) {
-  return (int8x8_t)__builtin_neon_vqtbl4_v(__a.val[0], __a.val[1], __a.val[2], __a.val[3], __b, 0); }
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __a, uint8x8_t __b) {
-  return (poly8x8_t)__builtin_neon_vqtbl4_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__a.val[3], (int8x8_t)__b, 4); }
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vqtbl4q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__a.val[3], (int8x16_t)__b, 48); }
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __a, int8x16_t __b) {
-  return (int8x16_t)__builtin_neon_vqtbl4q_v(__a.val[0], __a.val[1], __a.val[2], __a.val[3], __b, 32); }
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __a, uint8x16_t __b) {
-  return (poly8x16_t)__builtin_neon_vqtbl4q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__a.val[3], (int8x16_t)__b, 36); }
-
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __a, uint8x16_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vqtbx1_v((int8x8_t)__a, (int8x16_t)__b, (int8x8_t)__c, 16); }
-__ai int8x8_t vqtbx1_s8(int8x8_t __a, int8x16_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vqtbx1_v(__a, __b, __c, 0); }
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __a, poly8x16_t __b, uint8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vqtbx1_v((int8x8_t)__a, (int8x16_t)__b, (int8x8_t)__c, 4); }
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
-  return (uint8x16_t)__builtin_neon_vqtbx1q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 48); }
-__ai int8x16_t vqtbx1q_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c) {
-  return (int8x16_t)__builtin_neon_vqtbx1q_v(__a, __b, __c, 32); }
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __a, poly8x16_t __b, uint8x16_t __c) {
-  return (poly8x16_t)__builtin_neon_vqtbx1q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 36); }
-
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __a, uint8x16x2_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vqtbx2_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x8_t)__c, 16); }
-__ai int8x8_t vqtbx2_s8(int8x8_t __a, int8x16x2_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vqtbx2_v(__a, __b.val[0], __b.val[1], __c, 0); }
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __a, poly8x16x2_t __b, uint8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vqtbx2_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x8_t)__c, 4); }
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __a, uint8x16x2_t __b, uint8x16_t __c) {
-  return (uint8x16_t)__builtin_neon_vqtbx2q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__c, 48); }
-__ai int8x16_t vqtbx2q_s8(int8x16_t __a, int8x16x2_t __b, int8x16_t __c) {
-  return (int8x16_t)__builtin_neon_vqtbx2q_v(__a, __b.val[0], __b.val[1], __c, 32); }
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __a, poly8x16x2_t __b, uint8x16_t __c) {
-  return (poly8x16_t)__builtin_neon_vqtbx2q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__c, 36); }
-
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __a, uint8x16x3_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vqtbx3_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x8_t)__c, 16); }
-__ai int8x8_t vqtbx3_s8(int8x8_t __a, int8x16x3_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vqtbx3_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 0); }
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __a, poly8x16x3_t __b, uint8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vqtbx3_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x8_t)__c, 4); }
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __a, uint8x16x3_t __b, uint8x16_t __c) {
-  return (uint8x16_t)__builtin_neon_vqtbx3q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__c, 48); }
-__ai int8x16_t vqtbx3q_s8(int8x16_t __a, int8x16x3_t __b, int8x16_t __c) {
-  return (int8x16_t)__builtin_neon_vqtbx3q_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 32); }
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __a, poly8x16x3_t __b, uint8x16_t __c) {
-  return (poly8x16_t)__builtin_neon_vqtbx3q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__c, 36); }
-
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __a, uint8x16x4_t __b, uint8x8_t __c) {
-  return (uint8x8_t)__builtin_neon_vqtbx4_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], (int8x8_t)__c, 16); }
-__ai int8x8_t vqtbx4_s8(int8x8_t __a, int8x16x4_t __b, int8x8_t __c) {
-  return (int8x8_t)__builtin_neon_vqtbx4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 0); }
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __a, poly8x16x4_t __b, uint8x8_t __c) {
-  return (poly8x8_t)__builtin_neon_vqtbx4_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], (int8x8_t)__c, 4); }
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __a, uint8x16x4_t __b, uint8x16_t __c) {
-  return (uint8x16_t)__builtin_neon_vqtbx4q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], (int8x16_t)__c, 48); }
-__ai int8x16_t vqtbx4q_s8(int8x16_t __a, int8x16x4_t __b, int8x16_t __c) {
-  return (int8x16_t)__builtin_neon_vqtbx4q_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 32); }
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __a, poly8x16x4_t __b, uint8x16_t __c) {
-  return (poly8x16_t)__builtin_neon_vqtbx4q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], (int8x16_t)__c, 36); }
-
-__ai int8x16_t vraddhn_high_s16(int8x8_t __a, int16x8_t __b, int16x8_t __c) {
-  return vcombine_s8(__a, vraddhn_s16(__b, __c)); }
-__ai int16x8_t vraddhn_high_s32(int16x4_t __a, int32x4_t __b, int32x4_t __c) {
-  return vcombine_s16(__a, vraddhn_s32(__b, __c)); }
-__ai int32x4_t vraddhn_high_s64(int32x2_t __a, int64x2_t __b, int64x2_t __c) {
-  return vcombine_s32(__a, vraddhn_s64(__b, __c)); }
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return vcombine_u8(__a, vraddhn_u16(__b, __c)); }
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return vcombine_u16(__a, vraddhn_u32(__b, __c)); }
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) {
-  return vcombine_u32(__a, vraddhn_u64(__b, __c)); }
-
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __a, int16x8_t __b, int16x8_t __c) {
-  return vcombine_s8(__a, vrsubhn_s16(__b, __c)); }
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __a, int32x4_t __b, int32x4_t __c) {
-  return vcombine_s16(__a, vrsubhn_s32(__b, __c)); }
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __a, int64x2_t __b, int64x2_t __c) {
-  return vcombine_s32(__a, vrsubhn_s64(__b, __c)); }
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return vcombine_u8(__a, vrsubhn_u16(__b, __c)); }
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return vcombine_u16(__a, vrsubhn_u32(__b, __c)); }
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) {
-  return vcombine_u32(__a, vrsubhn_u64(__b, __c)); }
-
-__ai int8x16_t vsubhn_high_s16(int8x8_t __a, int16x8_t __b, int16x8_t __c) {
-  return vcombine_s8(__a, vsubhn_s16(__b, __c)); }
-__ai int16x8_t vsubhn_high_s32(int16x4_t __a, int32x4_t __b, int32x4_t __c) {
-  return vcombine_s16(__a, vsubhn_s32(__b, __c)); }
-__ai int32x4_t vsubhn_high_s64(int32x2_t __a, int64x2_t __b, int64x2_t __c) {
-  return vcombine_s32(__a, vsubhn_s64(__b, __c)); }
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) {
-  return vcombine_u8(__a, vsubhn_u16(__b, __c)); }
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return vcombine_u16(__a, vsubhn_u32(__b, __c)); }
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) {
-  return vcombine_u32(__a, vsubhn_u64(__b, __c)); }
-
-__ai int16x8_t vsubl_high_s8(int8x16_t __a, int8x16_t __b) {
-  return vmovl_high_s8(__a) - vmovl_high_s8(__b); }
-__ai int32x4_t vsubl_high_s16(int16x8_t __a, int16x8_t __b) {
-  return vmovl_high_s16(__a) - vmovl_high_s16(__b); }
-__ai int64x2_t vsubl_high_s32(int32x4_t __a, int32x4_t __b) {
-  return vmovl_high_s32(__a) - vmovl_high_s32(__b); }
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __a, uint8x16_t __b) {
-  return vmovl_high_u8(__a) - vmovl_high_u8(__b); }
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __a, uint16x8_t __b) {
-  return vmovl_high_u16(__a) - vmovl_high_u16(__b); }
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __a, uint32x4_t __b) {
-  return vmovl_high_u32(__a) - vmovl_high_u32(__b); }
-
-__ai int16x8_t vsubw_high_s8(int16x8_t __a, int8x16_t __b) {
-  return __a - vmovl_high_s8(__b); }
-__ai int32x4_t vsubw_high_s16(int32x4_t __a, int16x8_t __b) {
-  return __a - vmovl_high_s16(__b); }
-__ai int64x2_t vsubw_high_s32(int64x2_t __a, int32x4_t __b) {
-  return __a - vmovl_high_s32(__b); }
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __a, uint8x16_t __b) {
-  return __a - vmovl_high_u8(__b); }
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __a, uint16x8_t __b) {
-  return __a - vmovl_high_u16(__b); }
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __a, uint32x4_t __b) {
-  return __a - vmovl_high_u32(__b); }
-
-__ai int8x8_t vtrn1_s8(int8x8_t __a, int8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
-__ai int16x4_t vtrn1_s16(int16x4_t __a, int16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
-__ai int32x2_t vtrn1_s32(int32x2_t __a, int32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai uint8x8_t vtrn1_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
-__ai uint16x4_t vtrn1_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
-__ai uint32x2_t vtrn1_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai float32x2_t vtrn1_f32(float32x2_t __a, float32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai poly8x8_t vtrn1_p8(poly8x8_t __a, poly8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
-__ai poly16x4_t vtrn1_p16(poly16x4_t __a, poly16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
-__ai int8x16_t vtrn1q_s8(int8x16_t __a, int8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); }
-__ai int16x8_t vtrn1q_s16(int16x8_t __a, int16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
-__ai int32x4_t vtrn1q_s32(int32x4_t __a, int32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
-__ai int64x2_t vtrn1q_s64(int64x2_t __a, int64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); }
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai float32x4_t vtrn1q_f32(float32x4_t __a, float32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
-__ai float64x2_t vtrn1q_f64(float64x2_t __a, float64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __a, poly8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); }
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __a, poly16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __a, poly64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-
-__ai int8x8_t vtrn2_s8(int8x8_t __a, int8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
-__ai int16x4_t vtrn2_s16(int16x4_t __a, int16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
-__ai int32x2_t vtrn2_s32(int32x2_t __a, int32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai uint8x8_t vtrn2_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
-__ai uint16x4_t vtrn2_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
-__ai uint32x2_t vtrn2_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai float32x2_t vtrn2_f32(float32x2_t __a, float32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai poly8x8_t vtrn2_p8(poly8x8_t __a, poly8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
-__ai poly16x4_t vtrn2_p16(poly16x4_t __a, poly16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
-__ai int8x16_t vtrn2q_s8(int8x16_t __a, int8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); }
-__ai int16x8_t vtrn2q_s16(int16x8_t __a, int16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
-__ai int32x4_t vtrn2q_s32(int32x4_t __a, int32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
-__ai int64x2_t vtrn2q_s64(int64x2_t __a, int64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); }
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai float32x4_t vtrn2q_f32(float32x4_t __a, float32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
-__ai float64x2_t vtrn2q_f64(float64x2_t __a, float64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __a, poly8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); }
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __a, poly16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __a, poly64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-
-__ai int8x8_t vuzp1_s8(int8x8_t __a, int8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
-__ai int16x4_t vuzp1_s16(int16x4_t __a, int16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
-__ai int32x2_t vuzp1_s32(int32x2_t __a, int32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai uint8x8_t vuzp1_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
-__ai uint16x4_t vuzp1_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
-__ai uint32x2_t vuzp1_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai float32x2_t vuzp1_f32(float32x2_t __a, float32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai poly8x8_t vuzp1_p8(poly8x8_t __a, poly8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
-__ai poly16x4_t vuzp1_p16(poly16x4_t __a, poly16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
-__ai int8x16_t vuzp1q_s8(int8x16_t __a, int8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); }
-__ai int16x8_t vuzp1q_s16(int16x8_t __a, int16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
-__ai int32x4_t vuzp1q_s32(int32x4_t __a, int32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
-__ai int64x2_t vuzp1q_s64(int64x2_t __a, int64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); }
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai float32x4_t vuzp1q_f32(float32x4_t __a, float32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
-__ai float64x2_t vuzp1q_f64(float64x2_t __a, float64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __a, poly8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); }
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __a, poly16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __a, poly64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-
-__ai int8x8_t vuzp2_s8(int8x8_t __a, int8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
-__ai int16x4_t vuzp2_s16(int16x4_t __a, int16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
-__ai int32x2_t vuzp2_s32(int32x2_t __a, int32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai uint8x8_t vuzp2_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
-__ai uint16x4_t vuzp2_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
-__ai uint32x2_t vuzp2_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai float32x2_t vuzp2_f32(float32x2_t __a, float32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai poly8x8_t vuzp2_p8(poly8x8_t __a, poly8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
-__ai poly16x4_t vuzp2_p16(poly16x4_t __a, poly16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
-__ai int8x16_t vuzp2q_s8(int8x16_t __a, int8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); }
-__ai int16x8_t vuzp2q_s16(int16x8_t __a, int16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
-__ai int32x4_t vuzp2q_s32(int32x4_t __a, int32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
-__ai int64x2_t vuzp2q_s64(int64x2_t __a, int64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); }
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai float32x4_t vuzp2q_f32(float32x4_t __a, float32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
-__ai float64x2_t vuzp2q_f64(float64x2_t __a, float64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __a, poly8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); }
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __a, poly16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __a, poly64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-
-__ai int8x8_t vzip1_s8(int8x8_t __a, int8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
-__ai int16x4_t vzip1_s16(int16x4_t __a, int16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
-__ai int32x2_t vzip1_s32(int32x2_t __a, int32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai uint8x8_t vzip1_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
-__ai uint16x4_t vzip1_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
-__ai uint32x2_t vzip1_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai float32x2_t vzip1_f32(float32x2_t __a, float32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai poly8x8_t vzip1_p8(poly8x8_t __a, poly8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
-__ai poly16x4_t vzip1_p16(poly16x4_t __a, poly16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
-__ai int8x16_t vzip1q_s8(int8x16_t __a, int8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); }
-__ai int16x8_t vzip1q_s16(int16x8_t __a, int16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
-__ai int32x4_t vzip1q_s32(int32x4_t __a, int32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
-__ai int64x2_t vzip1q_s64(int64x2_t __a, int64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai uint8x16_t vzip1q_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); }
-__ai uint16x8_t vzip1q_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
-__ai uint32x4_t vzip1q_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
-__ai uint64x2_t vzip1q_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai float32x4_t vzip1q_f32(float32x4_t __a, float32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
-__ai float64x2_t vzip1q_f64(float64x2_t __a, float64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-__ai poly8x16_t vzip1q_p8(poly8x16_t __a, poly8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); }
-__ai poly16x8_t vzip1q_p16(poly16x8_t __a, poly16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
-__ai poly64x2_t vzip1q_p64(poly64x2_t __a, poly64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 0, 2); }
-
-__ai int8x8_t vzip2_s8(int8x8_t __a, int8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
-__ai int16x4_t vzip2_s16(int16x4_t __a, int16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
-__ai int32x2_t vzip2_s32(int32x2_t __a, int32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai uint8x8_t vzip2_u8(uint8x8_t __a, uint8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
-__ai uint16x4_t vzip2_u16(uint16x4_t __a, uint16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
-__ai uint32x2_t vzip2_u32(uint32x2_t __a, uint32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai float32x2_t vzip2_f32(float32x2_t __a, float32x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai poly8x8_t vzip2_p8(poly8x8_t __a, poly8x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
-__ai poly16x4_t vzip2_p16(poly16x4_t __a, poly16x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
-__ai int8x16_t vzip2q_s8(int8x16_t __a, int8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); }
-__ai int16x8_t vzip2q_s16(int16x8_t __a, int16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
-__ai int32x4_t vzip2q_s32(int32x4_t __a, int32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
-__ai int64x2_t vzip2q_s64(int64x2_t __a, int64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai uint8x16_t vzip2q_u8(uint8x16_t __a, uint8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); }
-__ai uint16x8_t vzip2q_u16(uint16x8_t __a, uint16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
-__ai uint32x4_t vzip2q_u32(uint32x4_t __a, uint32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
-__ai uint64x2_t vzip2q_u64(uint64x2_t __a, uint64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai float32x4_t vzip2q_f32(float32x4_t __a, float32x4_t __b) {
-  return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
-__ai float64x2_t vzip2q_f64(float64x2_t __a, float64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-__ai poly8x16_t vzip2q_p8(poly8x16_t __a, poly8x16_t __b) {
-  return __builtin_shufflevector(__a, __b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); }
-__ai poly16x8_t vzip2q_p16(poly16x8_t __a, poly16x8_t __b) {
-  return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
-__ai poly64x2_t vzip2q_p64(poly64x2_t __a, poly64x2_t __b) {
-  return __builtin_shufflevector(__a, __b, 1, 3); }
-
-__ai int8x16_t vmovn_high_s16(int8x8_t __a, int16x8_t __b) {
-  int8x8_t __a1 = vmovn_s16(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
-__ai int16x8_t vmovn_high_s32(int16x4_t __a, int32x4_t __b) {
-  int16x4_t __a1 = vmovn_s32(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
-__ai int32x4_t vmovn_high_s64(int32x2_t __a, int64x2_t __b) {
-  int32x2_t __a1 = vmovn_s64(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __a, uint16x8_t __b) {
-  uint8x8_t __a1 = vmovn_u16(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __a, uint32x4_t __b) {
-  uint16x4_t __a1 = vmovn_u32(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __a, uint64x2_t __b) {
-  uint32x2_t __a1 = vmovn_u64(__b);
-  return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
-
-#ifdef __ARM_FEATURE_CRYPTO
-__ai uint8x16_t vaesdq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vaesdq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-
-__ai uint8x16_t vaeseq_u8(uint8x16_t __a, uint8x16_t __b) {
-  return (uint8x16_t)__builtin_neon_vaeseq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
-
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vaesimcq_v((int8x16_t)__a, 48); }
-
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __a) {
-  return (uint8x16_t)__builtin_neon_vaesmcq_v((int8x16_t)__a, 48); }
-
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __a, uint32_t __b, uint32x4_t __c) {
-  return (uint32x4_t)__builtin_neon_vsha1cq_u32((int32x4_t)__a, __b, (int32x4_t)__c); }
-
-__ai uint32_t vsha1h_u32(uint32_t __a) {
-  return (uint32_t)__builtin_neon_vsha1h_u32(__a); }
-
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __a, uint32_t __b, uint32x4_t __c) {
-  return (uint32x4_t)__builtin_neon_vsha1mq_u32((int32x4_t)__a, __b, (int32x4_t)__c); }
-
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __a, uint32_t __b, uint32x4_t __c) {
-  return (uint32x4_t)__builtin_neon_vsha1pq_u32((int32x4_t)__a, __b, (int32x4_t)__c); }
-
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return (uint32x4_t)__builtin_neon_vsha1su0q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
-
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vsha1su1q_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return (uint32x4_t)__builtin_neon_vsha256hq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
-
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return (uint32x4_t)__builtin_neon_vsha256h2q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
-
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __a, uint32x4_t __b) {
-  return (uint32x4_t)__builtin_neon_vsha256su0q_v((int8x16_t)__a, (int8x16_t)__b, 50); }
-
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
-  return (uint32x4_t)__builtin_neon_vsha256su1q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
-
-#endif
-
-#endif
-
-#undef __ai
-
-#endif /* __ARM_NEON_H */
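
The block deleted above is the AArch64-only tail of this generated header: lane-selecting multiplies, the saturating doubling multiply-accumulate/subtract families, the full 128-bit table lookups (vqtbl/vqtbx), the vtrn/vuzp/vzip permutes, the narrowing *_high helpers, and the crypto intrinsics. The __builtin_shufflevector expansions show exactly which lanes each permute selects. A minimal sketch of two of them, assuming an AArch64 target with arm_neon.h available (illustrative only, not part of the patch):

    #include <arm_neon.h>
    #include <stdio.h>

    int main() {
      int32x4_t a = {0, 1, 2, 3};
      int32x4_t b = {4, 5, 6, 7};

      // vzip1q_s32 interleaves the low halves of a and b: {0, 4, 1, 5}.
      int32x4_t zipped = vzip1q_s32(a, b);
      // vtrn2q_s32 keeps the odd lanes of both inputs: {1, 5, 3, 7}.
      int32x4_t trans = vtrn2q_s32(a, b);

      printf("%d %d %d %d\n", vgetq_lane_s32(zipped, 0), vgetq_lane_s32(zipped, 1),
             vgetq_lane_s32(zipped, 2), vgetq_lane_s32(zipped, 3));
      printf("%d %d %d %d\n", vgetq_lane_s32(trans, 0), vgetq_lane_s32(trans, 1),
             vgetq_lane_s32(trans, 2), vgetq_lane_s32(trans, 3));
      return 0;
    }
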
diff --git a/lib/Headers/cpuid.h b/lib/Headers/cpuid.h
index 8f12cae..f9254e9 100644
--- a/lib/Headers/cpuid.h
+++ b/lib/Headers/cpuid.h
@@ -79,6 +79,7 @@
 #define bit_ACPI        0x00400000
 #define bit_MMX         0x00800000
 #define bit_FXSR        0x01000000
+#define bit_FXSAVE      bit_FXSR    /* for gcc compat */
 #define bit_SSE         0x02000000
 #define bit_SSE2        0x04000000
 #define bit_SS          0x08000000
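
bit_FXSAVE is added purely as a GCC-compatible spelling of bit_FXSR (CPUID.1:EDX bit 24, mask 0x01000000), so code written against either name builds. A minimal feature check, assuming the __get_cpuid helper of GCC-compatible <cpuid.h> implementations is available (illustrative only, not part of the patch):

    #include <cpuid.h>
    #include <stdio.h>

    int main() {
      unsigned eax, ebx, ecx, edx;
      if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
        // bit_FXSAVE and bit_FXSR are now the same mask.
        printf("FXSAVE/FXRSTOR: %s\n", (edx & bit_FXSAVE) ? "yes" : "no");
      }
      return 0;
    }
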
diff --git a/lib/Headers/float.h b/lib/Headers/float.h
index 2cb13d3..02ef6bf 100644
--- a/lib/Headers/float.h
+++ b/lib/Headers/float.h
@@ -29,7 +29,7 @@
  * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
  */
 #if (defined(__MINGW32__) || defined(_MSC_VER)) && \
-    defined(__has_include_next) && __has_include_next(<float.h>)
+    __has_include_next(<float.h>)
 #  include_next <float.h>
 
 /* Undefine anything that we'll be redefining below. */
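
The defined(__has_include_next) guard can go away here (and in limits.h and stdint.h below) because this header ships with Clang and is only ever preprocessed by Clang, which always implements __has_include_next. A wrapper header that other compilers might also read would still want the nested, portable form, roughly as follows (header fragment, illustrative only):

    /* Portable variant for headers that non-Clang compilers may also read. */
    #if defined(__has_include_next)
    # if __has_include_next(<float.h>)
    #  include_next <float.h>
    # endif
    #endif
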
diff --git a/lib/Headers/ia32intrin.h b/lib/Headers/ia32intrin.h
new file mode 100644
index 0000000..a5985f6
--- /dev/null
+++ b/lib/Headers/ia32intrin.h
@@ -0,0 +1,92 @@
+/* ===-------- ia32intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <ia32intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __IA32INTRIN_H
+#define __IA32INTRIN_H
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__readeflags(void)
+{
+  unsigned long long __res = 0;
+  __asm__ __volatile__ ("pushf\n\t"
+                        "popq %0\n"
+                        :"=r"(__res)
+                        :
+                        :
+                       );
+  return __res;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__writeeflags(unsigned long long __f)
+{
+  __asm__ __volatile__ ("pushq %0\n\t"
+                        "popf\n"
+                        :
+                        :"r"(__f)
+                        :"flags"
+                       );
+}
+
+#else /* !__x86_64__ */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__readeflags(void)
+{
+  unsigned int __res = 0;
+  __asm__ __volatile__ ("pushf\n\t"
+                        "popl %0\n"
+                        :"=r"(__res)
+                        :
+                        :
+                       );
+  return __res;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__writeeflags(unsigned int __f)
+{
+  __asm__ __volatile__ ("pushl %0\n\t"
+                        "popf\n"
+                        :
+                        :"r"(__f)
+                        :"flags"
+                       );
+}
+#endif /* !__x86_64__ */
+
+/* __rdtsc */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdtsc(void) {
+  unsigned int __eax, __edx;
+  __asm__ ("rdtsc" : "=a" (__eax), "=d" (__edx));
+  return ((unsigned long long)__edx << 32) | __eax;
+}
+
+#define _rdtsc() __rdtsc()
+
+#endif /* __IA32INTRIN_H */
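
The new ia32intrin.h provides __readeflags, __writeeflags, and __rdtsc for both 32- and 64-bit x86; it refuses direct inclusion and is reached through <x86intrin.h> (see the x86intrin.h hunk below). A minimal use, assuming an x86 target (illustrative only, not part of the patch):

    #include <x86intrin.h>
    #include <stdio.h>

    int main() {
      // Two time-stamp counter reads and one flags-register read.
      unsigned long long t0 = __rdtsc();
      unsigned long long t1 = __rdtsc();
      unsigned long long flags = (unsigned long long)__readeflags();

      printf("rdtsc delta: %llu\n", t1 - t0);
      printf("eflags:      %#llx\n", flags);
      return 0;
    }
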
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index 15d6e05..df4bea8 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -48,7 +48,7 @@
 #include <smmintrin.h>
 #endif
 
-#if defined (__AES__)
+#if defined (__AES__) || defined (__PCLMUL__)
 #include <wmmintrin.h>
 #endif
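
wmmintrin.h declares both the AES and the carry-less multiply (PCLMUL) intrinsics, so immintrin.h now pulls it in when either feature is enabled rather than only for AES. A minimal carry-less multiply, assuming an x86-64 target built with -mpclmul (illustrative only, not part of the patch):

    #include <immintrin.h>
    #include <stdio.h>

    int main() {
    #ifdef __PCLMUL__
      // Carry-less multiply of the low 64-bit lanes:
      // (x + 1) * (x + 1) = x^2 + 1 over GF(2), i.e. 3 * 3 -> 5.
      __m128i a = _mm_set_epi64x(0, 3);
      __m128i b = _mm_set_epi64x(0, 3);
      __m128i r = _mm_clmulepi64_si128(a, b, 0x00);
      printf("%llu\n", (unsigned long long)_mm_cvtsi128_si64(r));
    #endif
      return 0;
    }
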
 
diff --git a/lib/Headers/limits.h b/lib/Headers/limits.h
index 91bd404..f04187c 100644
--- a/lib/Headers/limits.h
+++ b/lib/Headers/limits.h
@@ -33,8 +33,7 @@
 
 /* System headers include a number of constants from POSIX in <limits.h>.
    Include it if we're hosted. */
-#if __STDC_HOSTED__ && \
-    defined(__has_include_next) && __has_include_next(<limits.h>)
+#if __STDC_HOSTED__ && __has_include_next(<limits.h>)
 #include_next <limits.h>
 #endif
 
@@ -90,7 +89,7 @@
 /* C99 5.2.4.2.1: Added long long.
    C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
  */
-#if __STDC_VERSION__ >= 199901 || __cplusplus >= 201103L
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
 
 #undef  LLONG_MIN
 #undef  LLONG_MAX
diff --git a/lib/Headers/stdarg.h b/lib/Headers/stdarg.h
index 2957bf0..6110a06 100644
--- a/lib/Headers/stdarg.h
+++ b/lib/Headers/stdarg.h
@@ -39,7 +39,7 @@
  */
 #define __va_copy(d,s) __builtin_va_copy(d,s)
 
-#if __STDC_VERSION__ >= 199900L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
 #define va_copy(dest, src)  __builtin_va_copy(dest, src)
 #endif
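
The stdarg.h change corrects the C99 version value used in the guard (199901L rather than 199900L); this guard is what exposes the standard va_copy name. Typical use of va_copy (illustrative only, not part of the patch):

    #include <stdarg.h>
    #include <stdio.h>

    // Print the same format twice from one variadic call by copying the va_list.
    static void print_twice(const char *fmt, ...) {
      va_list ap, ap2;
      va_start(ap, fmt);
      va_copy(ap2, ap);   // gated by the corrected version check above
      vprintf(fmt, ap);
      vprintf(fmt, ap2);
      va_end(ap2);
      va_end(ap);
    }

    int main() {
      print_twice("%d %s\n", 42, "hello");
      return 0;
    }
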
 
diff --git a/lib/Headers/stddef.h b/lib/Headers/stddef.h
index 6a64d6d..97126ed 100644
--- a/lib/Headers/stddef.h
+++ b/lib/Headers/stddef.h
@@ -84,6 +84,20 @@
 #endif
 #endif
 
+#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+#ifndef _MSC_VER
+typedef struct {
+  long long __clang_max_align_nonce1
+      __attribute__((__aligned__(__alignof__(long long))));
+  long double __clang_max_align_nonce2
+      __attribute__((__aligned__(__alignof__(long double))));
+} max_align_t;
+#else
+typedef double max_align_t;
+#endif
+#define __CLANG_MAX_ALIGN_T_DEFINED
+#endif
+
 #define offsetof(t, d) __builtin_offsetof(t, d)
 
 #endif /* __STDDEF_H */
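
The stddef.h addition supplies C11/C++11 max_align_t: a struct aligned at least as strictly as long long and long double (mirroring GCC's definition), with a plain double fallback under MSVC. A quick way to see the resulting alignment, assuming -std=c++11 or later (illustrative only, not part of the patch):

    #include <stddef.h>
    #include <stdio.h>

    int main() {
      // max_align_t's alignment should be no weaker than any scalar type's.
      printf("alignof(max_align_t) = %zu\n", alignof(max_align_t));
      printf("alignof(long double) = %zu\n", alignof(long double));
      printf("alignof(long long)   = %zu\n", alignof(long long));
      return 0;
    }
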
diff --git a/lib/Headers/stdint.h b/lib/Headers/stdint.h
index 11529c0..2b1bc09 100644
--- a/lib/Headers/stdint.h
+++ b/lib/Headers/stdint.h
@@ -28,8 +28,7 @@
 /* If we're hosted, fall back to the system's stdint.h, which might have
  * additional definitions.
  */
-#if __STDC_HOSTED__ && \
-    defined(__has_include_next) && __has_include_next(<stdint.h>)
+#if __STDC_HOSTED__ && __has_include_next(<stdint.h>)
 
 // C99 7.18.3 Limits of other integer types
 //
diff --git a/lib/Headers/x86intrin.h b/lib/Headers/x86intrin.h
index 399016f..be9e71d 100644
--- a/lib/Headers/x86intrin.h
+++ b/lib/Headers/x86intrin.h
@@ -24,6 +24,8 @@
 #ifndef __X86INTRIN_H
 #define __X86INTRIN_H
 
+#include <ia32intrin.h>
+
 #include <immintrin.h>
 
 #ifdef __3dNOW__
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index c68d3ed..c8765a7 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -672,10 +672,12 @@
 #define _MM_HINT_T2 1
 #define _MM_HINT_NTA 0
 
+#ifndef _MSC_VER
 /* FIXME: We have to #define this because "sel" must be a constant integer, and
    Sema doesn't do any form of constant propagation yet. */
 
 #define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
+#endif
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 _mm_stream_pi(__m64 *__p, __m64 __a)
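
_mm_prefetch stays a macro so that the locality hint reaches __builtin_prefetch as a compile-time constant, and it is now left out when _MSC_VER is defined, presumably to avoid clashing with MSVC's own declaration. Typical use (illustrative only, not part of the patch):

    #include <xmmintrin.h>
    #include <stdio.h>

    int main() {
      static float data[1024] = {0};
      for (int i = 0; i < 1024; i += 16) {
        // Hint the prefetcher at a block we will touch shortly.
        if (i + 64 < 1024)
          _mm_prefetch((const char *)&data[i + 64], _MM_HINT_T0);
        data[i] += 1.0f;
      }
      printf("%f\n", data[0]);
      return 0;
    }
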
diff --git a/lib/Index/CMakeLists.txt b/lib/Index/CMakeLists.txt
index c4ff5a0..fd9810d 100644
--- a/lib/Index/CMakeLists.txt
+++ b/lib/Index/CMakeLists.txt
@@ -1,11 +1,19 @@
-add_clang_library(clangIndex
-  CommentToXML.cpp
-  SimpleFormatContext.h
-  USRGeneration.cpp
+set(LLVM_LINK_COMPONENTS
+  Support
   )
 
-target_link_libraries(clangIndex
-  clangBasic
+add_clang_library(clangIndex
+  CommentToXML.cpp
+  USRGeneration.cpp
+
+  ADDITIONAL_HEADERS
+  SimpleFormatContext.h
+
+  LINK_LIBS
   clangAST
+  clangBasic
   clangFormat
+  clangLex
+  clangRewriteCore
+  clangTooling
   )
diff --git a/lib/Index/CommentToXML.cpp b/lib/Index/CommentToXML.cpp
index 0a9619d..43c4232 100644
--- a/lib/Index/CommentToXML.cpp
+++ b/lib/Index/CommentToXML.cpp
@@ -9,8 +9,8 @@
 
 #include "clang/Index/CommentToXML.h"
 #include "SimpleFormatContext.h"
-#include "clang/AST/Attr.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
 #include "clang/AST/Comment.h"
 #include "clang/AST/CommentVisitor.h"
 #include "clang/Format/Format.h"
diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp
index 16d89f8..15aa875 100644
--- a/lib/Index/USRGeneration.cpp
+++ b/lib/Index/USRGeneration.cpp
@@ -11,6 +11,7 @@
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/AST/DeclVisitor.h"
+#include "clang/Lex/PreprocessingRecord.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
@@ -22,6 +23,30 @@
 // USR generation.
 //===----------------------------------------------------------------------===//
 
+/// \returns true on error.
+static bool printLoc(llvm::raw_ostream &OS, SourceLocation Loc,
+                     const SourceManager &SM, bool IncludeOffset) {
+  if (Loc.isInvalid()) {
+    return true;
+  }
+  Loc = SM.getExpansionLoc(Loc);
+  const std::pair<FileID, unsigned> &Decomposed = SM.getDecomposedLoc(Loc);
+  const FileEntry *FE = SM.getFileEntryForID(Decomposed.first);
+  if (FE) {
+    OS << llvm::sys::path::filename(FE->getName());
+  } else {
+    // This case really isn't interesting.
+    return true;
+  }
+  if (IncludeOffset) {
+    // Use the offset into the FileID to represent the location.  Using
+    // a line/column can cause us to look back at the original source file,
+    // which is expensive.
+    OS << '@' << Decomposed.second;
+  }
+  return false;
+}
+
 namespace {
 class USRGenerator : public ConstDeclVisitor<USRGenerator> {
   SmallVectorImpl<char> &Buf;
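
printLoc factors out the "filename@offset" component that GenLoc previously produced inline: it prints the base name of the file containing the expansion location and, only when IncludeOffset is set, the byte offset within that file (an offset is cheaper than a line/column, which would force a look back at the original source). A standalone sketch of the same formatting, without the SourceManager machinery (the helper name below is hypothetical; illustrative only, not part of the patch):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Mirror printLoc's output: "<basename>" or "<basename>@<offset>".
    std::string formatLoc(const std::string &Path, unsigned Offset,
                          bool IncludeOffset) {
      // Keep only the file name, as llvm::sys::path::filename() does.
      std::string Base = Path.substr(Path.find_last_of("/\\") + 1);
      std::ostringstream OS;
      OS << Base;
      if (IncludeOffset)
        OS << '@' << Offset;
      return OS.str();
    }

    int main() {
      // Prints "USRGeneration.cpp@1234".
      std::cout << formatLoc("lib/Index/USRGeneration.cpp", 1234, true) << "\n";
      return 0;
    }
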
@@ -80,10 +105,16 @@
   void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D) {
     IgnoreResults = true;
   }
-  
+
+  bool ShouldGenerateLocation(const NamedDecl *D);
+
+  bool isLocal(const NamedDecl *D) {
+    return D->getParentFunctionOrMethod() != 0;
+  }
+
   /// Generate the string component containing the location of the
   ///  declaration.
-  bool GenLoc(const Decl *D);
+  bool GenLoc(const Decl *D, bool IncludeOffset);
 
   /// String generation methods used both by the visitation methods
   /// and from other clients that want to directly generate USRs.  These
@@ -99,16 +130,6 @@
   void GenObjCCategory(StringRef cls, StringRef cat) {
     generateUSRForObjCCategory(cls, cat, Out);
   }
-  /// Generate a USR fragment for an Objective-C instance variable.  The
-  /// complete USR can be created by concatenating the USR for the
-  /// encompassing class with this USR fragment.
-  void GenObjCIvar(StringRef ivar) {
-    generateUSRForObjCIvar(ivar, Out);
-  }
-  /// Generate a USR fragment for an Objective-C method.
-  void GenObjCMethod(StringRef sel, bool isInstanceMethod) {
-    generateUSRForObjCMethod(sel, isInstanceMethod, Out);
-  }
   /// Generate a USR fragment for an Objective-C property.
   void GenObjCProperty(StringRef prop) {
     generateUSRForObjCProperty(prop, Out);
@@ -143,8 +164,13 @@
   return startSize == endSize;
 }
 
-static inline bool ShouldGenerateLocation(const NamedDecl *D) {
-  return !D->isExternallyVisible();
+bool USRGenerator::ShouldGenerateLocation(const NamedDecl *D) {
+  if (D->isExternallyVisible())
+    return false;
+  if (D->getParentFunctionOrMethod())
+    return true;
+  const SourceManager &SM = Context->getSourceManager();
+  return !SM.isInSystemHeader(D->getLocation());
 }
 
 void USRGenerator::VisitDeclContext(const DeclContext *DC) {
@@ -168,7 +194,7 @@
 }
 
 void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
-  if (ShouldGenerateLocation(D) && GenLoc(D))
+  if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
     return;
 
   VisitDeclContext(D->getDeclContext());
@@ -194,12 +220,9 @@
   }
 
   // Mangle in type information for the arguments.
-  for (FunctionDecl::param_const_iterator I = D->param_begin(),
-                                          E = D->param_end();
-       I != E; ++I) {
+  for (auto PD : D->params()) {
     Out << '#';
-    if (ParmVarDecl *PD = *I)
-      VisitType(PD->getType());
+    VisitType(PD->getType());
   }
   if (D->isVariadic())
     Out << '.';
@@ -229,7 +252,7 @@
   // VarDecls can be declared 'extern' within a function or method body,
   // but their enclosing DeclContext is the function, not the TU.  We need
   // to check the storage class to correctly generate the USR.
-  if (ShouldGenerateLocation(D) && GenLoc(D))
+  if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
     return;
 
   VisitDeclContext(D->getDeclContext());
@@ -249,13 +272,13 @@
 
 void USRGenerator::VisitNonTypeTemplateParmDecl(
                                         const NonTypeTemplateParmDecl *D) {
-  GenLoc(D);
+  GenLoc(D, /*IncludeOffset=*/true);
   return;
 }
 
 void USRGenerator::VisitTemplateTemplateParmDecl(
                                         const TemplateTemplateParmDecl *D) {
-  GenLoc(D);
+  GenLoc(D, /*IncludeOffset=*/true);
   return;
 }
 
@@ -329,7 +352,7 @@
       // We want to mangle in the location to uniquely distinguish them.
       if (CD->IsClassExtension()) {
         Out << "objc(ext)" << ID->getName() << '@';
-        GenLoc(CD);
+        GenLoc(CD, /*IncludeOffset=*/true);
       }
       else
         GenObjCCategory(ID->getName(), CD->getName());
@@ -378,7 +401,7 @@
 void USRGenerator::VisitTagDecl(const TagDecl *D) {
   // Add the location of the tag decl to handle resolution across
   // translation units.
-  if (ShouldGenerateLocation(D) && GenLoc(D))
+  if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
     return;
 
   D = D->getCanonicalDecl();
@@ -449,7 +472,7 @@
 }
 
 void USRGenerator::VisitTypedefDecl(const TypedefDecl *D) {
-  if (ShouldGenerateLocation(D) && GenLoc(D))
+  if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
     return;
   const DeclContext *DC = D->getDeclContext();
   if (const NamedDecl *DCN = dyn_cast<NamedDecl>(DC))
@@ -459,15 +482,15 @@
 }
 
 void USRGenerator::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
-  GenLoc(D);
+  GenLoc(D, /*IncludeOffset=*/true);
   return;
 }
 
-bool USRGenerator::GenLoc(const Decl *D) {
+bool USRGenerator::GenLoc(const Decl *D, bool IncludeOffset) {
   if (generatedLoc)
     return IgnoreResults;
   generatedLoc = true;
-  
+
   // Guard against null declarations in invalid code.
   if (!D) {
     IgnoreResults = true;
@@ -477,27 +500,10 @@
   // Use the location of canonical decl.
   D = D->getCanonicalDecl();
 
-  const SourceManager &SM = Context->getSourceManager();
-  SourceLocation L = D->getLocStart();
-  if (L.isInvalid()) {
-    IgnoreResults = true;
-    return true;
-  }
-  L = SM.getExpansionLoc(L);
-  const std::pair<FileID, unsigned> &Decomposed = SM.getDecomposedLoc(L);
-  const FileEntry *FE = SM.getFileEntryForID(Decomposed.first);
-  if (FE) {
-    Out << llvm::sys::path::filename(FE->getName());
-  }
-  else {
-    // This case really isn't interesting.
-    IgnoreResults = true;
-    return true;
-  }
-  // Use the offest into the FileID to represent the location.  Using
-  // a line/column can cause us to look back at the original source file,
-  // which is expensive.
-  Out << '@' << Decomposed.second;
+  IgnoreResults =
+      IgnoreResults || printLoc(Out, D->getLocStart(),
+                                Context->getSourceManager(), IncludeOffset);
+
   return IgnoreResults;
 }
 
@@ -627,11 +633,9 @@
     }
     if (const FunctionProtoType *FT = T->getAs<FunctionProtoType>()) {
       Out << 'F';
-      VisitType(FT->getResultType());
-      for (FunctionProtoType::arg_type_iterator
-            I = FT->arg_type_begin(), E = FT->arg_type_end(); I!=E; ++I) {
-        VisitType(*I);
-      }
+      VisitType(FT->getReturnType());
+      for (const auto &I : FT->param_types())
+        VisitType(I);
       if (FT->isVariadic())
         Out << '.';
       return;
@@ -801,3 +805,26 @@
   UG.Visit(D);
   return UG.ignoreResults();
 }
+
+bool clang::index::generateUSRForMacro(const MacroDefinition *MD,
+                                       const SourceManager &SM,
+                                       SmallVectorImpl<char> &Buf) {
+  // Don't generate USRs for things with invalid locations.
+  if (!MD || MD->getLocation().isInvalid())
+    return true;
+
+  llvm::raw_svector_ostream Out(Buf);
+
+  // Assume that system headers are sane.  Don't put source location
+  // information into the USR if the macro comes from a system header.
+  SourceLocation Loc = MD->getLocation();
+  bool ShouldGenerateLocation = !SM.isInSystemHeader(Loc);
+
+  Out << getUSRSpacePrefix();
+  if (ShouldGenerateLocation)
+    printLoc(Out, Loc, SM, /*IncludeOffset=*/true);
+  Out << "@macro@";
+  Out << MD->getName()->getNameStart();
+  return false;
+}
+
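For reference, the USR produced by the new generateUSRForMacro above is a plain string concatenation; a minimal standalone sketch of its shape (plain C++ with an illustrative helper, not Clang's API, assuming the usual "c:" USR prefix):

#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical stand-in for the pieces generateUSRForMacro concatenates:
// the "c:" prefix, an optional "<file>@<offset>" when the macro does not
// come from a system header, then "@macro@" and the macro's spelling.
static std::string makeMacroUSR(const std::string &FileName, uint64_t Offset,
                                bool InSystemHeader, const std::string &Name) {
  std::string USR = "c:";
  if (!InSystemHeader)
    USR += FileName + "@" + std::to_string(Offset);
  USR += "@macro@";
  USR += Name;
  return USR;
}

int main() {
  // e.g. "c:foo.h@120@macro@FOO" for a user header,
  //      "c:@macro@NULL" for a macro pulled in from a system header.
  std::cout << makeMacroUSR("foo.h", 120, false, "FOO") << "\n";
  std::cout << makeMacroUSR("stddef.h", 64, true, "NULL") << "\n";
}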
diff --git a/lib/Lex/Android.mk b/lib/Lex/Android.mk
index f3e294f..a5e9661 100644
--- a/lib/Lex/Android.mk
+++ b/lib/Lex/Android.mk
@@ -7,8 +7,7 @@
 
 TBLGEN_TABLES := \
   DiagnosticLexKinds.inc \
-  DiagnosticCommonKinds.inc \
-  AttrSpellings.inc
+  DiagnosticCommonKinds.inc
 
 clang_lex_SRC_FILES := \
   HeaderMap.cpp \
diff --git a/lib/Lex/CMakeLists.txt b/lib/Lex/CMakeLists.txt
index 2ee4682..38df144 100644
--- a/lib/Lex/CMakeLists.txt
+++ b/lib/Lex/CMakeLists.txt
@@ -25,14 +25,7 @@
   ScratchBuffer.cpp
   TokenConcatenation.cpp
   TokenLexer.cpp
-  )
 
-add_dependencies(clangLex
-  ClangAttrSpellings
-  ClangDiagnosticCommon
-  ClangDiagnosticLex
-  )
-
-target_link_libraries(clangLex
+  LINK_LIBS
   clangBasic
   )
diff --git a/lib/Lex/HeaderMap.cpp b/lib/Lex/HeaderMap.cpp
index 478462c..2d07428 100644
--- a/lib/Lex/HeaderMap.cpp
+++ b/lib/Lex/HeaderMap.cpp
@@ -14,12 +14,12 @@
 #include "clang/Lex/HeaderMap.h"
 #include "clang/Basic/CharInfo.h"
 #include "clang/Basic/FileManager.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/Support/DataTypes.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include <cstdio>
+#include <memory>
 using namespace clang;
 
 //===----------------------------------------------------------------------===//
@@ -81,7 +81,7 @@
   unsigned FileSize = FE->getSize();
   if (FileSize <= sizeof(HMapHeader)) return 0;
 
-  OwningPtr<const llvm::MemoryBuffer> FileBuffer(FM.getBufferForFile(FE));
+  std::unique_ptr<const llvm::MemoryBuffer> FileBuffer(FM.getBufferForFile(FE));
   if (!FileBuffer) return 0;  // Unreadable file?
   const char *FileStart = FileBuffer->getBufferStart();
 
@@ -103,7 +103,7 @@
   if (Header->Reserved != 0) return 0;
 
   // Okay, everything looks good, create the header map.
-  return new HeaderMap(FileBuffer.take(), NeedsByteSwap);
+  return new HeaderMap(FileBuffer.release(), NeedsByteSwap);
 }
 
 HeaderMap::~HeaderMap() {
@@ -201,18 +201,29 @@
 /// this HeaderMap.  If so, open it and return its FileEntry.
 const FileEntry *HeaderMap::LookupFile(
     StringRef Filename, FileManager &FM) const {
+
+  SmallString<1024> Path;
+  StringRef Dest = lookupFilename(Filename, Path);
+  if (Dest.empty())
+    return 0;
+
+  return FM.getFile(Dest);
+}
+
+StringRef HeaderMap::lookupFilename(StringRef Filename,
+                                    SmallVectorImpl<char> &DestPath) const {
   const HMapHeader &Hdr = getHeader();
   unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
 
   // If the number of buckets is not a power of two, the headermap is corrupt.
   // Don't probe infinitely.
   if (NumBuckets & (NumBuckets-1))
-    return 0;
+    return StringRef();
 
   // Linearly probe the hash table.
   for (unsigned Bucket = HashHMapKey(Filename);; ++Bucket) {
     HMapBucket B = getBucket(Bucket & (NumBuckets-1));
-    if (B.Key == HMAP_EmptyBucketKey) return 0; // Hash miss.
+    if (B.Key == HMAP_EmptyBucketKey) return StringRef(); // Hash miss.
 
     // See if the key matches.  If not, probe on.
     if (!Filename.equals_lower(getString(B.Key)))
@@ -220,9 +231,11 @@
 
     // If so, we have a match in the hash table.  Construct the destination
     // path.
-    SmallString<1024> DestPath;
-    DestPath += getString(B.Prefix);
-    DestPath += getString(B.Suffix);
-    return FM.getFile(DestPath.str());
+    StringRef Prefix = getString(B.Prefix);
+    StringRef Suffix = getString(B.Suffix);
+    DestPath.clear();
+    DestPath.append(Prefix.begin(), Prefix.end());
+    DestPath.append(Suffix.begin(), Suffix.end());
+    return StringRef(DestPath.begin(), DestPath.size());
   }
 }
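The new lookupFilename above exposes the raw header-map result (prefix + suffix, built into a caller-owned buffer) instead of opening the file directly. A rough standalone model of that probe, with std:: containers standing in for the on-disk hash table and illustrative names:

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>
#include <vector>

struct Bucket { std::string Key, Prefix, Suffix; };

// Roughly what HeaderMap::lookupFilename does: find the bucket whose key
// matches the filename case-insensitively and hand back Prefix + Suffix,
// materialized into storage the caller owns.
static bool lookupFilename(const std::vector<Bucket> &Buckets,
                           const std::string &Filename,
                           std::string &DestPath) {
  auto EqualLower = [](const std::string &A, const std::string &B) {
    return A.size() == B.size() &&
           std::equal(A.begin(), A.end(), B.begin(), [](char X, char Y) {
             return std::tolower((unsigned char)X) ==
                    std::tolower((unsigned char)Y);
           });
  };
  for (const Bucket &B : Buckets) {
    if (!EqualLower(B.Key, Filename))
      continue;
    DestPath = B.Prefix + B.Suffix;
    return true;
  }
  return false;
}

int main() {
  std::vector<Bucket> HMap = {{"Foo.h", "Foo/", "Foo.h"}};
  std::string Dest;
  if (lookupFilename(HMap, "foo.h", Dest))
    // A relative result like "Foo/Foo.h" is what DirectoryLookup later
    // treats as a framework-style remapping and feeds back into header
    // search instead of opening it directly.
    std::cout << "mapped to " << Dest << "\n";
}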
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index 9e43dda..f081024 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -12,11 +12,11 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Lex/HeaderSearch.h"
-#include "clang/Basic/Diagnostic.h"
 #include "clang/Basic/FileManager.h"
 #include "clang/Basic/IdentifierTable.h"
 #include "clang/Lex/HeaderMap.h"
 #include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/LexDiagnostic.h"
 #include "clang/Lex/Lexer.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/Support/Capacity.h"
@@ -45,11 +45,10 @@
 
 HeaderSearch::HeaderSearch(IntrusiveRefCntPtr<HeaderSearchOptions> HSOpts,
                            SourceManager &SourceMgr, DiagnosticsEngine &Diags,
-                           const LangOptions &LangOpts, 
+                           const LangOptions &LangOpts,
                            const TargetInfo *Target)
-  : HSOpts(HSOpts), FileMgr(SourceMgr.getFileManager()), FrameworkMap(64),
-    ModMap(SourceMgr, *Diags.getClient(), LangOpts, Target, *this)
-{
+    : HSOpts(HSOpts), Diags(Diags), FileMgr(SourceMgr.getFileManager()),
+      FrameworkMap(64), ModMap(SourceMgr, Diags, LangOpts, Target, *this) {
   AngledDirIdx = 0;
   SystemDirIdx = 0;
   NoCurDirSearch = false;
@@ -59,6 +58,8 @@
   NumIncluded = 0;
   NumMultiIncludeFileOptzn = 0;
   NumFrameworkLookups = NumSubFrameworkLookups = 0;
+
+  EnabledModules = LangOpts.Modules;
 }
 
 HeaderSearch::~HeaderSearch() {
@@ -164,8 +165,8 @@
 
     bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
     // Search for a module map file in this directory.
-    if (loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem)
-          == LMM_NewlyLoaded) {
+    if (loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem,
+                          /*IsFramework*/false) == LMM_NewlyLoaded) {
       // We just loaded a module map file; check whether the module is
       // available now.
       Module = ModMap.findModule(ModuleName);
@@ -178,7 +179,8 @@
     SmallString<128> NestedModuleMapDirName;
     NestedModuleMapDirName = SearchDirs[Idx].getDir()->getName();
     llvm::sys::path::append(NestedModuleMapDirName, ModuleName);
-    if (loadModuleMapFile(NestedModuleMapDirName, IsSystem) == LMM_NewlyLoaded){
+    if (loadModuleMapFile(NestedModuleMapDirName, IsSystem,
+                          /*IsFramework*/false) == LMM_NewlyLoaded){
       // If we just loaded a module map file, look for the module again.
       Module = ModMap.findModule(ModuleName);
       if (Module)
@@ -218,17 +220,46 @@
   return getHeaderMap()->getFileName();
 }
 
+static const FileEntry *
+getFileAndSuggestModule(HeaderSearch &HS, StringRef FileName,
+                        const DirectoryEntry *Dir, bool IsSystemHeaderDir,
+                        ModuleMap::KnownHeader *SuggestedModule) {
+  // If we have a module map that might map this header, load it and
+  // check whether we'll have a suggestion for a module.
+  HS.hasModuleMap(FileName, Dir, IsSystemHeaderDir);
+  if (SuggestedModule) {
+    const FileEntry *File = HS.getFileMgr().getFile(FileName,
+                                                    /*OpenFile=*/false);
+    if (File) {
+      // If there is a module that corresponds to this header, suggest it.
+      *SuggestedModule = HS.findModuleForHeader(File);
+
+      // FIXME: This appears to be a no-op. We loaded the module map for this
+      // directory at the start of this function.
+      if (!SuggestedModule->getModule() &&
+          HS.hasModuleMap(FileName, Dir, IsSystemHeaderDir))
+        *SuggestedModule = HS.findModuleForHeader(File);
+    }
+
+    return File;
+  }
+
+  return HS.getFileMgr().getFile(FileName, /*openFile=*/true);
+}
 
 /// LookupFile - Lookup the specified file in this search path, returning it
 /// if it exists or returning null if not.
 const FileEntry *DirectoryLookup::LookupFile(
-    StringRef Filename,
+    StringRef &Filename,
     HeaderSearch &HS,
     SmallVectorImpl<char> *SearchPath,
     SmallVectorImpl<char> *RelativePath,
     ModuleMap::KnownHeader *SuggestedModule,
-    bool &InUserSpecifiedSystemFramework) const {
+    bool &InUserSpecifiedSystemFramework,
+    bool &HasBeenMapped,
+    SmallVectorImpl<char> &MappedName) const {
   InUserSpecifiedSystemFramework = false;
+  HasBeenMapped = false;
 
   SmallString<1024> TmpDir;
   if (isNormalDir()) {
@@ -244,25 +275,10 @@
       RelativePath->clear();
       RelativePath->append(Filename.begin(), Filename.end());
     }
-    
-    // If we have a module map that might map this header, load it and
-    // check whether we'll have a suggestion for a module.
-    HS.hasModuleMap(TmpDir, getDir(), isSystemHeaderDirectory());
-    if (SuggestedModule) {
-      const FileEntry *File = HS.getFileMgr().getFile(TmpDir.str(),
-                                                      /*openFile=*/false);
-      if (!File)
-        return File;
-      
-      // If there is a module that corresponds to this header, suggest it.
-      *SuggestedModule = HS.findModuleForHeader(File);
-      if (!SuggestedModule->getModule() &&
-          HS.hasModuleMap(TmpDir, getDir(), isSystemHeaderDirectory()))
-        *SuggestedModule = HS.findModuleForHeader(File);
-      return File;
-    }
-    
-    return HS.getFileMgr().getFile(TmpDir.str(), /*openFile=*/true);
+
+    return getFileAndSuggestModule(HS, TmpDir.str(), getDir(),
+                                   isSystemHeaderDirectory(),
+                                   SuggestedModule);
   }
 
   if (isFramework())
@@ -270,8 +286,28 @@
                              SuggestedModule, InUserSpecifiedSystemFramework);
 
   assert(isHeaderMap() && "Unknown directory lookup");
-  const FileEntry * const Result = getHeaderMap()->LookupFile(
-      Filename, HS.getFileMgr());
+  const HeaderMap *HM = getHeaderMap();
+  SmallString<1024> Path;
+  StringRef Dest = HM->lookupFilename(Filename, Path);
+  if (Dest.empty())
+    return 0;
+
+  const FileEntry *Result;
+
+  // Check if the headermap maps the filename to a framework include
+  // ("Foo.h" -> "Foo/Foo.h"), in which case continue header lookup using the
+  // framework include.
+  if (llvm::sys::path::is_relative(Dest)) {
+    MappedName.clear();
+    MappedName.append(Dest.begin(), Dest.end());
+    Filename = StringRef(MappedName.begin(), MappedName.size());
+    HasBeenMapped = true;
+    Result = HM->LookupFile(Filename, HS.getFileMgr());
+
+  } else {
+    Result = HS.getFileMgr().getFile(Dest);
+  }
+
   if (Result) {
     if (SearchPath != NULL) {
       StringRef SearchPathRef(getName());
@@ -488,28 +524,43 @@
 // Header File Location.
 //===----------------------------------------------------------------------===//
 
+/// \brief Return true with a diagnostic if the file that MSVC would have found
+/// fails to match the one that Clang would have found with MSVC header search
+/// disabled.
+static bool checkMSVCHeaderSearch(DiagnosticsEngine &Diags,
+                                  const FileEntry *MSFE, const FileEntry *FE,
+                                  SourceLocation IncludeLoc) {
+  if (MSFE && FE != MSFE) {
+    Diags.Report(IncludeLoc, diag::ext_pp_include_search_ms) << MSFE->getName();
+    return true;
+  }
+  return false;
+}
+
+static const char *copyString(StringRef Str, llvm::BumpPtrAllocator &Alloc) {
+  assert(!Str.empty());
+  char *CopyStr = Alloc.Allocate<char>(Str.size()+1);
+  std::copy(Str.begin(), Str.end(), CopyStr);
+  CopyStr[Str.size()] = '\0';
+  return CopyStr;
+}
 
 /// LookupFile - Given a "foo" or \<foo> reference, look up the indicated file,
 /// return null on failure.  isAngled indicates whether the file reference is
-/// for system \#include's or not (i.e. using <> instead of "").  CurFileEnt, if
-/// non-null, indicates where the \#including file is, in case a relative search
-/// is needed.
+/// for system \#include's or not (i.e. using <> instead of ""). Includers, if
+/// non-empty, indicates where the \#including file(s) are, in case a relative
+/// search is needed. Microsoft mode will pass all \#including files.
 const FileEntry *HeaderSearch::LookupFile(
-    StringRef Filename,
-    bool isAngled,
-    const DirectoryLookup *FromDir,
-    const DirectoryLookup *&CurDir,
-    const FileEntry *CurFileEnt,
-    SmallVectorImpl<char> *SearchPath,
+    StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
+    const DirectoryLookup *FromDir, const DirectoryLookup *&CurDir,
+    ArrayRef<const FileEntry *> Includers, SmallVectorImpl<char> *SearchPath,
     SmallVectorImpl<char> *RelativePath,
-    ModuleMap::KnownHeader *SuggestedModule,
-    bool SkipCache)
-{
+    ModuleMap::KnownHeader *SuggestedModule, bool SkipCache) {
   if (!HSOpts->ModuleMapFiles.empty()) {
     // Preload all explicitly specified module map files. This enables modules
     // map files lying in a directory structure separate from the header files
     // that they describe. These cannot be loaded lazily upon encountering a
-    // header file, as there is no other knwon mapping from a header file to its
+    // header file, as there is no other known mapping from a header file to its
     // module map file.
     for (llvm::SetVector<std::string>::iterator
              I = HSOpts->ModuleMapFiles.begin(),
@@ -543,45 +594,80 @@
     return FileMgr.getFile(Filename, /*openFile=*/true);
   }
 
+  // This is the header that MSVC's header search would have found.
+  const FileEntry *MSFE = 0;
+  ModuleMap::KnownHeader MSSuggestedModule;
+
   // Unless disabled, check to see if the file is in the #includer's
-  // directory.  This has to be based on CurFileEnt, not CurDir, because
-  // CurFileEnt could be a #include of a subdirectory (#include "foo/bar.h") and
-  // a subsequent include of "baz.h" should resolve to "whatever/foo/baz.h".
+  // directory.  This cannot be based on CurDir, because each includer could be
+  // a #include of a subdirectory (#include "foo/bar.h") and a subsequent
+  // include of "baz.h" should resolve to "whatever/foo/baz.h".
   // This search is not done for <> headers.
-  if (CurFileEnt && !isAngled && !NoCurDirSearch) {
+  if (!Includers.empty() && !isAngled && !NoCurDirSearch) {
     SmallString<1024> TmpDir;
-    // Concatenate the requested file onto the directory.
-    // FIXME: Portability.  Filename concatenation should be in sys::Path.
-    TmpDir += CurFileEnt->getDir()->getName();
-    TmpDir.push_back('/');
-    TmpDir.append(Filename.begin(), Filename.end());
-    if (const FileEntry *FE = FileMgr.getFile(TmpDir.str(),/*openFile=*/true)) {
-      // Leave CurDir unset.
-      // This file is a system header or C++ unfriendly if the old file is.
-      //
-      // Note that we only use one of FromHFI/ToHFI at once, due to potential
-      // reallocation of the underlying vector potentially making the first
-      // reference binding dangling.
-      HeaderFileInfo &FromHFI = getFileInfo(CurFileEnt);
-      unsigned DirInfo = FromHFI.DirInfo;
-      bool IndexHeaderMapHeader = FromHFI.IndexHeaderMapHeader;
-      StringRef Framework = FromHFI.Framework;
+    for (ArrayRef<const FileEntry *>::iterator I = Includers.begin(),
+                                               E = Includers.end();
+         I != E; ++I) {
+      const FileEntry *Includer = *I;
+      // Concatenate the requested file onto the directory.
+      // FIXME: Portability.  Filename concatenation should be in sys::Path.
+      TmpDir = Includer->getDir()->getName();
+      TmpDir.push_back('/');
+      TmpDir.append(Filename.begin(), Filename.end());
 
-      HeaderFileInfo &ToHFI = getFileInfo(FE);
-      ToHFI.DirInfo = DirInfo;
-      ToHFI.IndexHeaderMapHeader = IndexHeaderMapHeader;
-      ToHFI.Framework = Framework;
+      // FIXME: We don't cache the result of getFileInfo across the call to
+      // getFileAndSuggestModule, because it's a reference to an element of
+      // a container that could be reallocated across this call.
+      bool IncluderIsSystemHeader =
+          getFileInfo(Includer).DirInfo != SrcMgr::C_User;
+      if (const FileEntry *FE =
+              getFileAndSuggestModule(*this, TmpDir.str(), Includer->getDir(),
+                                      IncluderIsSystemHeader,
+                                      SuggestedModule)) {
+        // Leave CurDir unset.
+        // This file is a system header or C++ unfriendly if the old file is.
+        //
+        // Note that we only use one of FromHFI/ToHFI at once, because
+        // reallocation of the underlying vector could leave the first
+        // reference dangling.
+        HeaderFileInfo &FromHFI = getFileInfo(Includer);
+        unsigned DirInfo = FromHFI.DirInfo;
+        bool IndexHeaderMapHeader = FromHFI.IndexHeaderMapHeader;
+        StringRef Framework = FromHFI.Framework;
 
-      if (SearchPath != NULL) {
-        StringRef SearchPathRef(CurFileEnt->getDir()->getName());
-        SearchPath->clear();
-        SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
+        HeaderFileInfo &ToHFI = getFileInfo(FE);
+        ToHFI.DirInfo = DirInfo;
+        ToHFI.IndexHeaderMapHeader = IndexHeaderMapHeader;
+        ToHFI.Framework = Framework;
+
+        if (SearchPath != NULL) {
+          StringRef SearchPathRef(Includer->getDir()->getName());
+          SearchPath->clear();
+          SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
+        }
+        if (RelativePath != NULL) {
+          RelativePath->clear();
+          RelativePath->append(Filename.begin(), Filename.end());
+        }
+        if (I == Includers.begin())
+          return FE;
+
+        // Otherwise, we found the path via MSVC header search rules.  If
+        // -Wmsvc-include is enabled, we have to keep searching to see if we
+        // would've found this header in -I or -isystem directories.
+        if (Diags.getDiagnosticLevel(diag::ext_pp_include_search_ms,
+                                     IncludeLoc) ==
+            DiagnosticsEngine::Ignored) {
+          return FE;
+        } else {
+          MSFE = FE;
+          if (SuggestedModule) {
+            MSSuggestedModule = *SuggestedModule;
+            *SuggestedModule = ModuleMap::KnownHeader();
+          }
+          break;
+        }
       }
-      if (RelativePath != NULL) {
-        RelativePath->clear();
-        RelativePath->append(Filename.begin(), Filename.end());
-      }
-      return FE;
     }
   }
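A compressed sketch of the control flow this hunk introduces for -Wmsvc-include (toy types and paths, not the real HeaderSearch interface): walk the stack of includers, and if the header is only found relative to an outer includer, either return it directly when the warning is off or remember it and keep searching so a mismatch with the normal -I result can be diagnosed.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in: a "found file" is just its path, empty meaning not found.
static std::string findRelativeTo(const std::string &IncluderDir,
                                  const std::string &Name,
                                  const std::vector<std::string> &FS) {
  std::string Candidate = IncluderDir + "/" + Name;
  for (const std::string &F : FS)
    if (F == Candidate)
      return Candidate;
  return std::string();
}

int main() {
  std::vector<std::string> FS = {"a/x.h", "include/x.h"};
  std::vector<std::string> Includers = {"a/sub", "a"}; // innermost first
  std::string Name = "x.h";

  std::string MSFE; // what MSVC-style search would have found
  for (std::size_t I = 0; I != Includers.size(); ++I) {
    std::string FE = findRelativeTo(Includers[I], Name, FS);
    if (FE.empty())
      continue;
    if (I == 0) {      // found next to the direct includer: normal behaviour
      std::cout << "found " << FE << "\n";
      return 0;
    }
    // MSVC-only hit. The real code returns it right away if the warning is
    // disabled; otherwise it remembers it and keeps searching -I dirs.
    MSFE = FE;
    break;
  }

  std::string FE = "include/x.h";  // pretend the -I search found this one
  if (!MSFE.empty() && FE != MSFE) // checkMSVCHeaderSearch's condition
    std::cout << "-Wmsvc-include: would have used " << MSFE << "\n";
  std::cout << "using " << FE << "\n";
}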
 
@@ -599,28 +685,38 @@
   // multiply included, and the "pragma once" optimization prevents them from
   // being relex/pp'd, but they would still have to search through a
   // (potentially huge) series of SearchDirs to find it.
-  std::pair<unsigned, unsigned> &CacheLookup =
+  LookupFileCacheInfo &CacheLookup =
     LookupFileCache.GetOrCreateValue(Filename).getValue();
 
   // If the entry has been previously looked up, the first value will be
   // non-zero.  If the value is equal to i (the start point of our search), then
   // this is a matching hit.
-  if (!SkipCache && CacheLookup.first == i+1) {
+  if (!SkipCache && CacheLookup.StartIdx == i+1) {
     // Skip querying potentially lots of directories for this lookup.
-    i = CacheLookup.second;
+    i = CacheLookup.HitIdx;
+    if (CacheLookup.MappedName)
+      Filename = CacheLookup.MappedName;
   } else {
     // Otherwise, this is the first query, or the previous query didn't match
     // our search start.  We will fill in our found location below, so prime the
     // start point value.
-    CacheLookup.first = i+1;
+    CacheLookup.reset(/*StartIdx=*/i+1);
   }
 
+  SmallString<64> MappedName;
+
   // Check each directory in sequence to see if it contains this file.
   for (; i != SearchDirs.size(); ++i) {
     bool InUserSpecifiedSystemFramework = false;
+    bool HasBeenMapped = false;
     const FileEntry *FE =
       SearchDirs[i].LookupFile(Filename, *this, SearchPath, RelativePath,
-                               SuggestedModule, InUserSpecifiedSystemFramework);
+                               SuggestedModule, InUserSpecifiedSystemFramework,
+                               HasBeenMapped, MappedName);
+    if (HasBeenMapped) {
+      CacheLookup.MappedName =
+          copyString(Filename, LookupFileCache.getAllocator());
+    }
     if (!FE) continue;
 
     CurDir = &SearchDirs[i];
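LookupFileCacheInfo itself is declared in the HeaderSearch header, which is outside this patch; judging from the fields used here, its shape is roughly the following sketch (an approximation, not the actual declaration):

#include <cstdio>

// Approximate shape of the per-filename cache entry used above: where the
// last search started, where it hit, and the header-map-remapped name if a
// header map rewrote the filename.
struct LookupFileCacheInfo {
  unsigned StartIdx = 0;  // 1 + index of the search dir the lookup began at
  unsigned HitIdx = 0;    // index of the search dir where the file was found
  const char *MappedName = nullptr; // copied remapped filename, or null

  void reset(unsigned NewStartIdx) {
    StartIdx = NewStartIdx;
    MappedName = nullptr;
  }
};

int main() {
  LookupFileCacheInfo C;
  unsigned i = 2;           // pretend the search starts at SearchDirs[2]
  if (C.StartIdx == i + 1)
    i = C.HitIdx;           // cache hit: jump straight to the known directory
  else
    C.reset(/*NewStartIdx=*/i + 1);
  std::printf("search resumes at SearchDirs[%u]\n", i);
}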
@@ -655,9 +751,15 @@
                                                          SlashPos));
       }
     }
-    
+
+    if (checkMSVCHeaderSearch(Diags, MSFE, FE, IncludeLoc)) {
+      if (SuggestedModule)
+        *SuggestedModule = MSSuggestedModule;
+      return MSFE;
+    }
+
     // Remember this location for the next lookup we do.
-    CacheLookup.second = i;
+    CacheLookup.HitIdx = i;
     return FE;
   }
 
@@ -665,28 +767,42 @@
   // a header in a framework that is currently being built, and we couldn't
   // resolve "foo.h" any other way, change the include to <Foo/foo.h>, where
   // "Foo" is the name of the framework in which the including header was found.
-  if (CurFileEnt && !isAngled && Filename.find('/') == StringRef::npos) {
-    HeaderFileInfo &IncludingHFI = getFileInfo(CurFileEnt);
+  if (!Includers.empty() && !isAngled &&
+      Filename.find('/') == StringRef::npos) {
+    HeaderFileInfo &IncludingHFI = getFileInfo(Includers.front());
     if (IncludingHFI.IndexHeaderMapHeader) {
       SmallString<128> ScratchFilename;
       ScratchFilename += IncludingHFI.Framework;
       ScratchFilename += '/';
       ScratchFilename += Filename;
-      
-      const FileEntry *Result = LookupFile(ScratchFilename, /*isAngled=*/true,
-                                           FromDir, CurDir, CurFileEnt, 
-                                           SearchPath, RelativePath,
-                                           SuggestedModule);
-      std::pair<unsigned, unsigned> &CacheLookup 
+
+      const FileEntry *FE = LookupFile(
+          ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir, CurDir,
+          Includers.front(), SearchPath, RelativePath, SuggestedModule);
+
+      if (checkMSVCHeaderSearch(Diags, MSFE, FE, IncludeLoc)) {
+        if (SuggestedModule)
+          *SuggestedModule = MSSuggestedModule;
+        return MSFE;
+      }
+
+      LookupFileCacheInfo &CacheLookup 
         = LookupFileCache.GetOrCreateValue(Filename).getValue();
-      CacheLookup.second
-        = LookupFileCache.GetOrCreateValue(ScratchFilename).getValue().second;
-      return Result;
+      CacheLookup.HitIdx
+        = LookupFileCache.GetOrCreateValue(ScratchFilename).getValue().HitIdx;
+      // FIXME: SuggestedModule.
+      return FE;
     }
   }
 
+  if (checkMSVCHeaderSearch(Diags, MSFE, 0, IncludeLoc)) {
+    if (SuggestedModule)
+      *SuggestedModule = MSSuggestedModule;
+    return MSFE;
+  }
+
   // Otherwise, didn't find it. Remember we didn't find this.
-  CacheLookup.second = SearchDirs.size();
+  CacheLookup.HitIdx = SearchDirs.size();
   return 0;
 }
 
@@ -873,9 +989,21 @@
   HeaderFileInfo &HFI = FileInfo[FE->getUID()];
   if (ExternalSource && !HFI.Resolved)
     mergeHeaderFileInfo(HFI, ExternalSource->GetHeaderFileInfo(FE));
+  HFI.IsValid = 1;
   return HFI;
 }
 
+bool HeaderSearch::tryGetFileInfo(const FileEntry *FE, HeaderFileInfo &Result) const {
+  if (FE->getUID() >= FileInfo.size())
+    return false;
+  const HeaderFileInfo &HFI = FileInfo[FE->getUID()];
+  if (HFI.IsValid) {
+    Result = HFI;
+    return true;
+  }
+  return false;
+}
+
 bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
   // Check if we've ever seen this file as a header.
   if (File->getUID() >= FileInfo.size())
@@ -953,6 +1081,9 @@
 bool HeaderSearch::hasModuleMap(StringRef FileName, 
                                 const DirectoryEntry *Root,
                                 bool IsSystem) {
+  if (!enabledModules())
+    return false;
+
   SmallVector<const DirectoryEntry *, 2> FixUpDirectories;
   
   StringRef DirName = FileName;
@@ -967,8 +1098,8 @@
     if (!Dir)
       return false;
 
-    // Try to load the "module.map" file in this directory.
-    switch (loadModuleMapFile(Dir, IsSystem)) {
+    // Try to load the module map file in this directory.
+    switch (loadModuleMapFile(Dir, IsSystem, /*IsFramework*/false)) {
     case LMM_NewlyLoaded:
     case LMM_AlreadyLoaded:
       // Success. All of the directories we stepped through inherit this module
@@ -1002,36 +1133,85 @@
   return ModMap.findModuleForHeader(File);
 }
 
-bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem) {
-  const DirectoryEntry *Dir = File->getDir();
-  
-  llvm::DenseMap<const DirectoryEntry *, bool>::iterator KnownDir
-    = DirectoryHasModuleMap.find(Dir);
-  if (KnownDir != DirectoryHasModuleMap.end())
-    return !KnownDir->second;
-  
-  bool Result = ModMap.parseModuleMapFile(File, IsSystem);
-  if (!Result && llvm::sys::path::filename(File->getName()) == "module.map") {
-    // If the file we loaded was a module.map, look for the corresponding
-    // module_private.map.
-    SmallString<128> PrivateFilename(Dir->getName());
+static const FileEntry *getPrivateModuleMap(StringRef ModuleMapPath,
+                                            const DirectoryEntry *Directory,
+                                            FileManager &FileMgr) {
+  StringRef Filename = llvm::sys::path::filename(ModuleMapPath);
+  SmallString<128> PrivateFilename(Directory->getName());
+  SmallString<128> PrivateFilename(Directory->getName());
+  if (Filename == "module.map")
     llvm::sys::path::append(PrivateFilename, "module_private.map");
-    if (const FileEntry *PrivateFile = FileMgr.getFile(PrivateFilename))
-      Result = ModMap.parseModuleMapFile(PrivateFile, IsSystem);
-  }
-  
-  DirectoryHasModuleMap[Dir] = !Result;  
-  return Result;
+  else if (Filename == "module.modulemap")
+    llvm::sys::path::append(PrivateFilename, "module.private.modulemap");
+  else
+    return nullptr;
+  return FileMgr.getFile(PrivateFilename);
 }
 
-Module *HeaderSearch::loadFrameworkModule(StringRef Name, 
+bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem) {
+  switch (loadModuleMapFileImpl(File, IsSystem)) {
+  case LMM_AlreadyLoaded:
+  case LMM_NewlyLoaded:
+    return false;
+  case LMM_NoDirectory:
+  case LMM_InvalidModuleMap:
+    return true;
+  }
+  llvm_unreachable("Unknown load module map result");
+}
+
+HeaderSearch::LoadModuleMapResult
+HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem) {
+  assert(File && "expected FileEntry");
+
+  const DirectoryEntry *Dir = File->getDir();
+  auto KnownDir = DirectoryHasModuleMap.find(Dir);
+  if (KnownDir != DirectoryHasModuleMap.end())
+    return KnownDir->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
+
+  if (ModMap.parseModuleMapFile(File, IsSystem)) {
+    DirectoryHasModuleMap[Dir] = false;
+    return LMM_InvalidModuleMap;
+  }
+
+  // Try to load a corresponding private module map.
+  if (const FileEntry *PMMFile =
+        getPrivateModuleMap(File->getName(), Dir, FileMgr)) {
+    if (ModMap.parseModuleMapFile(PMMFile, IsSystem)) {
+      DirectoryHasModuleMap[Dir] = false;
+      return LMM_InvalidModuleMap;
+    }
+  }
+
+  // This directory has a module map.
+  DirectoryHasModuleMap[Dir] = true;
+  return LMM_NewlyLoaded;
+}
+
+const FileEntry *
+HeaderSearch::lookupModuleMapFile(const DirectoryEntry *Dir, bool IsFramework) {
+  // For frameworks, the preferred spelling is Modules/module.modulemap, but
+  // module.map at the framework root is also accepted.
+  SmallString<128> ModuleMapFileName(Dir->getName());
+  if (IsFramework)
+    llvm::sys::path::append(ModuleMapFileName, "Modules");
+  llvm::sys::path::append(ModuleMapFileName, "module.modulemap");
+  if (const FileEntry *F = FileMgr.getFile(ModuleMapFileName))
+    return F;
+
+  // Continue to allow module.map
+  ModuleMapFileName = Dir->getName();
+  llvm::sys::path::append(ModuleMapFileName, "module.map");
+  return FileMgr.getFile(ModuleMapFileName);
+}
+
+Module *HeaderSearch::loadFrameworkModule(StringRef Name,
                                           const DirectoryEntry *Dir,
                                           bool IsSystem) {
   if (Module *Module = ModMap.findModule(Name))
     return Module;
   
   // Try to load a module map file.
-  switch (loadModuleMapFile(Dir, IsSystem)) {
+  switch (loadModuleMapFile(Dir, IsSystem, /*IsFramework*/true)) {
   case LMM_InvalidModuleMap:
     break;
     
@@ -1043,81 +1223,37 @@
     return ModMap.findModule(Name);
   }
 
-  // Figure out the top-level framework directory and the submodule path from
-  // that top-level framework to the requested framework.
-  SmallVector<std::string, 2> SubmodulePath;
-  SubmodulePath.push_back(Name);
-  const DirectoryEntry *TopFrameworkDir
-    = ::getTopFrameworkDir(FileMgr, Dir->getName(), SubmodulePath);
 
-
-  // Try to infer a module map from the top-level framework directory.
-  Module *Result = ModMap.inferFrameworkModule(SubmodulePath.back(), 
-                                               TopFrameworkDir,
-                                               IsSystem,
-                                               /*Parent=*/0);
-  if (!Result)
-    return 0;
-  
-  // Follow the submodule path to find the requested (sub)framework module
-  // within the top-level framework module.
-  SubmodulePath.pop_back();
-  while (!SubmodulePath.empty() && Result) {
-    Result = ModMap.lookupModuleQualified(SubmodulePath.back(), Result);
-    SubmodulePath.pop_back();
-  }
-  return Result;
+  // Try to infer a module map from the framework directory.
+  return ModMap.inferFrameworkModule(Name, Dir, IsSystem, /*Parent=*/0);
 }
 
 
 HeaderSearch::LoadModuleMapResult 
-HeaderSearch::loadModuleMapFile(StringRef DirName, bool IsSystem) {
+HeaderSearch::loadModuleMapFile(StringRef DirName, bool IsSystem,
+                                bool IsFramework) {
   if (const DirectoryEntry *Dir = FileMgr.getDirectory(DirName))
-    return loadModuleMapFile(Dir, IsSystem);
+    return loadModuleMapFile(Dir, IsSystem, IsFramework);
   
   return LMM_NoDirectory;
 }
 
 HeaderSearch::LoadModuleMapResult 
-HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir, bool IsSystem) {
-  llvm::DenseMap<const DirectoryEntry *, bool>::iterator KnownDir
-    = DirectoryHasModuleMap.find(Dir);
+HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir, bool IsSystem,
+                                bool IsFramework) {
+  auto KnownDir = DirectoryHasModuleMap.find(Dir);
   if (KnownDir != DirectoryHasModuleMap.end())
     return KnownDir->second? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
-  
-  SmallString<128> ModuleMapFileName;
-  ModuleMapFileName += Dir->getName();
-  unsigned ModuleMapDirNameLen = ModuleMapFileName.size();
-  llvm::sys::path::append(ModuleMapFileName, "module.map");
-  if (const FileEntry *ModuleMapFile = FileMgr.getFile(ModuleMapFileName)) {
-    // We have found a module map file. Try to parse it.
-    if (ModMap.parseModuleMapFile(ModuleMapFile, IsSystem)) {
-      // No suitable module map.
-      DirectoryHasModuleMap[Dir] = false;
-      return LMM_InvalidModuleMap;
-    }
 
-    // This directory has a module map.
-    DirectoryHasModuleMap[Dir] = true;
-    
-    // Check whether there is a private module map that we need to load as well.
-    ModuleMapFileName.erase(ModuleMapFileName.begin() + ModuleMapDirNameLen,
-                            ModuleMapFileName.end());
-    llvm::sys::path::append(ModuleMapFileName, "module_private.map");
-    if (const FileEntry *PrivateModuleMapFile
-                                        = FileMgr.getFile(ModuleMapFileName)) {
-      if (ModMap.parseModuleMapFile(PrivateModuleMapFile, IsSystem)) {
-        // No suitable module map.
-        DirectoryHasModuleMap[Dir] = false;
-        return LMM_InvalidModuleMap;
-      }      
-    }
-    
-    return LMM_NewlyLoaded;
+  if (const FileEntry *ModuleMapFile = lookupModuleMapFile(Dir, IsFramework)) {
+    LoadModuleMapResult Result = loadModuleMapFileImpl(ModuleMapFile, IsSystem);
+    // Add Dir explicitly in case ModuleMapFile is in a subdirectory.
+    // E.g. Foo.framework/Modules/module.modulemap
+    //      ^Dir                  ^ModuleMapFile
+    if (Result == LMM_NewlyLoaded)
+      DirectoryHasModuleMap[Dir] = true;
+    return Result;
   }
-  
-  // No suitable module map.
-  DirectoryHasModuleMap[Dir] = false;
   return LMM_InvalidModuleMap;
 }
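The naming precedence that lookupModuleMapFile (added above) encodes, as a standalone sketch with illustrative names; the real code asks the FileManager for each candidate in turn:

#include <iostream>
#include <string>
#include <vector>

// Candidate module map paths for a directory, in the order the new
// lookupModuleMapFile tries them.
static std::vector<std::string> moduleMapCandidates(const std::string &Dir,
                                                    bool IsFramework) {
  std::vector<std::string> Candidates;
  // Preferred spelling: Modules/module.modulemap inside a framework,
  // module.modulemap directly in an ordinary include directory.
  Candidates.push_back(IsFramework ? Dir + "/Modules/module.modulemap"
                                   : Dir + "/module.modulemap");
  // Legacy fallback, always at the directory root.
  Candidates.push_back(Dir + "/module.map");
  return Candidates;
}

int main() {
  for (const std::string &P : moduleMapCandidates("Foo.framework", true))
    std::cout << P << "\n";
}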
 
@@ -1155,7 +1291,7 @@
       continue;
     
     // Try to load a module map file for the search directory.
-    loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem);
+    loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem, /*IsFramework*/false);
     
     // Try to load module map files for immediate subdirectories of this search
     // directory.
@@ -1180,7 +1316,8 @@
 
     // Try to load a module map file for the search directory.
     loadModuleMapFile(SearchDirs[Idx].getDir(),
-                      SearchDirs[Idx].isSystemHeaderDirectory());
+                      SearchDirs[Idx].isSystemHeaderDirectory(),
+                      SearchDirs[Idx].isFramework());
   }
 }
 
@@ -1193,7 +1330,8 @@
   llvm::sys::path::native(SearchDir.getDir()->getName(), DirNative);
   for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd;
        Dir != DirEnd && !EC; Dir.increment(EC)) {
-    loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory());
+    loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory(),
+                      SearchDir.isFramework());
   }
 
   SearchDir.setSearchedAllModuleMaps(true);
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index c071455..86d01d6 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -25,6 +25,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Lex/Lexer.h"
+#include "UnicodeCharSets.h"
 #include "clang/Basic/CharInfo.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Lex/CodeCompletionHandler.h"
@@ -37,7 +38,6 @@
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/ConvertUTF.h"
 #include "llvm/Support/MemoryBuffer.h"
-#include "UnicodeCharSets.h"
 #include <cstring>
 using namespace clang;
 
@@ -861,7 +861,7 @@
   // Break down the source locations.
   FileID FID;
   unsigned BeginOffs;
-  llvm::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
+  std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
   if (FID.isInvalid())
     return CharSourceRange();
 
@@ -1445,7 +1445,50 @@
         << Range;
     }
   }
- }
+}
+
+bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
+                                    Token &Result) {
+  const char *UCNPtr = CurPtr + Size;
+  uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/0);
+  if (CodePoint == 0 || !isAllowedIDChar(CodePoint, LangOpts))
+    return false;
+
+  if (!isLexingRawMode())
+    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
+                              makeCharRange(*this, CurPtr, UCNPtr),
+                              /*IsFirst=*/false);
+
+  Result.setFlag(Token::HasUCN);
+  if ((UCNPtr - CurPtr ==  6 && CurPtr[1] == 'u') ||
+      (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
+    CurPtr = UCNPtr;
+  else
+    while (CurPtr != UCNPtr)
+      (void)getAndAdvanceChar(CurPtr, Result);
+  return true;
+}
+
+bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
+  const char *UnicodePtr = CurPtr;
+  UTF32 CodePoint;
+  ConversionResult Result =
+      llvm::convertUTF8Sequence((const UTF8 **)&UnicodePtr,
+                                (const UTF8 *)BufferEnd,
+                                &CodePoint,
+                                strictConversion);
+  if (Result != conversionOK ||
+      !isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts))
+    return false;
+
+  if (!isLexingRawMode())
+    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
+                              makeCharRange(*this, CurPtr, UnicodePtr),
+                              /*IsFirst=*/false);
+
+  CurPtr = UnicodePtr;
+  return true;
+}
 
 bool Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
   // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
@@ -1500,47 +1543,10 @@
       C = getCharAndSize(CurPtr, Size);
       continue;
 
-    } else if (C == '\\') {
-      const char *UCNPtr = CurPtr + Size;
-      uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/0);
-      if (CodePoint == 0 || !isAllowedIDChar(CodePoint, LangOpts))
-        goto FinishIdentifier;
-
-      if (!isLexingRawMode()) {
-        maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
-                                  makeCharRange(*this, CurPtr, UCNPtr),
-                                  /*IsFirst=*/false);
-      }
-
-      Result.setFlag(Token::HasUCN);
-      if ((UCNPtr - CurPtr ==  6 && CurPtr[1] == 'u') ||
-          (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
-        CurPtr = UCNPtr;
-      else
-        while (CurPtr != UCNPtr)
-          (void)getAndAdvanceChar(CurPtr, Result);
-
+    } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
       C = getCharAndSize(CurPtr, Size);
       continue;
-    } else if (!isASCII(C)) {
-      const char *UnicodePtr = CurPtr;
-      UTF32 CodePoint;
-      ConversionResult Result =
-          llvm::convertUTF8Sequence((const UTF8 **)&UnicodePtr,
-                                    (const UTF8 *)BufferEnd,
-                                    &CodePoint,
-                                    strictConversion);
-      if (Result != conversionOK ||
-          !isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts))
-        goto FinishIdentifier;
-
-      if (!isLexingRawMode()) {
-        maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
-                                  makeCharRange(*this, CurPtr, UnicodePtr),
-                                  /*IsFirst=*/false);
-      }
-
-      CurPtr = UnicodePtr;
+    } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
       C = getCharAndSize(CurPtr, Size);
       continue;
     } else if (!isIdentifierBody(C)) {
@@ -1576,7 +1582,7 @@
   unsigned Size;
   char C = getCharAndSize(CurPtr, Size);
   char PrevCh = 0;
-  while (isPreprocessingNumberBody(C)) { // FIXME: UCNs in ud-suffix.
+  while (isPreprocessingNumberBody(C)) {
     CurPtr = ConsumeChar(CurPtr, Size, Result);
     PrevCh = C;
     C = getCharAndSize(CurPtr, Size);
@@ -1614,10 +1620,17 @@
       if (!isLexingRawMode())
         Diag(CurPtr, diag::warn_cxx11_compat_digit_separator);
       CurPtr = ConsumeChar(CurPtr, Size, Result);
+      CurPtr = ConsumeChar(CurPtr, NextSize, Result);
       return LexNumericConstant(Result, CurPtr);
     }
   }
 
+  // If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
+  if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
+    return LexNumericConstant(Result, CurPtr);
+  if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
+    return LexNumericConstant(Result, CurPtr);
+
   // Update the location of token as well as BufferPtr.
   const char *TokStart = BufferPtr;
   FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
@@ -1631,23 +1644,35 @@
                                bool IsStringLiteral) {
   assert(getLangOpts().CPlusPlus);
 
-  // Maximally munch an identifier. FIXME: UCNs.
+  // Maximally munch an identifier.
   unsigned Size;
   char C = getCharAndSize(CurPtr, Size);
-  if (isIdentifierHead(C)) {
-    if (!getLangOpts().CPlusPlus11) {
-      if (!isLexingRawMode())
-        Diag(CurPtr,
-             C == '_' ? diag::warn_cxx11_compat_user_defined_literal
-                      : diag::warn_cxx11_compat_reserved_user_defined_literal)
-          << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
-      return CurPtr;
-    }
+  bool Consumed = false;
 
-    // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
-    // that does not start with an underscore is ill-formed. As a conforming
-    // extension, we treat all such suffixes as if they had whitespace before
-    // them.
+  if (!isIdentifierHead(C)) {
+    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
+      Consumed = true;
+    else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
+      Consumed = true;
+    else
+      return CurPtr;
+  }
+
+  if (!getLangOpts().CPlusPlus11) {
+    if (!isLexingRawMode())
+      Diag(CurPtr,
+           C == '_' ? diag::warn_cxx11_compat_user_defined_literal
+                    : diag::warn_cxx11_compat_reserved_user_defined_literal)
+        << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+    return CurPtr;
+  }
+
+  // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
+  // that does not start with an underscore is ill-formed. As a conforming
+  // extension, we treat all such suffixes as if they had whitespace before
+  // them. We assume a suffix beginning with a UCN or UTF-8 character is more
+  // likely to be a ud-suffix than a macro, however, and accept that.
+  if (!Consumed) {
     bool IsUDSuffix = false;
     if (C == '_')
       IsUDSuffix = true;
@@ -1682,19 +1707,25 @@
 
     if (!IsUDSuffix) {
       if (!isLexingRawMode())
-        Diag(CurPtr, getLangOpts().MicrosoftMode ?
-            diag::ext_ms_reserved_user_defined_literal :
-            diag::ext_reserved_user_defined_literal)
+        Diag(CurPtr, getLangOpts().MSVCCompat
+                         ? diag::ext_ms_reserved_user_defined_literal
+                         : diag::ext_reserved_user_defined_literal)
           << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
       return CurPtr;
     }
 
-    Result.setFlag(Token::HasUDSuffix);
-    do {
-      CurPtr = ConsumeChar(CurPtr, Size, Result);
-      C = getCharAndSize(CurPtr, Size);
-    } while (isIdentifierBody(C));
+    CurPtr = ConsumeChar(CurPtr, Size, Result);
   }
+
+  Result.setFlag(Token::HasUDSuffix);
+  while (true) {
+    C = getCharAndSize(CurPtr, Size);
+    if (isIdentifierBody(C)) { CurPtr = ConsumeChar(CurPtr, Size, Result); }
+    else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {}
+    else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {}
+    else break;
+  }
+
   return CurPtr;
 }
 
@@ -2023,8 +2054,11 @@
     if (C != 0) {
       // We found a newline, see if it's escaped.
       const char *EscapePtr = CurPtr-1;
-      while (isHorizontalWhitespace(*EscapePtr)) // Skip whitespace.
+      bool HasSpace = false;
+      while (isHorizontalWhitespace(*EscapePtr)) { // Skip whitespace.
         --EscapePtr;
+        HasSpace = true;
+      }
 
       if (*EscapePtr == '\\') // Escaped newline.
         CurPtr = EscapePtr;
@@ -2033,6 +2067,10 @@
         CurPtr = EscapePtr-2;
       else
         break; // This is a newline, we're done.
+
+      // If there was space between the backslash and newline, warn about it.
+      if (HasSpace && !isLexingRawMode())
+        Diag(EscapePtr, diag::backslash_newline_space);
     }
 
     // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
@@ -2213,6 +2251,8 @@
 
 #ifdef __SSE2__
 #include <emmintrin.h>
+#elif __AVX2__
+#include <avx2intrin.h>
 #elif __ALTIVEC__
 #include <altivec.h>
 #undef bool
@@ -2268,17 +2308,33 @@
         // If there is a code-completion point avoid the fast scan because it
         // doesn't check for '\0'.
         !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
+#ifndef __AVX2__
       // While not aligned to a 16-byte boundary.
       while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
         C = *CurPtr++;
+#endif
 
       if (C == '/') goto FoundSlash;
 
 #ifdef __SSE2__
-      __m128i Slashes = _mm_set1_epi8('/');
-      while (CurPtr+16 <= BufferEnd) {
-        int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
-                                    Slashes));
+#define VECTOR_TYPE             __m128i
+#define SET1_EPI8(v)            _mm_set1_epi8(v)
+#define CMPEQ_EPI8(v1,v2)       _mm_cmpeq_epi8(v1,v2)
+#define MOVEMASK_EPI8(v)        _mm_movemask_epi8(v)
+#define STEP                    16
+#elif __AVX2__
+#define VECTOR_TYPE             __m256i
+#define SET1_EPI8(v)            _mm256_set1_epi8(v)
+#define CMPEQ_EPI8(v1,v2)       _mm256_cmpeq_epi8(v1,v2)
+#define MOVEMASK_EPI8(v)        _mm256_movemask_epi8(v)
+#define STEP                    32
+#endif
+
+#if defined(__SSE2__) || defined(__AVX2__)
+      VECTOR_TYPE Slashes = SET1_EPI8('/');
+      while (CurPtr+STEP <= BufferEnd) {
+        int cmp = MOVEMASK_EPI8(CMPEQ_EPI8(*(const VECTOR_TYPE*)CurPtr,
+                                Slashes));
         if (cmp != 0) {
           // Adjust the pointer to point directly after the first slash. It's
           // not necessary to set C here, it will be overwritten at the end of
@@ -2286,8 +2342,13 @@
           CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
           goto FoundSlash;
         }
-        CurPtr += 16;
+        CurPtr += STEP;
       }
+#undef VECTOR_TYPE
+#undef SET1_EPI8
+#undef CMPEQ_EPI8
+#undef MOVEMASK_EPI8
+#undef STEP
 #elif __ALTIVEC__
       __vector unsigned char Slashes = {
         '/', '/', '/', '/',  '/', '/', '/', '/',
@@ -2460,7 +2521,8 @@
     FormTokenWithChars(Result, CurPtr, tok::eod);
 
     // Restore comment saving mode, in case it was disabled for directive.
-    resetExtendedTokenMode();
+    if (PP)
+      resetExtendedTokenMode();
     return true;  // Have a token.
   }
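The vectorized comment-skipping hunk above only swaps wider registers in via macros; the underlying movemask trick is unchanged. A simplified SSE2-only version of the same idea (unaligned loads and GCC/Clang's __builtin_ctz; the real lexer also aligns the pointer first and watches for '\0' and code-completion points):

#include <emmintrin.h>  // SSE2 intrinsics
#include <cstring>
#include <iostream>

// Find the first '/' in [Cur, End) 16 bytes at a time, falling back to a
// byte loop for the tail -- the same compare/movemask scan the lexer uses
// to skip over the body of a block comment quickly.
static const char *findSlashSSE2(const char *Cur, const char *End) {
  const __m128i Slashes = _mm_set1_epi8('/');
  while (Cur + 16 <= End) {
    __m128i Chunk = _mm_loadu_si128(reinterpret_cast<const __m128i *>(Cur));
    int Mask = _mm_movemask_epi8(_mm_cmpeq_epi8(Chunk, Slashes));
    if (Mask != 0)
      return Cur + __builtin_ctz(Mask);  // first matching byte in this chunk
    Cur += 16;
  }
  while (Cur != End && *Cur != '/')
    ++Cur;
  return Cur;
}

int main() {
  const char *Buf = "comment body ................................ / done";
  const char *Hit = findSlashSSE2(Buf, Buf + std::strlen(Buf));
  std::cout << "slash at offset " << (Hit - Buf) << "\n";
}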
  
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index 17c6bb3..ddfa10c 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -212,6 +212,48 @@
   return ResultChar;
 }
 
+static void appendCodePoint(unsigned Codepoint,
+                            llvm::SmallVectorImpl<char> &Str) {
+  char ResultBuf[4];
+  char *ResultPtr = ResultBuf;
+  bool Res = llvm::ConvertCodePointToUTF8(Codepoint, ResultPtr);
+  (void)Res;
+  assert(Res && "Unexpected conversion failure");
+  Str.append(ResultBuf, ResultPtr);
+}
+
+void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
+  for (StringRef::iterator I = Input.begin(), E = Input.end(); I != E; ++I) {
+    if (*I != '\\') {
+      Buf.push_back(*I);
+      continue;
+    }
+
+    ++I;
+    assert(*I == 'u' || *I == 'U');
+
+    unsigned NumHexDigits;
+    if (*I == 'u')
+      NumHexDigits = 4;
+    else
+      NumHexDigits = 8;
+
+    assert(I + NumHexDigits <= E);
+
+    uint32_t CodePoint = 0;
+    for (++I; NumHexDigits != 0; ++I, --NumHexDigits) {
+      unsigned Value = llvm::hexDigitValue(*I);
+      assert(Value != -1U);
+
+      CodePoint <<= 4;
+      CodePoint += Value;
+    }
+
+    appendCodePoint(CodePoint, Buf);
+    --I;
+  }
+}
+
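expandUCNs above assumes the escapes were already validated by the lexer and simply re-encodes them as UTF-8. A standard-C++ approximation of that loop, with a hand-rolled encoder standing in for llvm::ConvertCodePointToUTF8:

#include <cstdint>
#include <iostream>
#include <string>

// Minimal UTF-8 encoder for the sketch (the real code calls into LLVM).
static void appendUTF8(uint32_t CP, std::string &Out) {
  if (CP < 0x80) {
    Out += char(CP);
  } else if (CP < 0x800) {
    Out += char(0xC0 | (CP >> 6));
    Out += char(0x80 | (CP & 0x3F));
  } else if (CP < 0x10000) {
    Out += char(0xE0 | (CP >> 12));
    Out += char(0x80 | ((CP >> 6) & 0x3F));
    Out += char(0x80 | (CP & 0x3F));
  } else {
    Out += char(0xF0 | (CP >> 18));
    Out += char(0x80 | ((CP >> 12) & 0x3F));
    Out += char(0x80 | ((CP >> 6) & 0x3F));
    Out += char(0x80 | (CP & 0x3F));
  }
}

// Expand \uXXXX and \UXXXXXXXX escapes, assuming they are well formed
// (which is what the lexer guarantees before expandUCNs runs).
static std::string expandUCNsSketch(const std::string &In) {
  std::string Out;
  for (std::string::size_type I = 0; I < In.size(); ++I) {
    if (In[I] != '\\') {
      Out += In[I];
      continue;
    }
    unsigned NumHexDigits = (In[I + 1] == 'u') ? 4 : 8;
    uint32_t CP = 0;
    for (unsigned J = 0; J < NumHexDigits; ++J)
      CP = (CP << 4) | uint32_t(std::stoul(In.substr(I + 2 + J, 1), 0, 16));
    appendUTF8(CP, Out);
    I += 1 + NumHexDigits;  // skip 'u'/'U' and the hex digits
  }
  return Out;
}

int main() {
  // "_s\u00E9" becomes "_s" followed by the two UTF-8 bytes of U+00E9.
  std::string Suffix = expandUCNsSketch("_s\\u00E9");
  std::cout << Suffix << " (" << Suffix.size() << " bytes)\n";
}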
 /// ProcessUCNEscape - Read the Universal Character Name, check constraints and
 /// return the UTF32.
 static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
@@ -625,8 +667,9 @@
   }
 
   if (s != ThisTokEnd) {
-    if (isValidUDSuffix(PP.getLangOpts(),
-                        StringRef(SuffixBegin, ThisTokEnd - SuffixBegin))) {
+    // FIXME: Don't bother expanding UCNs if !tok.hasUCN().
+    expandUCNs(UDSuffixBuf, StringRef(SuffixBegin, ThisTokEnd - SuffixBegin));
+    if (isValidUDSuffix(PP.getLangOpts(), UDSuffixBuf)) {
       // Any suffix pieces we might have parsed are actually part of the
       // ud-suffix.
       isLong = false;
@@ -992,7 +1035,8 @@
     do {
       --end;
     } while (end[-1] != '\'');
-    UDSuffixBuf.assign(end, UDSuffixEnd);
+    // FIXME: Don't bother with this if !tok.hasUCN().
+    expandUCNs(UDSuffixBuf, StringRef(end, UDSuffixEnd - end));
     UDSuffixOffset = end - TokBegin;
   }
 
@@ -1311,23 +1355,34 @@
       StringRef UDSuffix(ThisTokEnd, UDSuffixEnd - ThisTokEnd);
 
       if (UDSuffixBuf.empty()) {
-        UDSuffixBuf.assign(UDSuffix);
+        if (StringToks[i].hasUCN())
+          expandUCNs(UDSuffixBuf, UDSuffix);
+        else
+          UDSuffixBuf.assign(UDSuffix);
         UDSuffixToken = i;
         UDSuffixOffset = ThisTokEnd - ThisTokBuf;
         UDSuffixTokLoc = StringToks[i].getLocation();
-      } else if (!UDSuffixBuf.equals(UDSuffix)) {
+      } else {
+        SmallString<32> ExpandedUDSuffix;
+        if (StringToks[i].hasUCN()) {
+          expandUCNs(ExpandedUDSuffix, UDSuffix);
+          UDSuffix = ExpandedUDSuffix;
+        }
+
         // C++11 [lex.ext]p8: At the end of phase 6, if a string literal is the
         // result of a concatenation involving at least one user-defined-string-
         // literal, all the participating user-defined-string-literals shall
         // have the same ud-suffix.
-        if (Diags) {
-          SourceLocation TokLoc = StringToks[i].getLocation();
-          Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
-            << UDSuffixBuf << UDSuffix
-            << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc)
-            << SourceRange(TokLoc, TokLoc);
+        if (UDSuffixBuf != UDSuffix) {
+          if (Diags) {
+            SourceLocation TokLoc = StringToks[i].getLocation();
+            Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
+              << UDSuffixBuf << UDSuffix
+              << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc)
+              << SourceRange(TokLoc, TokLoc);
+          }
+          hadError = true;
         }
-        hadError = true;
       }
     }
 
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index b61ff71..0325a80 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -125,14 +125,11 @@
   return true;
 }
 
-MacroDirective::DefInfo MacroDirective::getDefinition(bool AllowHidden) {
+MacroDirective::DefInfo MacroDirective::getDefinition() {
   MacroDirective *MD = this;
   SourceLocation UndefLoc;
   Optional<bool> isPublic;
   for (; MD; MD = MD->getPrevious()) {
-    if (!AllowHidden && MD->isHidden())
-      continue;
-
     if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD))
       return DefInfo(DefMD, UndefLoc,
                      !isPublic.hasValue() || isPublic.getValue());
@@ -147,7 +144,7 @@
       isPublic = VisMD->isPublic();
   }
 
-  return DefInfo();
+  return DefInfo(0, UndefLoc, !isPublic.hasValue() || isPublic.getValue());
 }
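The getDefinition change above keeps walking even when no definition survives, so callers still see the latest #undef and visibility state. A toy model of that walk (plain structs, not the Clang classes):

#include <iostream>

// Toy model of a macro's directive history: a chain from the most recent
// directive back to the oldest, like MacroDirective::getPrevious().
struct Directive {
  enum Kind { Define, Undefine, Visibility };
  Kind K;
  bool Public;               // meaningful only for Visibility nodes
  const Directive *Previous; // next older directive, or null
};

struct DefInfo {
  const Directive *Def;  // most recent #define, or null
  bool UndefinedLater;   // an #undef sits between "now" and that #define
  bool IsPublic;
};

// Walk back through the chain, remembering the first #undef and the most
// recent visibility directive, and stop at the first #define -- the same
// shape as getDefinition above, which now reports undef and visibility
// state even when no definition is found.
static DefInfo getDefinition(const Directive *MD) {
  DefInfo Info = {nullptr, false, true};
  bool SawVisibility = false;
  for (; MD; MD = MD->Previous) {
    if (MD->K == Directive::Define) {
      Info.Def = MD;
      return Info;
    }
    if (MD->K == Directive::Undefine) {
      Info.UndefinedLater = true;
    } else if (!SawVisibility) {
      Info.IsPublic = MD->Public;
      SawVisibility = true;
    }
  }
  return Info;
}

int main() {
  Directive Def = {Directive::Define, true, nullptr};
  Directive Undef = {Directive::Undefine, true, &Def};
  DefInfo Info = getDefinition(&Undef);
  std::cout << (Info.Def ? "defined" : "not defined")
            << (Info.UndefinedLater ? ", later #undef'd" : "") << "\n";
}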
 
 const MacroDirective::DefInfo
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index f4dfa12..e78806d 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -59,7 +59,7 @@
   Module *Context = lookupModuleUnqualified(Id[0].first, Mod);
   if (!Context) {
     if (Complain)
-      Diags->Report(Id[0].second, diag::err_mmap_missing_module_unqualified)
+      Diags.Report(Id[0].second, diag::err_mmap_missing_module_unqualified)
       << Id[0].first << Mod->getFullModuleName();
 
     return 0;
@@ -70,7 +70,7 @@
     Module *Sub = lookupModuleQualified(Id[I].first, Context);
     if (!Sub) {
       if (Complain)
-        Diags->Report(Id[I].second, diag::err_mmap_missing_module_qualified)
+        Diags.Report(Id[I].second, diag::err_mmap_missing_module_qualified)
         << Id[I].first << Context->getFullModuleName()
         << SourceRange(Id[0].second, Id[I-1].second);
 
@@ -83,19 +83,12 @@
   return Context;
 }
 
-ModuleMap::ModuleMap(SourceManager &SourceMgr, DiagnosticConsumer &DC,
+ModuleMap::ModuleMap(SourceManager &SourceMgr, DiagnosticsEngine &Diags,
                      const LangOptions &LangOpts, const TargetInfo *Target,
                      HeaderSearch &HeaderInfo)
-    : SourceMgr(SourceMgr), LangOpts(LangOpts), Target(Target),
+    : SourceMgr(SourceMgr), Diags(Diags), LangOpts(LangOpts), Target(Target),
       HeaderInfo(HeaderInfo), BuiltinIncludeDir(0), CompilingModule(0),
-      SourceModule(0) {
-  IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs(new DiagnosticIDs);
-  Diags = IntrusiveRefCntPtr<DiagnosticsEngine>(
-            new DiagnosticsEngine(DiagIDs, new DiagnosticOptions));
-  Diags->setClient(new ForwardingDiagnosticConsumer(DC),
-                   /*ShouldOwnClient=*/true);
-  Diags->setSourceManager(&SourceMgr);
-}
+      SourceModule(0) {}
 
 ModuleMap::~ModuleMap() {
   for (llvm::StringMap<Module *>::iterator I = Modules.begin(), 
@@ -166,10 +159,115 @@
            .Default(false);
 }
 
+ModuleMap::HeadersMap::iterator
+ModuleMap::findKnownHeader(const FileEntry *File) {
+  HeadersMap::iterator Known = Headers.find(File);
+  if (Known == Headers.end() && File->getDir() == BuiltinIncludeDir &&
+      isBuiltinHeader(llvm::sys::path::filename(File->getName()))) {
+    HeaderInfo.loadTopLevelSystemModules();
+    return Headers.find(File);
+  }
+  return Known;
+}
+
+// Returns 'true' if 'RequestingModule' directly uses 'RequestedModule'.
+static bool directlyUses(const Module *RequestingModule,
+                         const Module *RequestedModule) {
+  return std::find(RequestingModule->DirectUses.begin(),
+                   RequestingModule->DirectUses.end(),
+                   RequestedModule) != RequestingModule->DirectUses.end();
+}
+
+static bool violatesPrivateInclude(Module *RequestingModule,
+                                   const FileEntry *IncFileEnt,
+                                   ModuleMap::ModuleHeaderRole Role,
+                                   Module *RequestedModule) {
+  #ifndef NDEBUG
+  // Check for consistency between the module header role
+  // as obtained from the lookup and as obtained from the module.
+  // This check is not cheap, so enable it only for debugging.
+  SmallVectorImpl<const FileEntry *> &PvtHdrs
+      = RequestedModule->PrivateHeaders;
+  SmallVectorImpl<const FileEntry *>::iterator Look
+      = std::find(PvtHdrs.begin(), PvtHdrs.end(), IncFileEnt);
+  bool IsPrivate = Look != PvtHdrs.end();
+  assert((IsPrivate && Role == ModuleMap::PrivateHeader)
+               || (!IsPrivate && Role != ModuleMap::PrivateHeader));
+  #endif
+  return Role == ModuleMap::PrivateHeader &&
+         RequestedModule->getTopLevelModule() != RequestingModule;
+}
+
+void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
+                                        SourceLocation FilenameLoc,
+                                        StringRef Filename,
+                                        const FileEntry *File) {
+  // No errors for indirect modules. This may be a bit of a problem for modules
+  // with no source files.
+  if (RequestingModule != SourceModule)
+    return;
+
+  if (RequestingModule)
+    resolveUses(RequestingModule, /*Complain=*/false);
+
+  HeadersMap::iterator Known = findKnownHeader(File);
+  if (Known == Headers.end())
+    return;
+
+  Module *Private = NULL;
+  Module *NotUsed = NULL;
+  for (SmallVectorImpl<KnownHeader>::iterator I = Known->second.begin(),
+                                              E = Known->second.end();
+       I != E; ++I) {
+    // Excluded headers don't really belong to a module.
+    if (I->getRole() == ModuleMap::ExcludedHeader)
+      continue;
+
+    // If 'File' is part of 'RequestingModule' we can definitely include it.
+    if (I->getModule() == RequestingModule)
+      return;
+
+    // Remember private headers for later printing of a diagnostic.
+    if (violatesPrivateInclude(RequestingModule, File, I->getRole(),
+                               I->getModule())) {
+      Private = I->getModule();
+      continue;
+    }
+
+    // If uses need to be specified explicitly, we are only allowed to return
+    // modules that are explicitly used by the requesting module.
+    if (RequestingModule && LangOpts.ModulesDeclUse &&
+        !directlyUses(RequestingModule, I->getModule())) {
+      NotUsed = I->getModule();
+      continue;
+    }
+
+    // We have found a module that we can happily use.
+    return;
+  }
+
+  // We have found a header, but it is private.
+  if (Private != NULL) {
+    Diags.Report(FilenameLoc, diag::error_use_of_private_header_outside_module)
+        << Filename;
+    return;
+  }
+
+  // We have found a module, but we don't use it.
+  if (NotUsed != NULL) {
+    Diags.Report(FilenameLoc, diag::error_undeclared_use_of_module)
+        << RequestingModule->getFullModuleName() << Filename;
+    return;
+  }
+
+  // Headers for which we have not found a module are fine to include.
+}
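A condensed view of the decision ladder diagnoseHeaderInclusion implements, as a standalone classifier with toy types and illustrative names: excluded entries are skipped, a header owned by the requesting module is always fine, a private header of another top-level module or a module missing from the declared uses produces the corresponding diagnostic, and an unowned header is accepted.

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

enum Role { Normal, Private, Excluded };

struct Known {       // one (module, role) pair a header maps to
  std::string Module;
  Role R;
};

// Mirrors the loop above: pick the first acceptable module, remembering why
// the others were rejected so a diagnostic can be chosen afterwards.
static std::string classifyInclude(const std::string &RequestingModule,
                                   const std::vector<std::string> &DirectUses,
                                   bool ModulesDeclUse,
                                   const std::vector<Known> &Owners) {
  std::string PrivateOwner, NotUsedOwner;
  for (const Known &K : Owners) {
    if (K.R == Excluded)
      continue;                       // excluded headers belong to no module
    if (K.Module == RequestingModule)
      return "ok (own header)";
    if (K.R == Private) {             // private header of another module
      PrivateOwner = K.Module;
      continue;
    }
    if (ModulesDeclUse &&
        std::find(DirectUses.begin(), DirectUses.end(), K.Module) ==
            DirectUses.end()) {
      NotUsedOwner = K.Module;        // usable module, but not a declared use
      continue;
    }
    return "ok (via " + K.Module + ")";
  }
  if (!PrivateOwner.empty())
    return "error: use of private header from " + PrivateOwner;
  if (!NotUsedOwner.empty())
    return "error: undeclared use of module " + NotUsedOwner;
  return "ok (not owned by any module)";
}

int main() {
  std::cout << classifyInclude("A", {"B"}, true, {{"C", Normal}}) << "\n";
}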
+
 ModuleMap::KnownHeader
 ModuleMap::findModuleForHeader(const FileEntry *File,
                                Module *RequestingModule) {
-  HeadersMap::iterator Known = Headers.find(File);
+  HeadersMap::iterator Known = findKnownHeader(File);
+
   if (Known != Headers.end()) {
     ModuleMap::KnownHeader Result = KnownHeader();
 
@@ -177,9 +275,12 @@
     for (SmallVectorImpl<KnownHeader>::iterator I = Known->second.begin(),
                                                 E = Known->second.end();
          I != E; ++I) {
-      // Cannot use a module if the header is excluded or unavailable in it.
-      if (I->getRole() == ModuleMap::ExcludedHeader ||
-          !I->getModule()->isAvailable())
+      // Cannot use a module if the header is excluded in it.
+      if (I->getRole() == ModuleMap::ExcludedHeader)
+        continue;
+
+      // Cannot use a module if it is unavailable.
+      if (!I->getModule()->isAvailable())
         continue;
 
       // If 'File' is part of 'RequestingModule', 'RequestingModule' is the
@@ -190,31 +291,20 @@
       // If uses need to be specified explicitly, we are only allowed to return
       // modules that are explicitly used by the requesting module.
       if (RequestingModule && LangOpts.ModulesDeclUse &&
-          std::find(RequestingModule->DirectUses.begin(),
-                    RequestingModule->DirectUses.end(),
-                    I->getModule()) == RequestingModule->DirectUses.end())
+          !directlyUses(RequestingModule, I->getModule()))
         continue;
+
       Result = *I;
       // If 'File' is a public header of this module, this is as good as we
       // are going to get.
+      // FIXME: If we have a RequestingModule, we should prefer the header from
+      // that module.
       if (I->getRole() == ModuleMap::NormalHeader)
         break;
     }
     return Result;
   }
 
-  // If we've found a builtin header within Clang's builtin include directory,
-  // load all of the module maps to see if it will get associated with a
-  // specific module (e.g., in /usr/include).
-  if (File->getDir() == BuiltinIncludeDir &&
-      isBuiltinHeader(llvm::sys::path::filename(File->getName()))) {
-    HeaderInfo.loadTopLevelSystemModules();
-
-    // Check again.
-    if (Headers.find(File) != Headers.end())
-      return findModuleForHeader(File, RequestingModule);
-  }
-  
   const DirectoryEntry *Dir = File->getDir();
   SmallVector<const DirectoryEntry *, 2> SkippedDirs;
 
@@ -504,9 +594,9 @@
         if (inferred == InferredDirectories.end()) {
           // We haven't looked here before. Load a module map, if there is
           // one.
-          SmallString<128> ModMapPath = Parent;
-          llvm::sys::path::append(ModMapPath, "module.map");
-          if (const FileEntry *ModMapFile = FileMgr.getFile(ModMapPath)) {
+          bool IsFrameworkDir = Parent.endswith(".framework");
+          if (const FileEntry *ModMapFile =
+                HeaderInfo.lookupModuleMapFile(ParentDir, IsFrameworkDir)) {
             parseModuleMapFile(ModMapFile, IsSystem);
             inferred = InferredDirectories.find(ParentDir);
           }
@@ -818,11 +908,14 @@
 
   /// \brief The set of attributes that can be attached to a module.
   struct Attributes {
-    Attributes() : IsSystem(), IsExhaustive() { }
+    Attributes() : IsSystem(), IsExternC(), IsExhaustive() { }
 
     /// \brief Whether this is a system module.
     unsigned IsSystem : 1;
 
+    /// \brief Whether this is an extern "C" module.
+    unsigned IsExternC : 1;
+
     /// \brief Whether this is an exhaustive set of configuration macros.
     unsigned IsExhaustive : 1;
   };
@@ -1066,7 +1159,7 @@
 bool ModuleMapParser::parseModuleId(ModuleId &Id) {
   Id.clear();
   do {
-    if (Tok.is(MMToken::Identifier)) {
+    if (Tok.is(MMToken::Identifier) || Tok.is(MMToken::StringLiteral)) {
       Id.push_back(std::make_pair(Tok.getString(), Tok.getLocation()));
       consumeToken();
     } else {
@@ -1090,6 +1183,8 @@
     AT_unknown,
     /// \brief The 'system' attribute.
     AT_system,
+    /// \brief The 'extern_c' attribute.
+    AT_extern_c,
     /// \brief The 'exhaustive' attribute.
     AT_exhaustive
   };
@@ -1187,7 +1282,8 @@
       
       if (ActiveModule) {
         Diags.Report(Id[I].second, diag::err_mmap_missing_module_qualified)
-          << Id[I].first << ActiveModule->getTopLevelModule();
+          << Id[I].first
+          << ActiveModule->getTopLevelModule()->getFullModuleName();
       } else {
         Diags.Report(Id[I].second, diag::err_mmap_expected_module_name);
       }
@@ -1246,7 +1342,9 @@
   ActiveModule->DefinitionLoc = ModuleNameLoc;
   if (Attrs.IsSystem || IsSystem)
     ActiveModule->IsSystem = true;
-  
+  if (Attrs.IsExternC)
+    ActiveModule->IsExternC = true;
+
   bool Done = false;
   do {
     switch (Tok.Kind) {
@@ -1474,12 +1572,13 @@
     HadError = true;
     return;
   }
-  std::string FileName = Tok.getString();
-  SourceLocation FileNameLoc = consumeToken();
+  Module::HeaderDirective Header;
+  Header.FileName = Tok.getString();
+  Header.FileNameLoc = consumeToken();
   
   // Check whether we already have an umbrella.
   if (LeadingToken == MMToken::UmbrellaKeyword && ActiveModule->Umbrella) {
-    Diags.Report(FileNameLoc, diag::err_mmap_umbrella_clash)
+    Diags.Report(Header.FileNameLoc, diag::err_mmap_umbrella_clash)
       << ActiveModule->getFullModuleName();
     HadError = true;
     return;
@@ -1489,12 +1588,12 @@
   const FileEntry *File = 0;
   const FileEntry *BuiltinFile = 0;
   SmallString<128> PathName;
-  if (llvm::sys::path::is_absolute(FileName)) {
-    PathName = FileName;
+  if (llvm::sys::path::is_absolute(Header.FileName)) {
+    PathName = Header.FileName;
     File = SourceMgr.getFileManager().getFile(PathName);
   } else if (const DirectoryEntry *Dir = getOverriddenHeaderSearchDir()) {
     PathName = Dir->getName();
-    llvm::sys::path::append(PathName, FileName);
+    llvm::sys::path::append(PathName, Header.FileName);
     File = SourceMgr.getFileManager().getFile(PathName);
   } else {
     // Search for the header file within the search directory.
@@ -1505,18 +1604,18 @@
       appendSubframeworkPaths(ActiveModule, PathName);
       
       // Check whether this file is in the public headers.
-      llvm::sys::path::append(PathName, "Headers", FileName);
+      llvm::sys::path::append(PathName, "Headers", Header.FileName);
       File = SourceMgr.getFileManager().getFile(PathName);
       
       if (!File) {
         // Check whether this file is in the private headers.
         PathName.resize(PathLength);
-        llvm::sys::path::append(PathName, "PrivateHeaders", FileName);
+        llvm::sys::path::append(PathName, "PrivateHeaders", Header.FileName);
         File = SourceMgr.getFileManager().getFile(PathName);
       }
     } else {
       // Lookup for normal headers.
-      llvm::sys::path::append(PathName, FileName);
+      llvm::sys::path::append(PathName, Header.FileName);
       File = SourceMgr.getFileManager().getFile(PathName);
       
       // If this is a system module with a top-level header, this header
@@ -1524,9 +1623,9 @@
       // supplied by Clang. Find that builtin header.
       if (ActiveModule->IsSystem && LeadingToken != MMToken::UmbrellaKeyword &&
           BuiltinIncludeDir && BuiltinIncludeDir != Directory &&
-          isBuiltinHeader(FileName)) {
+          isBuiltinHeader(Header.FileName)) {
         SmallString<128> BuiltinPathName(BuiltinIncludeDir->getName());
-        llvm::sys::path::append(BuiltinPathName, FileName);
+        llvm::sys::path::append(BuiltinPathName, Header.FileName);
         BuiltinFile = SourceMgr.getFileManager().getFile(BuiltinPathName);
         
         // If Clang supplies this header but the underlying system does not,
@@ -1571,10 +1670,13 @@
     }
   } else if (LeadingToken != MMToken::ExcludeKeyword) {
     // Ignore excluded header files. They're optional anyway.
-    
-    Diags.Report(FileNameLoc, diag::err_mmap_header_not_found)
-      << (LeadingToken == MMToken::UmbrellaKeyword) << FileName;
-    HadError = true;
+
+    // If we find a module that has a missing header, we mark this module as
+    // unavailable and store the header directive for displaying diagnostics.
+    // Other submodules in the same module can still be used.
+    Header.IsUmbrella = LeadingToken == MMToken::UmbrellaKeyword;
+    ActiveModule->IsAvailable = false;
+    ActiveModule->MissingHeaders.push_back(Header);
   }
 }
 
@@ -1687,25 +1789,7 @@
   consumeToken();
   // Parse the module-id.
   ModuleId ParsedModuleId;
-
-  do {
-    if (Tok.is(MMToken::Identifier)) {
-      ParsedModuleId.push_back(
-          std::make_pair(Tok.getString(), Tok.getLocation()));
-      consumeToken();
-
-      if (Tok.is(MMToken::Period)) {
-        consumeToken();
-        continue;
-      }
-
-      break;
-    }
-
-    Diags.Report(Tok.getLocation(), diag::err_mmap_module_id);
-    HadError = true;
-    return;
-  } while (true);
+  parseModuleId(ParsedModuleId);
 
   ActiveModule->UnresolvedDirectUses.push_back(ParsedModuleId);
 }
@@ -1913,6 +1997,7 @@
     // We'll be inferring framework modules for this directory.
     Map.InferredDirectories[Directory].InferModules = true;
     Map.InferredDirectories[Directory].InferSystemModules = Attrs.IsSystem;
+    // FIXME: Handle the 'framework' keyword.
   }
 
   // Parse the opening brace.
@@ -2023,6 +2108,7 @@
     AttributeKind Attribute
       = llvm::StringSwitch<AttributeKind>(Tok.getString())
           .Case("exhaustive", AT_exhaustive)
+          .Case("extern_c", AT_extern_c)
           .Case("system", AT_system)
           .Default(AT_unknown);
     switch (Attribute) {
@@ -2035,6 +2121,10 @@
       Attrs.IsSystem = true;
       break;
 
+    case AT_extern_c:
+      Attrs.IsExternC = true;
+      break;
+
     case AT_exhaustive:
       Attrs.IsExhaustive = true;
       break;
@@ -2124,18 +2214,28 @@
     return Known->second;
 
   assert(Target != 0 && "Missing target information");
-  FileID ID = SourceMgr.createFileID(File, SourceLocation(), SrcMgr::C_User);
+  auto FileCharacter = IsSystem ? SrcMgr::C_System : SrcMgr::C_User;
+  FileID ID = SourceMgr.createFileID(File, SourceLocation(), FileCharacter);
   const llvm::MemoryBuffer *Buffer = SourceMgr.getBuffer(ID);
   if (!Buffer)
     return ParsedModuleMap[File] = true;
+
+  // Find the directory for the module. For frameworks, that may require going
+  // up from the 'Modules' directory.
+  const DirectoryEntry *Dir = File->getDir();
+  StringRef DirName(Dir->getName());
+  if (llvm::sys::path::filename(DirName) == "Modules") {
+    DirName = llvm::sys::path::parent_path(DirName);
+    if (DirName.endswith(".framework"))
+      Dir = SourceMgr.getFileManager().getDirectory(DirName);
+    assert(Dir && "parent must exist");
+  }
   
   // Parse this module map file.
   Lexer L(ID, SourceMgr.getBuffer(ID), SourceMgr, MMapLangOpts);
-  Diags->getClient()->BeginSourceFile(MMapLangOpts);
-  ModuleMapParser Parser(L, SourceMgr, Target, *Diags, *this, File->getDir(),
+  ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, Dir,
                          BuiltinIncludeDir, IsSystem);
   bool Result = Parser.parseModuleMapFile();
-  Diags->getClient()->EndSourceFile();
   ParsedModuleMap[File] = Result;
   return Result;
 }
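
The private-header and use-declaration checks added above reduce to two small predicates. A minimal standalone sketch using plain STL types instead of Clang's Module/ModuleMap classes (Mod, Role and the helper names here are hypothetical, not the patch's code):

#include <algorithm>
#include <string>
#include <vector>

struct Mod {
  std::string Name;
  Mod *TopLevel = nullptr;        // top-level (framework) module
  std::vector<Mod *> DirectUses;  // modules named in 'use' declarations
};

enum class Role { Normal, Private, Excluded };

// Mirrors directlyUses(): a linear scan over the requesting module's uses.
static bool usesDirectly(const Mod *Requesting, const Mod *Requested) {
  return std::find(Requesting->DirectUses.begin(),
                   Requesting->DirectUses.end(),
                   Requested) != Requesting->DirectUses.end();
}

// Mirrors violatesPrivateInclude(): a private header is only reachable from
// inside its own top-level module.
static bool isPrivateViolation(const Mod *Requesting, Role R,
                               const Mod *Requested) {
  return R == Role::Private && Requested->TopLevel != Requesting;
}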
diff --git a/lib/Lex/PPCaching.cpp b/lib/Lex/PPCaching.cpp
index 6f4c189..bd48ae6 100644
--- a/lib/Lex/PPCaching.cpp
+++ b/lib/Lex/PPCaching.cpp
@@ -15,28 +15,28 @@
 #include "clang/Lex/Preprocessor.h"
 using namespace clang;
 
-/// EnableBacktrackAtThisPos - From the point that this method is called, and
-/// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
-/// keeps track of the lexed tokens so that a subsequent Backtrack() call will
-/// make the Preprocessor re-lex the same tokens.
-///
-/// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
-/// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
-/// be combined with the EnableBacktrackAtThisPos calls in reverse order.
+// EnableBacktrackAtThisPos - From the point that this method is called, and
+// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
+// keeps track of the lexed tokens so that a subsequent Backtrack() call will
+// make the Preprocessor re-lex the same tokens.
+//
+// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
+// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
+// be combined with the EnableBacktrackAtThisPos calls in reverse order.
 void Preprocessor::EnableBacktrackAtThisPos() {
   BacktrackPositions.push_back(CachedLexPos);
   EnterCachingLexMode();
 }
 
-/// CommitBacktrackedTokens - Disable the last EnableBacktrackAtThisPos call.
+// Disable the last EnableBacktrackAtThisPos call.
 void Preprocessor::CommitBacktrackedTokens() {
   assert(!BacktrackPositions.empty()
          && "EnableBacktrackAtThisPos was not called!");
   BacktrackPositions.pop_back();
 }
 
-/// Backtrack - Make Preprocessor re-lex the tokens that were lexed since
-/// EnableBacktrackAtThisPos() was previously called.
+// Make Preprocessor re-lex the tokens that were lexed since
+// EnableBacktrackAtThisPos() was previously called.
 void Preprocessor::Backtrack() {
   assert(!BacktrackPositions.empty()
          && "EnableBacktrackAtThisPos was not called!");
diff --git a/lib/Lex/PPConditionalDirectiveRecord.cpp b/lib/Lex/PPConditionalDirectiveRecord.cpp
index 16dc1d8..99b87a0 100644
--- a/lib/Lex/PPConditionalDirectiveRecord.cpp
+++ b/lib/Lex/PPConditionalDirectiveRecord.cpp
@@ -77,7 +77,7 @@
 
 void PPConditionalDirectiveRecord::If(SourceLocation Loc,
                                       SourceRange ConditionRange,
-                                      bool ConditionValue) {
+                                      ConditionValueKind ConditionValue) {
   addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
   CondDirectiveStack.push_back(Loc);
 }
@@ -98,7 +98,7 @@
 
 void PPConditionalDirectiveRecord::Elif(SourceLocation Loc,
                                         SourceRange ConditionRange,
-                                        bool ConditionValue,
+                                        ConditionValueKind ConditionValue,
                                         SourceLocation IfLoc) {
   addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
   CondDirectiveStack.back() = Loc;
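
Clients overriding these callbacks need the same signature change. A sketch of a hypothetical PPCallbacks consumer adapted to ConditionValueKind (CVK_True/CVK_False/CVK_NotEvaluated, as used elsewhere in this patch):

#include "clang/Lex/PPCallbacks.h"
using namespace clang;

class CountingPPCallbacks : public PPCallbacks {
  unsigned TakenBranches = 0;

  void If(SourceLocation Loc, SourceRange ConditionRange,
          ConditionValueKind ConditionValue) override {
    if (ConditionValue == CVK_True)
      ++TakenBranches;
  }

  void Elif(SourceLocation Loc, SourceRange ConditionRange,
            ConditionValueKind ConditionValue, SourceLocation IfLoc) override {
    // CVK_NotEvaluated is reported for directives that are skipped without
    // evaluating their condition.
    if (ConditionValue == CVK_True)
      ++TakenBranches;
  }
};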
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 86c508f..57dc495 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -25,6 +25,7 @@
 #include "clang/Lex/Pragma.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
 #include "llvm/Support/SaveAndRestore.h"
 using namespace clang;
 
@@ -60,8 +61,8 @@
 
 MacroInfo *Preprocessor::AllocateDeserializedMacroInfo(SourceLocation L,
                                                        unsigned SubModuleID) {
-  LLVM_STATIC_ASSERT(llvm::AlignOf<MacroInfo>::Alignment >= sizeof(SubModuleID),
-                     "alignment for MacroInfo is less than the ID");
+  static_assert(llvm::AlignOf<MacroInfo>::Alignment >= sizeof(SubModuleID),
+                "alignment for MacroInfo is less than the ID");
   DeserializedMacroInfoChain *MIChain =
       BP.Allocate<DeserializedMacroInfoChain>();
   MIChain->Next = DeserialMIChainHead;
@@ -162,7 +163,7 @@
     const IdentifierInfo &Info = Identifiers.get(Spelling);
 
     // Allow #defining |and| and friends in microsoft mode.
-    if (Info.isCPlusPlusOperatorKeyword() && getLangOpts().MicrosoftMode) {
+    if (Info.isCPlusPlusOperatorKeyword() && getLangOpts().MSVCCompat) {
       MacroNameTok.setIdentifierInfo(getIdentifierInfo(Spelling));
       return;
     }
@@ -404,35 +405,33 @@
       } else if (Sub == "lif") {  // "elif".
         PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
 
-        bool ShouldEnter;
-        const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
+        // If this is a #elif with a #else before it, report the error.
+        if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
+
         // If this is in a skipping block or if we're already handled this #if
         // block, don't bother parsing the condition.
         if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
           DiscardUntilEndOfDirective();
-          ShouldEnter = false;
         } else {
+          const SourceLocation CondBegin = CurPPLexer->getSourceLocation();
           // Restore the value of LexingRawMode so that identifiers are
           // looked up, etc, inside the #elif expression.
           assert(CurPPLexer->LexingRawMode && "We have to be skipping here!");
           CurPPLexer->LexingRawMode = false;
           IdentifierInfo *IfNDefMacro = 0;
-          ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro);
+          const bool CondValue = EvaluateDirectiveExpression(IfNDefMacro);
           CurPPLexer->LexingRawMode = true;
-        }
-        const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
-
-        // If this is a #elif with a #else before it, report the error.
-        if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
-
-        // If this condition is true, enter it!
-        if (ShouldEnter) {
-          CondInfo.FoundNonSkip = true;
-          if (Callbacks)
+          if (Callbacks) {
+            const SourceLocation CondEnd = CurPPLexer->getSourceLocation();
             Callbacks->Elif(Tok.getLocation(),
-                            SourceRange(ConditionalBegin, ConditionalEnd),
-                            ShouldEnter, CondInfo.IfLoc);
-          break;
+                            SourceRange(CondBegin, CondEnd),
+                            (CondValue ? PPCallbacks::CVK_True : PPCallbacks::CVK_False), CondInfo.IfLoc);
+          }
+          // If this condition is true, enter it!
+          if (CondValue) {
+            CondInfo.FoundNonSkip = true;
+            break;
+          }
         }
       }
     }
@@ -540,7 +539,8 @@
     return HeaderInfo.getModuleMap().SourceModule; // Compiling a source.
   }
   // Try to determine the module of the include directive.
-  FileID IDOfIncl = SourceMgr.getFileID(FilenameLoc);
+  // FIXME: Look into directly passing the FileEntry from LookupFile instead.
+  FileID IDOfIncl = SourceMgr.getFileID(SourceMgr.getExpansionLoc(FilenameLoc));
   if (const FileEntry *EntryOfIncl = SourceMgr.getFileEntryForID(IDOfIncl)) {
     // The include comes from a file.
     return ModMap.findModuleForHeader(EntryOfIncl).getModule();
@@ -551,69 +551,6 @@
   }
 }
 
-bool Preprocessor::violatesPrivateInclude(
-    Module *RequestingModule,
-    const FileEntry *IncFileEnt,
-    ModuleMap::ModuleHeaderRole Role,
-    Module *RequestedModule) {
-  #ifndef NDEBUG
-  // Check for consistency between the module header role
-  // as obtained from the lookup and as obtained from the module.
-  // This check is not cheap, so enable it only for debugging.
-  SmallVectorImpl<const FileEntry *> &PvtHdrs
-      = RequestedModule->PrivateHeaders;
-  SmallVectorImpl<const FileEntry *>::iterator Look
-      = std::find(PvtHdrs.begin(), PvtHdrs.end(), IncFileEnt);
-  bool IsPrivate = Look != PvtHdrs.end();
-  assert((IsPrivate && Role == ModuleMap::PrivateHeader)
-               || (!IsPrivate && Role != ModuleMap::PrivateHeader));
-  #endif
-  return Role == ModuleMap::PrivateHeader &&
-         RequestedModule->getTopLevelModule() != RequestingModule;
-}
-
-bool Preprocessor::violatesUseDeclarations(
-    Module *RequestingModule,
-    Module *RequestedModule) {
-  ModuleMap &ModMap = HeaderInfo.getModuleMap();
-  ModMap.resolveUses(RequestingModule, /*Complain=*/false);
-  const SmallVectorImpl<Module *> &AllowedUses = RequestingModule->DirectUses;
-  SmallVectorImpl<Module *>::const_iterator Declared =
-      std::find(AllowedUses.begin(), AllowedUses.end(), RequestedModule);
-  return Declared == AllowedUses.end();
-}
-
-void Preprocessor::verifyModuleInclude(SourceLocation FilenameLoc,
-                                       StringRef Filename,
-                                       const FileEntry *IncFileEnt) {
-  Module *RequestingModule = getModuleForLocation(FilenameLoc);
-  if (RequestingModule)
-    HeaderInfo.getModuleMap().resolveUses(RequestingModule, /*Complain=*/false);
-  ModuleMap::KnownHeader RequestedModule =
-      HeaderInfo.getModuleMap().findModuleForHeader(IncFileEnt,
-                                                    RequestingModule);
-
-  if (RequestingModule == RequestedModule.getModule())
-    return; // No faults wihin a module, or between files both not in modules.
-
-  if (RequestingModule != HeaderInfo.getModuleMap().SourceModule)
-    return; // No errors for indirect modules.
-            // This may be a bit of a problem for modules with no source files.
-
-  if (RequestedModule && violatesPrivateInclude(RequestingModule, IncFileEnt,
-                                                RequestedModule.getRole(),
-                                                RequestedModule.getModule()))
-    Diag(FilenameLoc, diag::error_use_of_private_header_outside_module)
-        << Filename;
-
-  // FIXME: Add support for FixIts in module map files and offer adding the
-  // required use declaration.
-  if (RequestingModule && getLangOpts().ModulesDeclUse &&
-      violatesUseDeclarations(RequestingModule, RequestedModule.getModule()))
-    Diag(FilenameLoc, diag::error_undeclared_use_of_module)
-        << Filename;
-}
-
 const FileEntry *Preprocessor::LookupFile(
     SourceLocation FilenameLoc,
     StringRef Filename,
@@ -624,12 +561,12 @@
     SmallVectorImpl<char> *RelativePath,
     ModuleMap::KnownHeader *SuggestedModule,
     bool SkipCache) {
-  // If the header lookup mechanism may be relative to the current file, pass in
-  // info about where the current file is.
-  const FileEntry *CurFileEnt = 0;
+  // If the header lookup mechanism may be relative to the current inclusion
+  // stack, record the parent #includes.
+  SmallVector<const FileEntry *, 16> Includers;
   if (!FromDir) {
     FileID FID = getCurrentFileLexer()->getFileID();
-    CurFileEnt = SourceMgr.getFileEntryForID(FID);
+    const FileEntry *FileEnt = SourceMgr.getFileEntryForID(FID);
 
     // If there is no file entry associated with this file, it must be the
     // predefines buffer.  Any other file is not lexed with a normal lexer, so
@@ -637,23 +574,39 @@
     // predefines buffer, resolve #include references (which come from the
     // -include command line argument) as if they came from the main file, this
     // affects file lookup etc.
-    if (CurFileEnt == 0) {
-      FID = SourceMgr.getMainFileID();
-      CurFileEnt = SourceMgr.getFileEntryForID(FID);
+    if (!FileEnt)
+      FileEnt = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID());
+
+    if (FileEnt)
+      Includers.push_back(FileEnt);
+
+    // MSVC searches the current include stack from top to bottom for
+    // headers included by quoted include directives.
+    // See: http://msdn.microsoft.com/en-us/library/36k2cdd4.aspx
+    if (LangOpts.MSVCCompat && !isAngled) {
+      for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
+        IncludeStackInfo &ISEntry = IncludeMacroStack[e - i - 1];
+        if (IsFileLexer(ISEntry))
+          if ((FileEnt = SourceMgr.getFileEntryForID(
+                   ISEntry.ThePPLexer->getFileID())))
+            Includers.push_back(FileEnt);
+      }
     }
   }
 
   // Do a standard file entry lookup.
   CurDir = CurDirLookup;
   const FileEntry *FE = HeaderInfo.LookupFile(
-      Filename, isAngled, FromDir, CurDir, CurFileEnt,
-      SearchPath, RelativePath, SuggestedModule, SkipCache);
+      Filename, FilenameLoc, isAngled, FromDir, CurDir, Includers, SearchPath,
+      RelativePath, SuggestedModule, SkipCache);
   if (FE) {
-    if (SuggestedModule)
-      verifyModuleInclude(FilenameLoc, Filename, FE);
+    if (SuggestedModule && !LangOpts.AsmPreprocessor)
+      HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
+          getModuleForLocation(FilenameLoc), FilenameLoc, Filename, FE);
     return FE;
   }
 
+  const FileEntry *CurFileEnt;
   // Otherwise, see if this is a subframework header.  If so, this is relative
   // to one of the headers on the #include stack.  Walk the list of the current
   // headers on the #include stack and pass them to HeaderInfo.
@@ -1327,21 +1280,20 @@
   return isAngled;
 }
 
-/// \brief Handle cases where the \#include name is expanded from a macro
-/// as multiple tokens, which need to be glued together.
-///
-/// This occurs for code like:
-/// \code
-///    \#define FOO <a/b.h>
-///    \#include FOO
-/// \endcode
-/// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
-///
-/// This code concatenates and consumes tokens up to the '>' token.  It returns
-/// false if the > was found, otherwise it returns true if it finds and consumes
-/// the EOD marker.
-bool Preprocessor::ConcatenateIncludeName(
-                                        SmallString<128> &FilenameBuffer,
+// \brief Handle cases where the \#include name is expanded from a macro
+// as multiple tokens, which need to be glued together.
+//
+// This occurs for code like:
+// \code
+//    \#define FOO <a/b.h>
+//    \#include FOO
+// \endcode
+// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
+//
+// This code concatenates and consumes tokens up to the '>' token.  It returns
+// false if the > was found, otherwise it returns true if it finds and consumes
+// the EOD marker.
+bool Preprocessor::ConcatenateIncludeName(SmallString<128> &FilenameBuffer,
                                           SourceLocation &End) {
   Token CurTok;
 
@@ -1389,6 +1341,19 @@
   return true;
 }
 
+/// \brief Push a token onto the token stream containing an annotation.
+static void EnterAnnotationToken(Preprocessor &PP,
+                                 SourceLocation Begin, SourceLocation End,
+                                 tok::TokenKind Kind, void *AnnotationVal) {
+  Token *Tok = new Token[1];
+  Tok[0].startToken();
+  Tok[0].setKind(Kind);
+  Tok[0].setLocation(Begin);
+  Tok[0].setAnnotationEndLoc(End);
+  Tok[0].setAnnotationValue(AnnotationVal);
+  PP.EnterTokenStream(Tok, 1, true, true);
+}
+
 /// HandleIncludeDirective - The "\#include" tokens have just been read, read
 /// the file to be included from the lexer, then include it!  This is a common
 /// routine with functionality shared between \#include, \#include_next and
@@ -1485,9 +1450,15 @@
   // the path.
   ModuleMap::KnownHeader SuggestedModule;
   SourceLocation FilenameLoc = FilenameTok.getLocation();
+  SmallString<128> NormalizedPath;
+  if (LangOpts.MSVCCompat) {
+    NormalizedPath = Filename.str();
+    llvm::sys::fs::normalize_separators(NormalizedPath);
+  }
   const FileEntry *File = LookupFile(
-      FilenameLoc, Filename, isAngled, LookupFrom, CurDir,
-      Callbacks ? &SearchPath : NULL, Callbacks ? &RelativePath : NULL,
+      FilenameLoc, LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename,
+      isAngled, LookupFrom, CurDir, Callbacks ? &SearchPath : NULL,
+      Callbacks ? &RelativePath : NULL,
       HeaderInfo.getHeaderSearchOpts().ModuleMaps ? &SuggestedModule : 0);
 
   if (Callbacks) {
@@ -1501,8 +1472,11 @@
           HeaderInfo.AddSearchPath(DL, isAngled);
           
           // Try the lookup again, skipping the cache.
-          File = LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, CurDir,
-                            0, 0, HeaderInfo.getHeaderSearchOpts().ModuleMaps
+          File = LookupFile(FilenameLoc,
+                            LangOpts.MSVCCompat ? NormalizedPath.c_str()
+                                                : Filename,
+                            isAngled, LookupFrom, CurDir, 0, 0,
+                            HeaderInfo.getHeaderSearchOpts().ModuleMaps
                                       ? &SuggestedModule
                                       : 0,
                             /*SkipCache*/ true);
@@ -1512,10 +1486,11 @@
     
     if (!SuggestedModule || !getLangOpts().Modules) {
       // Notify the callback object that we've seen an inclusion directive.
-      Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled,
-                                    FilenameRange, File,
-                                    SearchPath, RelativePath,
-                                    /*ImportedModule=*/0);
+      Callbacks->InclusionDirective(HashLoc, IncludeTok,
+                                    LangOpts.MSVCCompat ? NormalizedPath.c_str()
+                                                        : Filename,
+                                    isAngled, FilenameRange, File, SearchPath,
+                                    RelativePath, /*ImportedModule=*/0);
     }
   }
   
@@ -1526,8 +1501,9 @@
       // provide the user with a possible fixit.
       if (isAngled) {
         File = LookupFile(
-            FilenameLoc, Filename, false, LookupFrom, CurDir,
-            Callbacks ? &SearchPath : 0, Callbacks ? &RelativePath : 0,
+            FilenameLoc, LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename,
+            false, LookupFrom, CurDir, Callbacks ? &SearchPath : 0,
+            Callbacks ? &RelativePath : 0,
             HeaderInfo.getHeaderSearchOpts().ModuleMaps ? &SuggestedModule : 0);
         if (File) {
           SourceRange Range(FilenameTok.getLocation(), CharEnd);
@@ -1590,7 +1566,7 @@
     // include directive maps to.
     bool BuildingImportedModule
       = Path[0].first->getName() == getLangOpts().CurrentModule;
-    
+
     if (!BuildingImportedModule && getLangOpts().ObjC2) {
       // If we're not building the imported module, warn that we're going
       // to automatically turn this inclusion directive into a module import.
@@ -1639,13 +1615,8 @@
         // make the module visible.
         // FIXME: Produce this as the current token directly, rather than
         // allocating a new token for it.
-        Token *Tok = new Token[1];
-        Tok[0].startToken();
-        Tok[0].setKind(tok::annot_module_include);
-        Tok[0].setLocation(HashLoc);
-        Tok[0].setAnnotationEndLoc(End);
-        Tok[0].setAnnotationValue(Imported);
-        EnterTokenStream(Tok, 1, true, true);
+        EnterAnnotationToken(*this, HashLoc, End, tok::annot_module_include,
+                             Imported);
       }
       return;
     }
@@ -1692,8 +1663,27 @@
   FileID FID = SourceMgr.createFileID(File, IncludePos, FileCharacter);
   assert(!FID.isInvalid() && "Expected valid file ID");
 
-  // Finally, if all is good, enter the new file!
-  EnterSourceFile(FID, CurDir, FilenameTok.getLocation());
+  // Determine if we're switching to building a new submodule, and which one.
+  ModuleMap::KnownHeader BuildingModule;
+  if (getLangOpts().Modules && !getLangOpts().CurrentModule.empty()) {
+    Module *RequestingModule = getModuleForLocation(FilenameLoc);
+    BuildingModule =
+        HeaderInfo.getModuleMap().findModuleForHeader(File, RequestingModule);
+  }
+
+  // If all is good, enter the new file!
+  if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation()))
+    return;
+
+  // If we're walking into another part of the same module, let the parser
+  // know that any future declarations are within that other submodule.
+  if (BuildingModule) {
+    assert(!CurSubmodule && "should not have marked this as a module yet");
+    CurSubmodule = BuildingModule.getModule();
+
+    EnterAnnotationToken(*this, HashLoc, End, tok::annot_module_begin,
+                         CurSubmodule);
+  }
 }
 
 /// HandleIncludeNextDirective - Implements \#include_next.
@@ -1738,7 +1728,7 @@
 void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
                                          Token &ImportTok) {
   if (!LangOpts.ObjC1) {  // #import is standard for ObjC.
-    if (LangOpts.MicrosoftMode)
+    if (LangOpts.MSVCCompat)
       return HandleMicrosoftImportDirective(ImportTok);
     Diag(ImportTok, diag::ext_pp_import_directive);
   }
@@ -2259,7 +2249,7 @@
   if (Callbacks)
     Callbacks->If(IfToken.getLocation(),
                   SourceRange(ConditionalBegin, ConditionalEnd),
-                  ConditionalTrue);
+                  (ConditionalTrue ? PPCallbacks::CVK_True : PPCallbacks::CVK_False));
 
   // Should we include the stuff contained by this directive?
   if (ConditionalTrue) {
@@ -2356,7 +2346,7 @@
   if (Callbacks)
     Callbacks->Elif(ElifToken.getLocation(),
                     SourceRange(ConditionalBegin, ConditionalEnd),
-                    true, CI.IfLoc);
+                    PPCallbacks::CVK_NotEvaluated, CI.IfLoc);
 
   // Finally, skip the rest of the contents of this block.
   SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
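
The MSVC quoted-include behaviour referenced above (searching the include stack from top to bottom) amounts to probing each includer's directory in order. A simplified, self-contained analogue, not the patch's code (findQuotedInclude is hypothetical; the real lookup happens inside HeaderSearch):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include <string>

static std::string findQuotedInclude(llvm::StringRef Filename,
                                     llvm::ArrayRef<std::string> Includers) {
  // Includers holds the files on the include stack, innermost first.
  for (unsigned i = 0, e = Includers.size(); i != e; ++i) {
    llvm::SmallString<128> Candidate(llvm::sys::path::parent_path(Includers[i]));
    llvm::sys::path::append(Candidate, Filename);
    if (llvm::sys::fs::exists(Candidate))
      return Candidate.str().str();  // first hit wins
  }
  return std::string();              // fall back to the normal search path
}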
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index 87c0a6a..6975d31 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -130,8 +130,9 @@
     PP.LexUnexpandedNonComment(PeekTok);
 
     if (PeekTok.isNot(tok::r_paren)) {
-      PP.Diag(PeekTok.getLocation(), diag::err_pp_missing_rparen) << "defined";
-      PP.Diag(LParenLoc, diag::note_matching) << "(";
+      PP.Diag(PeekTok.getLocation(), diag::err_pp_expected_after)
+          << "'defined'" << tok::r_paren;
+      PP.Diag(LParenLoc, diag::note_matching) << tok::l_paren;
       return true;
     }
     // Consume the ).
@@ -257,9 +258,10 @@
       // large that it is unsigned" e.g. on 12345678901234567890 where intmax_t
       // is 64-bits.
       if (!Literal.isUnsigned && Result.Val.isNegative()) {
-        // Don't warn for a hex or octal literal: 0x8000..0 shouldn't warn.
+        // Octal, hexadecimal, and binary literals are implicitly unsigned if
+        // the value does not fit into a signed integer type.
         if (ValueLive && Literal.getRadix() == 10)
-          PP.Diag(PeekTok, diag::warn_integer_too_large_for_signed);
+          PP.Diag(PeekTok, diag::ext_integer_too_large_for_signed);
         Result.Val.setIsUnsigned(true);
       }
     }
@@ -342,7 +344,7 @@
       if (PeekTok.isNot(tok::r_paren)) {
         PP.Diag(PeekTok.getLocation(), diag::err_pp_expected_rparen)
           << Result.getRange();
-        PP.Diag(Start, diag::note_matching) << "(";
+        PP.Diag(Start, diag::note_matching) << tok::l_paren;
         return true;
       }
       DT.State = DefinedTracker::Unknown;
@@ -680,9 +682,9 @@
     case tok::question: {
       // Parse the : part of the expression.
       if (PeekTok.isNot(tok::colon)) {
-        PP.Diag(PeekTok.getLocation(), diag::err_expected_colon)
-          << LHS.getRange(), RHS.getRange();
-        PP.Diag(OpLoc, diag::note_matching) << "?";
+        PP.Diag(PeekTok.getLocation(), diag::err_expected)
+            << tok::colon << LHS.getRange() << RHS.getRange();
+        PP.Diag(OpLoc, diag::note_matching) << tok::question;
         return true;
       }
       // Consume the :.
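
A minimal illustration of the literal case the new comment describes, assuming a 64-bit intmax_t:

#if 0x8000000000000000 > 0           // hex: silently treated as unsigned
#endif
#if 9223372036854775808 > 0          // decimal: diagnosed, then treated as unsigned
#endif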
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index 1f970a4..949cd63 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -68,7 +68,7 @@
 
 /// EnterSourceFile - Add a source file to the top of the include stack and
 /// start lexing tokens from it instead of the current buffer.
-void Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
+bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
                                    SourceLocation Loc) {
   assert(!CurTokenLexer && "Cannot #include a file inside a macro!");
   ++NumEnteredSourceFiles;
@@ -79,7 +79,7 @@
   if (PTH) {
     if (PTHLexer *PL = PTH->CreateLexer(FID)) {
       EnterSourceFileWithPTH(PL, CurDir);
-      return;
+      return false;
     }
   }
   
@@ -91,7 +91,7 @@
     SourceLocation FileStart = SourceMgr.getLocForStartOfFile(FID);
     Diag(Loc, diag::err_pp_error_opening_file)
       << std::string(SourceMgr.getBufferName(FileStart)) << "";
-    return;
+    return true;
   }
 
   if (isCodeCompletionEnabled() &&
@@ -102,7 +102,7 @@
   }
 
   EnterSourceFileWithLexer(new Lexer(FID, InputFile, *this), CurDir);
-  return;
+  return false;
 }
 
 /// EnterSourceFileWithLexer - Add a source file to the top of the include stack
@@ -117,6 +117,7 @@
   CurLexer.reset(TheLexer);
   CurPPLexer = TheLexer;
   CurDirLookup = CurDir;
+  CurSubmodule = 0;
   if (CurLexerKind != CLK_LexAfterModuleImport)
     CurLexerKind = CLK_Lexer;
   
@@ -141,6 +142,7 @@
   CurDirLookup = CurDir;
   CurPTHLexer.reset(PL);
   CurPPLexer = CurPTHLexer.get();
+  CurSubmodule = 0;
   if (CurLexerKind != CLK_LexAfterModuleImport)
     CurLexerKind = CLK_PTHLexer;
   
@@ -244,6 +246,29 @@
   // but it might if they're empty?
 }
 
+/// \brief Determine the location to use as the end of the buffer for a lexer.
+///
+/// If the file ends with a newline, form the EOF token on the newline itself,
+/// rather than "on the line following it", which doesn't exist.  This makes
+/// diagnostics relating to the end of file include the last file that the user
+/// actually typed, which is goodness.
+const char *Preprocessor::getCurLexerEndPos() {
+  const char *EndPos = CurLexer->BufferEnd;
+  if (EndPos != CurLexer->BufferStart &&
+      (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
+    --EndPos;
+
+    // Handle \n\r and \r\n:
+    if (EndPos != CurLexer->BufferStart &&
+        (EndPos[-1] == '\n' || EndPos[-1] == '\r') &&
+        EndPos[-1] != EndPos[0])
+      --EndPos;
+  }
+
+  return EndPos;
+}
+
+
 /// HandleEndOfFile - This callback is invoked when the lexer hits the end of
 /// the current file.  This either returns the EOF token or pops a level off
 /// the include stack and keeps going.
@@ -342,7 +367,18 @@
     FileID ExitedFID;
     if (Callbacks && !isEndOfMacro && CurPPLexer)
       ExitedFID = CurPPLexer->getFileID();
-    
+
+    bool LeavingSubmodule = CurSubmodule && CurLexer;
+    if (LeavingSubmodule) {
+      // Notify the parser that we've left the module.
+      const char *EndPos = getCurLexerEndPos();
+      Result.startToken();
+      CurLexer->BufferPtr = EndPos;
+      CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_module_end);
+      Result.setAnnotationEndLoc(Result.getLocation());
+      Result.setAnnotationValue(CurSubmodule);
+    }
+
     // We're done with the #included file.
     RemoveTopOfLexerStack();
 
@@ -357,27 +393,13 @@
                              PPCallbacks::ExitFile, FileType, ExitedFID);
     }
 
-    // Client should lex another token.
-    return false;
+    // Client should lex another token unless we generated an EOM.
+    return LeavingSubmodule;
   }
 
-  // If the file ends with a newline, form the EOF token on the newline itself,
-  // rather than "on the line following it", which doesn't exist.  This makes
-  // diagnostics relating to the end of file include the last file that the user
-  // actually typed, which is goodness.
+  // If this is the end of the main file, form an EOF token.
   if (CurLexer) {
-    const char *EndPos = CurLexer->BufferEnd;
-    if (EndPos != CurLexer->BufferStart &&
-        (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
-      --EndPos;
-
-      // Handle \n\r and \r\n:
-      if (EndPos != CurLexer->BufferStart &&
-          (EndPos[-1] == '\n' || EndPos[-1] == '\r') &&
-          EndPos[-1] != EndPos[0])
-        --EndPos;
-    }
-
+    const char *EndPos = getCurLexerEndPos();
     Result.startToken();
     CurLexer->BufferPtr = EndPos;
     CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
@@ -405,11 +427,15 @@
   if (!isIncrementalProcessingEnabled())
     CurPPLexer = 0;
 
-  // This is the end of the top-level file. 'WarnUnusedMacroLocs' has collected
-  // all macro locations that we need to warn because they are not used.
-  for (WarnUnusedMacroLocsTy::iterator
-         I=WarnUnusedMacroLocs.begin(), E=WarnUnusedMacroLocs.end(); I!=E; ++I)
-    Diag(*I, diag::pp_macro_not_used);
+  if (TUKind == TU_Complete) {
+    // This is the end of the top-level file. 'WarnUnusedMacroLocs' has
+    // collected all macro locations that we need to warn because they are not
+    // used.
+    for (WarnUnusedMacroLocsTy::iterator
+           I=WarnUnusedMacroLocs.begin(), E=WarnUnusedMacroLocs.end();
+           I!=E; ++I)
+      Diag(*I, diag::pp_macro_not_used);
+  }
 
   // If we are building a module that has an umbrella header, make sure that
   // each of the headers within the directory covered by the umbrella header
@@ -498,7 +524,7 @@
   if (NumCachedTokenLexers == TokenLexerCacheSize)
     CurTokenLexer.reset();
   else
-    TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.take();
+    TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.release();
 
   // Handle this like a #include file being popped off the stack.
   return HandleEndOfFile(Result, true);
@@ -515,7 +541,7 @@
     if (NumCachedTokenLexers == TokenLexerCacheSize)
       CurTokenLexer.reset();
     else
-      TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.take();
+      TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.release();
   }
 
   PopIncludeMacroStack();
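
The end-of-buffer adjustment factored into getCurLexerEndPos() can be exercised in isolation. A self-contained analogue with a small check (names are hypothetical):

#include <cassert>
#include <cstring>

static const char *getBufferEndPos(const char *BufferStart,
                                   const char *BufferEnd) {
  const char *EndPos = BufferEnd;
  if (EndPos != BufferStart && (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
    --EndPos;
    // Treat \n\r and \r\n as a single line ending.
    if (EndPos != BufferStart &&
        (EndPos[-1] == '\n' || EndPos[-1] == '\r') && EndPos[-1] != EndPos[0])
      --EndPos;
  }
  return EndPos;
}

static void testBufferEndPos() {
  const char Buf[] = "int x;\r\n";
  const char *End = getBufferEndPos(Buf, Buf + std::strlen(Buf));
  assert(*End == '\r');  // the EOF token lands on the trailing newline
}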
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index f20633f..35f9192 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -7,19 +7,20 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements the top level handling of macro expasion for the
+// This file implements the top level handling of macro expansion for the
 // preprocessor.
 //
 //===----------------------------------------------------------------------===//
 
 #include "clang/Lex/Preprocessor.h"
-#include "clang/Lex/MacroArgs.h"
+#include "clang/Basic/Attributes.h"
 #include "clang/Basic/FileManager.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/Lex/CodeCompletionHandler.h"
 #include "clang/Lex/ExternalPreprocessorSource.h"
 #include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/MacroArgs.h"
 #include "clang/Lex/MacroInfo.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallString.h"
@@ -97,6 +98,15 @@
   Ident__INCLUDE_LEVEL__ = RegisterBuiltinMacro(*this, "__INCLUDE_LEVEL__");
   Ident__TIMESTAMP__     = RegisterBuiltinMacro(*this, "__TIMESTAMP__");
 
+  // Microsoft Extensions.
+  if (LangOpts.MicrosoftExt) {
+    Ident__identifier = RegisterBuiltinMacro(*this, "__identifier");
+    Ident__pragma = RegisterBuiltinMacro(*this, "__pragma");
+  } else {
+    Ident__identifier = 0;
+    Ident__pragma = 0;
+  }
+
   // Clang Extensions.
   Ident__has_feature      = RegisterBuiltinMacro(*this, "__has_feature");
   Ident__has_extension    = RegisterBuiltinMacro(*this, "__has_extension");
@@ -119,12 +129,6 @@
     Ident__building_module = 0;
     Ident__MODULE__ = 0;
   }
-  
-  // Microsoft Extensions.
-  if (LangOpts.MicrosoftExt) 
-    Ident__pragma = RegisterBuiltinMacro(*this, "__pragma");
-  else
-    Ident__pragma = 0;
 }
 
 /// isTrivialSingleTokenExpansion - Return true if MI, which has a single token
@@ -293,11 +297,11 @@
     for (MacroDirective::DefInfo PrevDef = Def.getPreviousDefinition();
          PrevDef && !PrevDef.isUndefined();
          PrevDef = PrevDef.getPreviousDefinition()) {
-      if (PrevDef.getDirective()->isAmbiguous()) {
-        Diag(PrevDef.getMacroInfo()->getDefinitionLoc(),
-             diag::note_pp_ambiguous_macro_other)
-          << Identifier.getIdentifierInfo();
-      }
+      Diag(PrevDef.getMacroInfo()->getDefinitionLoc(),
+           diag::note_pp_ambiguous_macro_other)
+        << Identifier.getIdentifierInfo();
+      if (!PrevDef.getDirective()->isAmbiguous())
+        break;
     }
   }
 
@@ -796,7 +800,7 @@
     for (unsigned i = 0, e = MacroExpandingLexersStack.size(); i != e; ++i) {
       TokenLexer *prevLexer;
       size_t tokIndex;
-      llvm::tie(prevLexer, tokIndex) = MacroExpandingLexersStack[i];
+      std::tie(prevLexer, tokIndex) = MacroExpandingLexersStack[i];
       prevLexer->Tokens = MacroExpandedTokens.data() + tokIndex;
     }
   }
@@ -975,6 +979,7 @@
            .Case("is_abstract", LangOpts.CPlusPlus)
            .Case("is_base_of", LangOpts.CPlusPlus)
            .Case("is_class", LangOpts.CPlusPlus)
+           .Case("is_constructible", LangOpts.CPlusPlus)
            .Case("is_convertible_to", LangOpts.CPlusPlus)
            .Case("is_empty", LangOpts.CPlusPlus)
            .Case("is_enum", LangOpts.CPlusPlus)
@@ -1039,24 +1044,10 @@
            // C++1y features supported by other languages as extensions.
            .Case("cxx_binary_literals", true)
            .Case("cxx_init_captures", LangOpts.CPlusPlus11)
-           .Case("cxx_variable_templates", true)
+           .Case("cxx_variable_templates", LangOpts.CPlusPlus)
            .Default(false);
 }
 
-/// HasAttribute -  Return true if we recognize and implement the attribute
-/// specified by the given identifier.
-static bool HasAttribute(const IdentifierInfo *II) {
-  StringRef Name = II->getName();
-  // Normalize the attribute name, __foo__ becomes foo.
-  if (Name.startswith("__") && Name.endswith("__") && Name.size() >= 4)
-    Name = Name.substr(2, Name.size() - 4);
-
-  // FIXME: Do we need to handle namespaces here?
-  return llvm::StringSwitch<bool>(Name)
-#include "clang/Lex/AttrSpellings.inc"
-        .Default(false);
-}
-
 /// EvaluateHasIncludeCommon - Process a '__has_include("path")'
 /// or '__has_include_next("path")' expression.
 /// Returns true if successful.
@@ -1080,7 +1071,7 @@
   if (Tok.isNot(tok::l_paren)) {
     // No '(', use end of last token.
     LParenLoc = PP.getLocForEndOfToken(LParenLoc);
-    PP.Diag(LParenLoc, diag::err_pp_missing_lparen) << II->getName();
+    PP.Diag(LParenLoc, diag::err_pp_expected_after) << II << tok::l_paren;
     // If the next token looks like a filename or the start of one,
     // assume it is and process it as such.
     if (!Tok.is(tok::angle_string_literal) && !Tok.is(tok::string_literal) &&
@@ -1142,9 +1133,9 @@
 
   // Ensure we have a trailing ).
   if (Tok.isNot(tok::r_paren)) {
-    PP.Diag(PP.getLocForEndOfToken(FilenameLoc), diag::err_pp_missing_rparen)
-        << II->getName();
-    PP.Diag(LParenLoc, diag::note_matching) << "(";
+    PP.Diag(PP.getLocForEndOfToken(FilenameLoc), diag::err_pp_expected_after)
+        << II << tok::r_paren;
+    PP.Diag(LParenLoc, diag::note_matching) << tok::l_paren;
     return false;
   }
 
@@ -1201,7 +1192,8 @@
 
   // Ensure we have a '('.
   if (Tok.isNot(tok::l_paren)) {
-    PP.Diag(Tok.getLocation(), diag::err_pp_missing_lparen) << II->getName();
+    PP.Diag(Tok.getLocation(), diag::err_pp_expected_after) << II
+                                                            << tok::l_paren;
     return false;
   }
 
@@ -1225,8 +1217,9 @@
 
   // Ensure we have a trailing ).
   if (Tok.isNot(tok::r_paren)) {
-    PP.Diag(Tok.getLocation(), diag::err_pp_missing_rparen) << II->getName();
-    PP.Diag(LParenLoc, diag::note_matching) << "(";
+    PP.Diag(Tok.getLocation(), diag::err_pp_expected_after) << II
+                                                            << tok::r_paren;
+    PP.Diag(LParenLoc, diag::note_matching) << tok::l_paren;
     return false;
   }
 
@@ -1393,7 +1386,8 @@
       // Check for a builtin is trivial.
       Value = FeatureII->getBuiltinID() != 0;
     } else if (II == Ident__has_attribute)
-      Value = HasAttribute(FeatureII);
+      Value = hasAttribute(AttrSyntax::Generic, nullptr, FeatureII,
+                           getTargetInfo().getTriple(), getLangOpts());
     else if (II == Ident__has_extension)
       Value = HasExtension(*this, FeatureII);
     else {
@@ -1479,6 +1473,44 @@
     IdentifierInfo *ModuleII = getIdentifierInfo(getLangOpts().CurrentModule);
     Tok.setIdentifierInfo(ModuleII);
     Tok.setKind(ModuleII->getTokenID());
+  } else if (II == Ident__identifier) {
+    SourceLocation Loc = Tok.getLocation();
+
+    // We're expecting '__identifier' '(' identifier ')'. Try to recover
+    // if the parens are missing.
+    LexNonComment(Tok);
+    if (Tok.isNot(tok::l_paren)) {
+      // No '(', use end of last token.
+      Diag(getLocForEndOfToken(Loc), diag::err_pp_expected_after)
+        << II << tok::l_paren;
+      // If the next token isn't valid as our argument, we can't recover.
+      if (!Tok.isAnnotation() && Tok.getIdentifierInfo())
+        Tok.setKind(tok::identifier);
+      return;
+    }
+
+    SourceLocation LParenLoc = Tok.getLocation();
+    LexNonComment(Tok);
+
+    if (!Tok.isAnnotation() && Tok.getIdentifierInfo())
+      Tok.setKind(tok::identifier);
+    else {
+      Diag(Tok.getLocation(), diag::err_pp_identifier_arg_not_identifier)
+        << Tok.getKind();
+      // Don't walk past anything that's not a real token.
+      if (Tok.is(tok::eof) || Tok.is(tok::eod) || Tok.isAnnotation())
+        return;
+    }
+
+    // Discard the ')', preserving 'Tok' as our result.
+    Token RParen;
+    LexNonComment(RParen);
+    if (RParen.isNot(tok::r_paren)) {
+      Diag(getLocForEndOfToken(Tok.getLocation()), diag::err_pp_expected_after)
+        << Tok.getKind() << tok::r_paren;
+      Diag(LParenLoc, diag::note_matching) << tok::l_paren;
+    }
+    return;
   } else {
     llvm_unreachable("Unknown identifier!");
   }
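
For reference, the __identifier form handled above is used like this (requires -fms-extensions; a minimal example, not taken from the patch):

struct S {
  int __identifier(class);                       // member named 'class'
};

int readMember(S &Obj) { return Obj.__identifier(class); }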
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
index e2629a3..1ca16d3 100644
--- a/lib/Lex/PTHLexer.cpp
+++ b/lib/Lex/PTHLexer.cpp
@@ -21,11 +21,12 @@
 #include "clang/Lex/PTHManager.h"
 #include "clang/Lex/Preprocessor.h"
 #include "clang/Lex/Token.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringMap.h"
+#include "llvm/Support/EndianStream.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/system_error.h"
+#include <memory>
 using namespace clang;
 using namespace clang::io;
 
@@ -47,14 +48,17 @@
   //===--------------------------------------==//
   // Read the raw token data.
   //===--------------------------------------==//
+  using namespace llvm::support;
 
   // Shadow CurPtr into an automatic variable.
   const unsigned char *CurPtrShadow = CurPtr;
 
   // Read in the data for the token.
-  unsigned Word0 = ReadLE32(CurPtrShadow);
-  uint32_t IdentifierID = ReadLE32(CurPtrShadow);
-  uint32_t FileOffset = ReadLE32(CurPtrShadow);
+  unsigned Word0 = endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
+  uint32_t IdentifierID =
+      endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
+  uint32_t FileOffset =
+      endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
 
   tok::TokenKind TKind = (tok::TokenKind) (Word0 & 0xFF);
   Token::TokenFlags TFlags = (Token::TokenFlags) ((Word0 >> 8) & 0xFF);
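
A small sketch of the endian::readNext calls used here; note that the pointer argument is advanced past each value read (ReadTwoWords and its buffer are illustrative only):

#include "llvm/Support/Endian.h"
#include <cassert>
#include <cstdint>

static void ReadTwoWords() {
  using namespace llvm::support;
  const unsigned char Buf[8] = {0x01, 0x00, 0x00, 0x00,   // 1, little-endian
                                0x02, 0x00, 0x00, 0x00};  // 2
  const unsigned char *P = Buf;
  uint32_t First = endian::readNext<uint32_t, little, unaligned>(P);
  uint32_t Second = endian::readNext<uint32_t, little, unaligned>(P);
  assert(First == 1 && Second == 2 && P == Buf + 8);
}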
@@ -184,6 +188,7 @@
 
 /// SkipBlock - Used by Preprocessor to skip the current conditional block.
 bool PTHLexer::SkipBlock() {
+  using namespace llvm::support;
   assert(CurPPCondPtr && "No cached PP conditional information.");
   assert(LastHashTokPtr && "No known '#' token.");
 
@@ -192,10 +197,10 @@
 
   do {
     // Read the token offset from the side-table.
-    uint32_t Offset = ReadLE32(CurPPCondPtr);
+    uint32_t Offset = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
 
     // Read the target table index from the side-table.
-    TableIdx = ReadLE32(CurPPCondPtr);
+    TableIdx = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
 
     // Compute the actual memory address of the '#' token data for this entry.
     HashEntryI = TokBuf + Offset;
@@ -212,12 +217,13 @@
         PPCond + TableIdx*(sizeof(uint32_t)*2);
       assert(NextPPCondPtr >= CurPPCondPtr);
       // Read where we should jump to.
-      const unsigned char* HashEntryJ = TokBuf + ReadLE32(NextPPCondPtr);
+      const unsigned char *HashEntryJ =
+          TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
 
       if (HashEntryJ <= LastHashTokPtr) {
         // Jump directly to the next entry in the side table.
         HashEntryI = HashEntryJ;
-        TableIdx = ReadLE32(NextPPCondPtr);
+        TableIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
         CurPPCondPtr = NextPPCondPtr;
       }
     }
@@ -232,8 +238,9 @@
   CurPPCondPtr = NextPPCondPtr;
 
   // Read where we should jump to.
-  HashEntryI = TokBuf + ReadLE32(NextPPCondPtr);
-  uint32_t NextIdx = ReadLE32(NextPPCondPtr);
+  HashEntryI =
+      TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
+  uint32_t NextIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
 
   // By construction NextIdx will be zero if this is a #endif.  This is useful
   // to know to obviate lexing another token.
@@ -282,8 +289,10 @@
   // handling a #included file.  Just read the necessary data from the token
   // data buffer to construct the SourceLocation object.
   // NOTE: This is a virtual function; hence it is defined out-of-line.
+  using namespace llvm::support;
+
   const unsigned char *OffsetPtr = CurPtr + (DISK_TOKEN_SIZE - 4);
-  uint32_t Offset = ReadLE32(OffsetPtr);
+  uint32_t Offset = endian::readNext<uint32_t, little, aligned>(OffsetPtr);
   return FileStartLoc.getLocWithOffset(Offset);
 }
 
@@ -317,7 +326,9 @@
 
   static std::pair<unsigned, unsigned>
   ReadKeyDataLength(const unsigned char*& d) {
-    unsigned keyLen = (unsigned) ReadUnalignedLE16(d);
+    using namespace llvm::support;
+    unsigned keyLen =
+        (unsigned)endian::readNext<uint16_t, little, unaligned>(d);
     unsigned dataLen = (unsigned) *(d++);
     return std::make_pair(keyLen, dataLen);
   }
@@ -344,8 +355,9 @@
   static PTHFileData ReadData(const internal_key_type& k,
                               const unsigned char* d, unsigned) {
     assert(k.first == 0x1 && "Only file lookups can match!");
-    uint32_t x = ::ReadUnalignedLE32(d);
-    uint32_t y = ::ReadUnalignedLE32(d);
+    using namespace llvm::support;
+    uint32_t x = endian::readNext<uint32_t, little, unaligned>(d);
+    uint32_t y = endian::readNext<uint32_t, little, unaligned>(d);
     return PTHFileData(x, y);
   }
 };
@@ -376,7 +388,10 @@
 
   static std::pair<unsigned, unsigned>
   ReadKeyDataLength(const unsigned char*& d) {
-    return std::make_pair((unsigned) ReadUnalignedLE16(d), sizeof(uint32_t));
+    using namespace llvm::support;
+    return std::make_pair(
+        (unsigned)endian::readNext<uint16_t, little, unaligned>(d),
+        sizeof(uint32_t));
   }
 
   static std::pair<const char*, unsigned>
@@ -387,7 +402,8 @@
 
   static uint32_t ReadData(const internal_key_type& k, const unsigned char* d,
                            unsigned) {
-    return ::ReadUnalignedLE32(d);
+    using namespace llvm::support;
+    return endian::readNext<uint32_t, little, unaligned>(d);
   }
 };
 
@@ -419,13 +435,13 @@
 }
 
 static void InvalidPTH(DiagnosticsEngine &Diags, const char *Msg) {
-  Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, Msg));
+  Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0")) << Msg;
 }
 
 PTHManager *PTHManager::Create(const std::string &file,
                                DiagnosticsEngine &Diags) {
   // Memory map the PTH file.
-  OwningPtr<llvm::MemoryBuffer> File;
+  std::unique_ptr<llvm::MemoryBuffer> File;
 
   if (llvm::MemoryBuffer::getFile(file, File)) {
     // FIXME: Add ec.message() to this diag.
@@ -433,6 +449,8 @@
     return 0;
   }
 
+  using namespace llvm::support;
+
   // Get the buffer ranges and check if there are at least three 32-bit
   // words at the end of the file.
   const unsigned char *BufBeg = (const unsigned char*)File->getBufferStart();
@@ -447,7 +465,7 @@
 
   // Read the PTH version.
   const unsigned char *p = BufBeg + (sizeof("cfe-pth"));
-  unsigned Version = ReadLE32(p);
+  unsigned Version = endian::readNext<uint32_t, little, aligned>(p);
 
   if (Version < PTHManager::Version) {
     InvalidPTH(Diags,
@@ -468,14 +486,15 @@
   // Construct the file lookup table.  This will be used for mapping from
   // FileEntry*'s to cached tokens.
   const unsigned char* FileTableOffset = PrologueOffset + sizeof(uint32_t)*2;
-  const unsigned char* FileTable = BufBeg + ReadLE32(FileTableOffset);
+  const unsigned char *FileTable =
+      BufBeg + endian::readNext<uint32_t, little, aligned>(FileTableOffset);
 
   if (!(FileTable > BufBeg && FileTable < BufEnd)) {
     Diags.Report(diag::err_invalid_pth_file) << file;
     return 0; // FIXME: Proper error diagnostic?
   }
 
-  OwningPtr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
+  std::unique_ptr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
 
   // Warn if the PTH file is empty.  We still want to create a PTHManager
   // as the PTH could be used with -include-pth.
@@ -485,7 +504,8 @@
   // Get the location of the table mapping from persistent ids to the
   // data needed to reconstruct identifiers.
   const unsigned char* IDTableOffset = PrologueOffset + sizeof(uint32_t)*0;
-  const unsigned char* IData = BufBeg + ReadLE32(IDTableOffset);
+  const unsigned char *IData =
+      BufBeg + endian::readNext<uint32_t, little, aligned>(IDTableOffset);
 
   if (!(IData >= BufBeg && IData < BufEnd)) {
     Diags.Report(diag::err_invalid_pth_file) << file;
@@ -495,25 +515,27 @@
   // Get the location of the hashtable mapping between strings and
   // persistent IDs.
   const unsigned char* StringIdTableOffset = PrologueOffset + sizeof(uint32_t)*1;
-  const unsigned char* StringIdTable = BufBeg + ReadLE32(StringIdTableOffset);
+  const unsigned char *StringIdTable =
+      BufBeg + endian::readNext<uint32_t, little, aligned>(StringIdTableOffset);
   if (!(StringIdTable >= BufBeg && StringIdTable < BufEnd)) {
     Diags.Report(diag::err_invalid_pth_file) << file;
     return 0;
   }
 
-  OwningPtr<PTHStringIdLookup> SL(PTHStringIdLookup::Create(StringIdTable,
-                                                                  BufBeg));
+  std::unique_ptr<PTHStringIdLookup> SL(
+      PTHStringIdLookup::Create(StringIdTable, BufBeg));
 
   // Get the location of the spelling cache.
   const unsigned char* spellingBaseOffset = PrologueOffset + sizeof(uint32_t)*3;
-  const unsigned char* spellingBase = BufBeg + ReadLE32(spellingBaseOffset);
+  const unsigned char *spellingBase =
+      BufBeg + endian::readNext<uint32_t, little, aligned>(spellingBaseOffset);
   if (!(spellingBase >= BufBeg && spellingBase < BufEnd)) {
     Diags.Report(diag::err_invalid_pth_file) << file;
     return 0;
   }
 
   // Get the number of IdentifierInfos and pre-allocate the identifier cache.
-  uint32_t NumIds = ReadLE32(IData);
+  uint32_t NumIds = endian::readNext<uint32_t, little, aligned>(IData);
 
   // Pre-allocate the persistent ID -> IdentifierInfo* cache.  We use calloc()
   // so that we in the best case only zero out memory once when the OS returns
@@ -530,20 +552,23 @@
 
   // Compute the address of the original source file.
   const unsigned char* originalSourceBase = PrologueOffset + sizeof(uint32_t)*4;
-  unsigned len = ReadUnalignedLE16(originalSourceBase);
+  unsigned len =
+      endian::readNext<uint16_t, little, unaligned>(originalSourceBase);
   if (!len) originalSourceBase = 0;
 
   // Create the new PTHManager.
-  return new PTHManager(File.take(), FL.take(), IData, PerIDCache,
-                        SL.take(), NumIds, spellingBase,
-                        (const char*) originalSourceBase);
+  return new PTHManager(File.release(), FL.release(), IData, PerIDCache,
+                        SL.release(), NumIds, spellingBase,
+                        (const char *)originalSourceBase);
 }
 
 IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
+  using namespace llvm::support;
   // Look in the PTH file for the string data for the IdentifierInfo object.
   const unsigned char* TableEntry = IdDataTable + sizeof(uint32_t)*PersistentID;
-  const unsigned char* IDData =
-    (const unsigned char*)Buf->getBufferStart() + ReadLE32(TableEntry);
+  const unsigned char *IDData =
+      (const unsigned char *)Buf->getBufferStart() +
+      endian::readNext<uint32_t, little, aligned>(TableEntry);
   assert(IDData < (const unsigned char*)Buf->getBufferEnd());
 
   // Allocate the object.
@@ -579,6 +604,8 @@
   if (!FE)
     return 0;
 
+  using namespace llvm::support;
+
   // Lookup the FileEntry object in our file lookup data structure.  It will
   // return a variant that indicates whether or not there is an offset within
   // the PTH file that contains cached tokens.
@@ -596,7 +623,7 @@
 
   // Get the location of pp-conditional table.
   const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
-  uint32_t Len = ReadLE32(ppcond);
+  uint32_t Len = endian::readNext<uint32_t, little, aligned>(ppcond);
   if (Len == 0) ppcond = 0;
 
   assert(PP && "No preprocessor set yet!");
@@ -650,11 +677,13 @@
         d += 4 * 2; // Skip the first 2 words.
       }
 
-      uint64_t File = ReadUnalignedLE64(d);
-      uint64_t Device = ReadUnalignedLE64(d);
+      using namespace llvm::support;
+
+      uint64_t File = endian::readNext<uint64_t, little, unaligned>(d);
+      uint64_t Device = endian::readNext<uint64_t, little, unaligned>(d);
       llvm::sys::fs::UniqueID UniqueID(File, Device);
-      time_t ModTime = ReadUnalignedLE64(d);
-      uint64_t Size = ReadUnalignedLE64(d);
+      time_t ModTime = endian::readNext<uint64_t, little, unaligned>(d);
+      uint64_t Size = endian::readNext<uint64_t, little, unaligned>(d);
       return data_type(Size, ModTime, UniqueID, IsDirectory);
     }
 
@@ -675,19 +704,20 @@
   ~PTHStatCache() {}
 
   LookupResult getStat(const char *Path, FileData &Data, bool isFile,
-                       int *FileDescriptor) {
+                       vfs::File **F, vfs::FileSystem &FS) override {
     // Do the lookup for the file's data in the PTH file.
     CacheTy::iterator I = Cache.find(Path);
 
     // If we don't get a hit in the PTH file just forward to 'stat'.
     if (I == Cache.end())
-      return statChained(Path, Data, isFile, FileDescriptor);
+      return statChained(Path, Data, isFile, F, FS);
 
     const PTHStatData &D = *I;
 
     if (!D.HasData)
       return CacheMissing;
 
+    Data.Name = Path;
     Data.Size = D.Size;
     Data.ModTime = D.ModTime;
     Data.UniqueID = D.UniqueID;
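
The PTH reader above replaces the old ReadLE32/ReadUnalignedLE* helpers with llvm::support::endian::readNext<>(), which decodes a fixed-width little-endian value from the raw buffer and advances the cursor past it. A minimal sketch of the call shape, using the same template arguments as the aligned prologue reads above (the helper name readPrologueWord is hypothetical):

#include "llvm/Support/Endian.h"
#include <cstdint>

// Reads one 32-bit little-endian word and moves Cursor past it, matching the
// aligned reads in PTHManager::Create above.
static uint32_t readPrologueWord(const unsigned char *&Cursor) {
  using namespace llvm::support;
  return endian::readNext<uint32_t, little, aligned>(Cursor);
}
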
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index e4059ee..99ba8de 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -48,9 +48,7 @@
 //===----------------------------------------------------------------------===//
 
 PragmaNamespace::~PragmaNamespace() {
-  for (llvm::StringMap<PragmaHandler*>::iterator
-         I = Handlers.begin(), E = Handlers.end(); I != E; ++I)
-    delete I->second;
+  llvm::DeleteContainerSeconds(Handlers);
 }
 
 /// FindHandler - Check to see if there is already a handler for the
@@ -812,8 +810,8 @@
 /// PragmaOnceHandler - "\#pragma once" marks the file as atomically included.
 struct PragmaOnceHandler : public PragmaHandler {
   PragmaOnceHandler() : PragmaHandler("once") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &OnceTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &OnceTok) override {
     PP.CheckEndOfDirective("pragma once");
     PP.HandlePragmaOnce(OnceTok);
   }
@@ -823,8 +821,8 @@
 /// rest of the line is not lexed.
 struct PragmaMarkHandler : public PragmaHandler {
   PragmaMarkHandler() : PragmaHandler("mark") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &MarkTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &MarkTok) override {
     PP.HandlePragmaMark();
   }
 };
@@ -832,8 +830,8 @@
 /// PragmaPoisonHandler - "\#pragma poison x" marks x as not usable.
 struct PragmaPoisonHandler : public PragmaHandler {
   PragmaPoisonHandler() : PragmaHandler("poison") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &PoisonTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &PoisonTok) override {
     PP.HandlePragmaPoison(PoisonTok);
   }
 };
@@ -842,24 +840,24 @@
 /// as a system header, which silences warnings in it.
 struct PragmaSystemHeaderHandler : public PragmaHandler {
   PragmaSystemHeaderHandler() : PragmaHandler("system_header") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &SHToken) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &SHToken) override {
     PP.HandlePragmaSystemHeader(SHToken);
     PP.CheckEndOfDirective("pragma");
   }
 };
 struct PragmaDependencyHandler : public PragmaHandler {
   PragmaDependencyHandler() : PragmaHandler("dependency") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &DepToken) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &DepToken) override {
     PP.HandlePragmaDependency(DepToken);
   }
 };
 
 struct PragmaDebugHandler : public PragmaHandler {
   PragmaDebugHandler() : PragmaHandler("__debug") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &DepToken) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &DepToken) override {
     Token Tok;
     PP.LexUnexpandedToken(Tok);
     if (Tok.isNot(tok::identifier)) {
@@ -926,8 +924,9 @@
 #ifdef _MSC_VER
     #pragma warning(disable : 4717)
 #endif
-  void DebugOverflowStack() {
-    DebugOverflowStack();
+  static void DebugOverflowStack() {
+    void (*volatile Self)() = DebugOverflowStack;
+    Self();
   }
 #ifdef _MSC_VER
     #pragma warning(default : 4717)
@@ -942,8 +941,8 @@
 public:
   explicit PragmaDiagnosticHandler(const char *NS) :
     PragmaHandler("diagnostic"), Namespace(NS) {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &DiagToken) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &DiagToken) override {
     SourceLocation DiagLoc = DiagToken.getLocation();
     Token Tok;
     PP.LexUnexpandedToken(Tok);
@@ -1007,32 +1006,14 @@
   }
 };
 
-// Returns -1 on failure.
-static int LexSimpleInt(Preprocessor &PP, Token &Tok) {
-  assert(Tok.is(tok::numeric_constant));
-  SmallString<8> IntegerBuffer;
-  bool NumberInvalid = false;
-  StringRef Spelling = PP.getSpelling(Tok, IntegerBuffer, &NumberInvalid);
-  if (NumberInvalid)
-    return -1;
-  NumericLiteralParser Literal(Spelling, Tok.getLocation(), PP);
-  if (Literal.hadError || !Literal.isIntegerLiteral() || Literal.hasUDSuffix())
-    return -1;
-  llvm::APInt APVal(32, 0);
-  if (Literal.GetIntegerValue(APVal))
-    return -1;
-  PP.Lex(Tok);
-  return int(APVal.getLimitedValue(INT_MAX));
-}
-
 /// "\#pragma warning(...)".  MSVC's diagnostics do not map cleanly to clang's
 /// diagnostics, so we don't really implement this pragma.  We parse it and
 /// ignore it to avoid -Wunknown-pragma warnings.
 struct PragmaWarningHandler : public PragmaHandler {
   PragmaWarningHandler() : PragmaHandler("warning") {}
 
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &Tok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &Tok) override {
     // Parse things like:
     // warning(push, 1)
     // warning(pop)
@@ -1059,8 +1040,10 @@
       PP.Lex(Tok);
       if (Tok.is(tok::comma)) {
         PP.Lex(Tok);
-        if (Tok.is(tok::numeric_constant))
-          Level = LexSimpleInt(PP, Tok);
+        uint64_t Value;
+        if (Tok.is(tok::numeric_constant) &&
+            PP.parseSimpleIntegerLiteral(Tok, Value))
+          Level = int(Value);
         if (Level < 0 || Level > 4) {
           PP.Diag(Tok, diag::warn_pragma_warning_push_level);
           return;
@@ -1104,12 +1087,13 @@
         SmallVector<int, 4> Ids;
         PP.Lex(Tok);
         while (Tok.is(tok::numeric_constant)) {
-          int Id = LexSimpleInt(PP, Tok);
-          if (Id <= 0) {
+          uint64_t Value;
+          if (!PP.parseSimpleIntegerLiteral(Tok, Value) || Value == 0 ||
+              Value > INT_MAX) {
             PP.Diag(Tok, diag::warn_pragma_warning_expected_number);
             return;
           }
-          Ids.push_back(Id);
+          Ids.push_back(int(Value));
         }
         if (Callbacks)
           Callbacks->PragmaWarning(DiagLoc, Specifier, Ids);
@@ -1135,8 +1119,8 @@
 /// PragmaIncludeAliasHandler - "\#pragma include_alias("...")".
 struct PragmaIncludeAliasHandler : public PragmaHandler {
   PragmaIncludeAliasHandler() : PragmaHandler("include_alias") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &IncludeAliasTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &IncludeAliasTok) override {
     PP.HandlePragmaIncludeAlias(IncludeAliasTok);
   }
 };
@@ -1177,8 +1161,8 @@
                        StringRef Namespace = StringRef())
     : PragmaHandler(PragmaKind(Kind, true)), Kind(Kind), Namespace(Namespace) {}
 
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &Tok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &Tok) override {
     SourceLocation MessageLoc = Tok.getLocation();
     PP.Lex(Tok);
     bool ExpectClosingParen = false;
@@ -1230,8 +1214,8 @@
 /// macro on the top of the stack.
 struct PragmaPushMacroHandler : public PragmaHandler {
   PragmaPushMacroHandler() : PragmaHandler("push_macro") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &PushMacroTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &PushMacroTok) override {
     PP.HandlePragmaPushMacro(PushMacroTok);
   }
 };
@@ -1241,8 +1225,8 @@
 /// macro to the value on the top of the stack.
 struct PragmaPopMacroHandler : public PragmaHandler {
   PragmaPopMacroHandler() : PragmaHandler("pop_macro") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &PopMacroTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &PopMacroTok) override {
     PP.HandlePragmaPopMacro(PopMacroTok);
   }
 };
@@ -1252,8 +1236,8 @@
 /// PragmaSTDC_FENV_ACCESSHandler - "\#pragma STDC FENV_ACCESS ...".
 struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
   PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &Tok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &Tok) override {
     tok::OnOffSwitch OOS;
     if (PP.LexOnOffSwitch(OOS))
      return;
@@ -1266,8 +1250,8 @@
 struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
   PragmaSTDC_CX_LIMITED_RANGEHandler()
     : PragmaHandler("CX_LIMITED_RANGE") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &Tok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &Tok) override {
     tok::OnOffSwitch OOS;
     PP.LexOnOffSwitch(OOS);
   }
@@ -1276,8 +1260,8 @@
 /// PragmaSTDC_UnknownHandler - "\#pragma STDC ...".
 struct PragmaSTDC_UnknownHandler : public PragmaHandler {
   PragmaSTDC_UnknownHandler() {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &UnknownTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &UnknownTok) override {
     // C99 6.10.6p2, unknown forms are not allowed.
     PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
   }
@@ -1287,8 +1271,8 @@
 ///   \#pragma clang arc_cf_code_audited begin/end
 struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
   PragmaARCCFCodeAuditedHandler() : PragmaHandler("arc_cf_code_audited") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &NameTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &NameTok) override {
     SourceLocation Loc = NameTok.getLocation();
     bool IsBegin;
 
@@ -1351,8 +1335,8 @@
 struct PragmaRegionHandler : public PragmaHandler {
   PragmaRegionHandler(const char *pragma) : PragmaHandler(pragma) { }
 
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &NameTok) {
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &NameTok) override {
     // #pragma region: endregion matches can be verified
     // __pragma(region): no sense, but ignored by msvc
     // _Pragma is not valid for MSVC, but there isn't any point
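
Most of the Pragma.cpp churn above is mechanical: each handler drops the repeated 'virtual' and marks HandlePragma with 'override'. A hedged sketch of the resulting handler shape, with a hypothetical pragma name ("example") and class name; handlers of this form are installed on the preprocessor the same way the built-in ones in this file are:

#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"

namespace {
// Handles "#pragma example" by checking that nothing follows the pragma name,
// mirroring PragmaOnceHandler above.
struct PragmaExampleHandler : public clang::PragmaHandler {
  PragmaExampleHandler() : clang::PragmaHandler("example") {}
  void HandlePragma(clang::Preprocessor &PP,
                    clang::PragmaIntroducerKind Introducer,
                    clang::Token &FirstTok) override {
    PP.CheckEndOfDirective("pragma example");
  }
};
} // end anonymous namespace
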
diff --git a/lib/Lex/PreprocessingRecord.cpp b/lib/Lex/PreprocessingRecord.cpp
index 090aeed..db5a9f9 100644
--- a/lib/Lex/PreprocessingRecord.cpp
+++ b/lib/Lex/PreprocessingRecord.cpp
@@ -406,6 +406,10 @@
                       MacroNameTok.getLocation());
 }
 
+void PreprocessingRecord::SourceRangeSkipped(SourceRange Range) {
+  SkippedRanges.push_back(Range);
+}
+
 void PreprocessingRecord::MacroExpands(const Token &Id,const MacroDirective *MD,
                                        SourceRange Range,
                                        const MacroArgs *Args) {
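
The new SourceRangeSkipped override above simply records each region the preprocessor skips (for example the body of a failed #if) in SkippedRanges. A minimal sketch of the same PPCallbacks hook in a standalone collector (the class name SkippedRangeCollector is hypothetical):

#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/PPCallbacks.h"
#include <vector>

// Collects the source ranges the preprocessor skips over -- the same callback
// PreprocessingRecord now implements above.
class SkippedRangeCollector : public clang::PPCallbacks {
  std::vector<clang::SourceRange> Skipped;

public:
  void SourceRangeSkipped(clang::SourceRange Range) override {
    Skipped.push_back(Range);
  }
};
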
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index b500efe..d85df38 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -26,7 +26,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Lex/Preprocessor.h"
-#include "clang/Lex/MacroArgs.h"
 #include "clang/Basic/FileManager.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Basic/TargetInfo.h"
@@ -35,6 +34,7 @@
 #include "clang/Lex/HeaderSearch.h"
 #include "clang/Lex/LexDiagnostic.h"
 #include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/MacroArgs.h"
 #include "clang/Lex/MacroInfo.h"
 #include "clang/Lex/ModuleLoader.h"
 #include "clang/Lex/Pragma.h"
@@ -42,8 +42,8 @@
 #include "clang/Lex/PreprocessorOptions.h"
 #include "clang/Lex/ScratchBuffer.h"
 #include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/Capacity.h"
 #include "llvm/Support/ConvertUTF.h"
@@ -59,16 +59,18 @@
                            const TargetInfo *target, SourceManager &SM,
                            HeaderSearch &Headers, ModuleLoader &TheModuleLoader,
                            IdentifierInfoLookup *IILookup, bool OwnsHeaders,
-                           bool DelayInitialization, bool IncrProcessing)
+                           bool DelayInitialization, bool IncrProcessing,
+                           TranslationUnitKind TUKind)
     : PPOpts(PPOpts), Diags(&diags), LangOpts(opts), Target(target),
       FileMgr(Headers.getFileMgr()), SourceMgr(SM), HeaderInfo(Headers),
       TheModuleLoader(TheModuleLoader), ExternalSource(0),
       Identifiers(opts, IILookup), IncrementalProcessing(IncrProcessing),
+      TUKind(TUKind),
       CodeComplete(0), CodeCompletionFile(0), CodeCompletionOffset(0),
       LastTokenWasAt(false), ModuleImportExpectsIdentifier(false),
       CodeCompletionReached(0), SkipMainFilePreamble(0, true), CurPPLexer(0),
-      CurDirLookup(0), CurLexerKind(CLK_Lexer), Callbacks(0),
-      MacroArgCache(0), Record(0), MIChainHead(0), MICache(0),
+      CurDirLookup(0), CurLexerKind(CLK_Lexer), CurSubmodule(0),
+      Callbacks(0), MacroArgCache(0), Record(0), MIChainHead(0), MICache(0),
       DeserialMIChainHead(0) {
   OwnsHeaderSearch = OwnsHeaders;
   
@@ -141,11 +143,7 @@
 Preprocessor::~Preprocessor() {
   assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");
 
-  while (!IncludeMacroStack.empty()) {
-    delete IncludeMacroStack.back().TheLexer;
-    delete IncludeMacroStack.back().TheTokenLexer;
-    IncludeMacroStack.pop_back();
-  }
+  IncludeMacroStack.clear();
 
   // Free any macro definitions.
   for (MacroInfoChain *I = MIChainHead ; I ; I = I->Next)
@@ -503,48 +501,6 @@
 // Lexer Event Handling.
 //===----------------------------------------------------------------------===//
 
-static void appendCodePoint(unsigned Codepoint,
-                            llvm::SmallVectorImpl<char> &Str) {
-  char ResultBuf[4];
-  char *ResultPtr = ResultBuf;
-  bool Res = llvm::ConvertCodePointToUTF8(Codepoint, ResultPtr);
-  (void)Res;
-  assert(Res && "Unexpected conversion failure");
-  Str.append(ResultBuf, ResultPtr);
-}
-
-static void expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
-  for (StringRef::iterator I = Input.begin(), E = Input.end(); I != E; ++I) {
-    if (*I != '\\') {
-      Buf.push_back(*I);
-      continue;
-    }
-
-    ++I;
-    assert(*I == 'u' || *I == 'U');
-
-    unsigned NumHexDigits;
-    if (*I == 'u')
-      NumHexDigits = 4;
-    else
-      NumHexDigits = 8;
-
-    assert(I + NumHexDigits <= E);
-
-    uint32_t CodePoint = 0;
-    for (++I; NumHexDigits != 0; ++I, --NumHexDigits) {
-      unsigned Value = llvm::hexDigitValue(*I);
-      assert(Value != -1U);
-
-      CodePoint <<= 4;
-      CodePoint += Value;
-    }
-
-    appendCodePoint(CodePoint, Buf);
-    --I;
-  }
-}
-
 /// LookUpIdentifierInfo - Given a tok::raw_identifier token, look up the
 /// identifier information for the token and install it into the token,
 /// updating the token kind accordingly.
@@ -669,7 +625,7 @@
   // name of a macro.
   // FIXME: This warning is disabled in cases where it shouldn't be, like
   //   "#define constexpr constexpr", "int constexpr;"
-  if (II.isCXX11CompatKeyword() & !DisableMacroExpansion) {
+  if (II.isCXX11CompatKeyword() && !DisableMacroExpansion) {
     Diag(Identifier, diag::warn_cxx11_keyword) << II.getName();
     // Don't diagnose this keyword again in this translation unit.
     II.setIsCXX11CompatKeyword(false);
@@ -819,6 +775,24 @@
   return true;
 }
 
+bool Preprocessor::parseSimpleIntegerLiteral(Token &Tok, uint64_t &Value) {
+  assert(Tok.is(tok::numeric_constant));
+  SmallString<8> IntegerBuffer;
+  bool NumberInvalid = false;
+  StringRef Spelling = getSpelling(Tok, IntegerBuffer, &NumberInvalid);
+  if (NumberInvalid)
+    return false;
+  NumericLiteralParser Literal(Spelling, Tok.getLocation(), *this);
+  if (Literal.hadError || !Literal.isIntegerLiteral() || Literal.hasUDSuffix())
+    return false;
+  llvm::APInt APVal(64, 0);
+  if (Literal.GetIntegerValue(APVal))
+    return false;
+  Lex(Tok);
+  Value = APVal.getLimitedValue();
+  return true;
+}
+
 void Preprocessor::addCommentHandler(CommentHandler *Handler) {
   assert(Handler && "NULL comment handler");
   assert(std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler) ==
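
parseSimpleIntegerLiteral() above replaces the file-local LexSimpleInt() removed from Pragma.cpp: on success it fills in the 64-bit value and lexes past the literal, so callers do the range checking themselves. A hedged sketch of a call site in the style of the updated "#pragma warning" handler (the helper lexWarningLevel is hypothetical):

#include "clang/Lex/Preprocessor.h"
#include <cstdint>

// Returns the warning level in [0, 4], or -1 if the current token is not a
// plain integer literal in that range.  On success the literal has already
// been consumed by parseSimpleIntegerLiteral().
static int lexWarningLevel(clang::Preprocessor &PP, clang::Token &Tok) {
  uint64_t Value;
  if (!Tok.is(clang::tok::numeric_constant) ||
      !PP.parseSimpleIntegerLiteral(Tok, Value) || Value > 4)
    return -1;
  return int(Value);
}
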
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index 0213afc..40e9707 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -12,9 +12,9 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Lex/TokenLexer.h"
-#include "clang/Lex/MacroArgs.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/MacroArgs.h"
 #include "clang/Lex/MacroInfo.h"
 #include "clang/Lex/Preprocessor.h"
 #include "llvm/ADT/SmallString.h"
@@ -37,6 +37,7 @@
   ExpandLocEnd = ELEnd;
   AtStartOfLine = Tok.isAtStartOfLine();
   HasLeadingSpace = Tok.hasLeadingSpace();
+  NextTokGetsSpace = false;
   Tokens = &*Macro->tokens_begin();
   OwnsTokens = false;
   DisableMacroExpansion = false;
@@ -95,6 +96,7 @@
   ExpandLocStart = ExpandLocEnd = SourceLocation();
   AtStartOfLine = false;
   HasLeadingSpace = false;
+  NextTokGetsSpace = false;
   MacroExpansionStart = SourceLocation();
 
   // Set HasLeadingSpace/AtStartOfLine so that the first token will be
@@ -119,13 +121,10 @@
   if (ActualArgs) ActualArgs->destroy(PP);
 }
 
-/// Remove comma ahead of __VA_ARGS__, if present, according to compiler dialect
-/// settings.  Returns true if the comma is removed.
-static bool MaybeRemoveCommaBeforeVaArgs(SmallVectorImpl<Token> &ResultToks,
-                                         bool &NextTokGetsSpace,
-                                         bool HasPasteOperator,
-                                         MacroInfo *Macro, unsigned MacroArgNo,
-                                         Preprocessor &PP) {
+bool TokenLexer::MaybeRemoveCommaBeforeVaArgs(SmallVectorImpl<Token> &ResultToks,
+                                              bool HasPasteOperator,
+                                              MacroInfo *Macro, unsigned MacroArgNo,
+                                              Preprocessor &PP) {
   // Is the macro argument __VA_ARGS__?
   if (!Macro->isVariadic() || MacroArgNo != Macro->getNumArgs()-1)
     return false;
@@ -133,7 +132,7 @@
   // In Microsoft-compatibility mode, a comma is removed in the expansion
   // of " ... , __VA_ARGS__ " if __VA_ARGS__ is empty.  This extension is
   // not supported by gcc.
-  if (!HasPasteOperator && !PP.getLangOpts().MicrosoftMode)
+  if (!HasPasteOperator && !PP.getLangOpts().MSVCCompat)
     return false;
 
   // GCC removes the comma in the expansion of " ... , ## __VA_ARGS__ " if
@@ -179,16 +178,14 @@
   // we install the newly expanded sequence as the new 'Tokens' list.
   bool MadeChange = false;
 
-  // NextTokGetsSpace - When this is true, the next token appended to the
-  // output list will get a leading space, regardless of whether it had one to
-  // begin with or not.  This is used for placemarker support.
-  bool NextTokGetsSpace = false;
-
   for (unsigned i = 0, e = NumTokens; i != e; ++i) {
     // If we found the stringify operator, get the argument stringified.  The
     // preprocessor already verified that the following token is a macro name
     // when the #define was parsed.
     const Token &CurTok = Tokens[i];
+    if (i != 0 && !Tokens[i-1].is(tok::hashhash) && CurTok.hasLeadingSpace())
+      NextTokGetsSpace = true;
+
     if (CurTok.is(tok::hash) || CurTok.is(tok::hashat)) {
       int ArgNo = Macro->getArgumentNum(Tokens[i+1].getIdentifierInfo());
       assert(ArgNo != -1 && "Token following # is not an argument?");
@@ -213,7 +210,7 @@
 
       // The stringified/charified string leading space flag gets set to match
       // the #/#@ operator.
-      if (CurTok.hasLeadingSpace() || NextTokGetsSpace)
+      if (NextTokGetsSpace)
         Res.setFlag(Token::LeadingSpace);
 
       ResultToks.push_back(Res);
@@ -223,6 +220,13 @@
       continue;
     }
 
+    // Find out if there is a paste (##) operator before or after the token.
+    bool NonEmptyPasteBefore =
+      !ResultToks.empty() && ResultToks.back().is(tok::hashhash);
+    bool PasteBefore = i != 0 && Tokens[i-1].is(tok::hashhash);
+    bool PasteAfter = i+1 != e && Tokens[i+1].is(tok::hashhash);
+    assert(!NonEmptyPasteBefore || PasteBefore);
+
     // Otherwise, if this is not an argument token, just add the token to the
     // output buffer.
     IdentifierInfo *II = CurTok.getIdentifierInfo();
@@ -234,7 +238,9 @@
       if (NextTokGetsSpace) {
         ResultToks.back().setFlag(Token::LeadingSpace);
         NextTokGetsSpace = false;
-      }
+      } else if (PasteBefore && !NonEmptyPasteBefore)
+        ResultToks.back().clearFlag(Token::LeadingSpace);
+
       continue;
     }
 
@@ -242,18 +248,12 @@
     // input.
     MadeChange = true;
 
-    // Otherwise, this is a use of the argument.  Find out if there is a paste
-    // (##) operator before or after the argument.
-    bool NonEmptyPasteBefore =
-      !ResultToks.empty() && ResultToks.back().is(tok::hashhash);
-    bool PasteBefore = i != 0 && Tokens[i-1].is(tok::hashhash);
-    bool PasteAfter = i+1 != e && Tokens[i+1].is(tok::hashhash);
-    assert(!NonEmptyPasteBefore || PasteBefore);
+    // Otherwise, this is a use of the argument.
 
     // In Microsoft mode, remove the comma before __VA_ARGS__ to ensure there
     // are no trailing commas if __VA_ARGS__ is empty.
     if (!PasteBefore && ActualArgs->isVarargsElidedUse() &&
-        MaybeRemoveCommaBeforeVaArgs(ResultToks, NextTokGetsSpace,
+        MaybeRemoveCommaBeforeVaArgs(ResultToks,
                                      /*HasPasteOperator=*/false,
                                      Macro, ArgNo, PP))
       continue;
@@ -282,7 +282,7 @@
         // behavior by not considering single commas from nested macro
         // expansions as argument separators. Set a flag on the token so we can
         // test for this later when the macro expansion is processed.
-        if (PP.getLangOpts().MicrosoftMode && NumToks == 1 &&
+        if (PP.getLangOpts().MSVCCompat && NumToks == 1 &&
             ResultToks.back().is(tok::comma))
           ResultToks.back().setFlag(Token::IgnoredComma);
 
@@ -304,13 +304,8 @@
         // before the first token should match the whitespace of the arg
         // identifier.
         ResultToks[FirstResult].setFlagValue(Token::LeadingSpace,
-                                             CurTok.hasLeadingSpace() ||
                                              NextTokGetsSpace);
         NextTokGetsSpace = false;
-      } else {
-        // If this is an empty argument, and if there was whitespace before the
-        // formal token, make sure the next token gets whitespace before it.
-        NextTokGetsSpace = CurTok.hasLeadingSpace();
       }
       continue;
     }
@@ -358,8 +353,7 @@
       // assembler-with-cpp mode, invalid pastes are allowed through: in this
       // case, we do not want the extra whitespace to be added.  For example,
       // we want ". ## foo" -> ".foo" not ". foo".
-      if ((CurTok.hasLeadingSpace() || NextTokGetsSpace) &&
-          !NonEmptyPasteBefore)
+      if (NextTokGetsSpace)
         ResultToks[ResultToks.size()-NumToks].setFlag(Token::LeadingSpace);
 
       NextTokGetsSpace = false;
@@ -370,11 +364,9 @@
     // 6.10.3.3p2,3) calls for a bunch of placemarker stuff to occur.  We
     // implement this by eating ## operators when a LHS or RHS expands to
     // empty.
-    NextTokGetsSpace |= CurTok.hasLeadingSpace();
     if (PasteAfter) {
       // Discard the argument token and skip (don't copy to the expansion
       // buffer) the paste operator after it.
-      NextTokGetsSpace |= Tokens[i+1].hasLeadingSpace();
       ++i;
       continue;
     }
@@ -385,7 +377,7 @@
     assert(PasteBefore);
     if (NonEmptyPasteBefore) {
       assert(ResultToks.back().is(tok::hashhash));
-      NextTokGetsSpace |= ResultToks.pop_back_val().hasLeadingSpace();
+      ResultToks.pop_back();
     }
 
     // If this is the __VA_ARGS__ token, and if the argument wasn't provided,
@@ -393,7 +385,7 @@
     // the ## was a comma, remove the comma.  This is a GCC extension which is
     // disabled when using -std=c99.
     if (ActualArgs->isVarargsElidedUse())
-      MaybeRemoveCommaBeforeVaArgs(ResultToks, NextTokGetsSpace,
+      MaybeRemoveCommaBeforeVaArgs(ResultToks,
                                    /*HasPasteOperator=*/true,
                                    Macro, ArgNo, PP);
 
@@ -425,7 +417,7 @@
 
     Tok.startToken();
     Tok.setFlagValue(Token::StartOfLine , AtStartOfLine);
-    Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
+    Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace || NextTokGetsSpace);
     if (CurToken == 0)
       Tok.setFlag(Token::LeadingEmptyMacro);
     return PP.HandleEndOfTokenLexer(Tok);
@@ -479,9 +471,14 @@
   if (isFirstToken) {
     Tok.setFlagValue(Token::StartOfLine , AtStartOfLine);
     Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
-    AtStartOfLine = false;
-    HasLeadingSpace = false;
+  } else {
+    // If this is not the first token, we may still need to pass through
+    // leading whitespace if we've expanded a macro.
+    if (AtStartOfLine) Tok.setFlag(Token::StartOfLine);
+    if (HasLeadingSpace) Tok.setFlag(Token::LeadingSpace);
   }
+  AtStartOfLine = false;
+  HasLeadingSpace = false;
 
   // Handle recursive expansion!
   if (!Tok.isAnnotation() && Tok.getIdentifierInfo() != 0) {
@@ -620,12 +617,11 @@
           SourceLocation Loc =
             SM.createExpansionLoc(PasteOpLoc, ExpandLocStart, ExpandLocEnd, 2);
           // If we're in microsoft extensions mode, downgrade this from a hard
-          // error to a warning that defaults to an error.  This allows
+          // error to an extension that defaults to an error.  This allows
           // disabling it.
-          PP.Diag(Loc,
-                  PP.getLangOpts().MicrosoftExt ? diag::err_pp_bad_paste_ms 
-                                                   : diag::err_pp_bad_paste)
-            << Buffer.str();
+          PP.Diag(Loc, PP.getLangOpts().MicrosoftExt ? diag::ext_pp_bad_paste_ms
+                                                     : diag::err_pp_bad_paste)
+              << Buffer.str();
         }
 
         // An error has occurred so exit loop.
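
The TokenLexer changes above centralize the leading-whitespace bookkeeping in the NextTokGetsSpace member while keeping the comma-removal rules for variadic macros: GCC's ", ## __VA_ARGS__" extension drops the comma when no variadic arguments are supplied, and MSVC-compatibility mode (now LangOpts.MSVCCompat) drops it even without the paste operator. A small self-contained illustration of the GCC-style behavior the code implements (LOG is an illustrative macro):

#include <cstdio>

// With the GNU extension (also accepted by Clang), ", ## __VA_ARGS__" deletes
// the comma when the variadic argument list is empty, so LOG("x\n") expands
// to std::printf("x\n") rather than the ill-formed std::printf("x\n", ).
#define LOG(fmt, ...) std::printf(fmt, ## __VA_ARGS__)

int main() {
  LOG("plain message\n");   // no variadic arguments: the comma is removed
  LOG("value = %d\n", 42);  // variadic arguments pass through unchanged
  return 0;
}
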
diff --git a/lib/Lex/UnicodeCharSets.h b/lib/Lex/UnicodeCharSets.h
index 01ae7e8..12b2456 100644
--- a/lib/Lex/UnicodeCharSets.h
+++ b/lib/Lex/UnicodeCharSets.h
@@ -298,7 +298,7 @@
   // Malayalam

   { 0x0D02, 0x0D03 }, { 0x0D05, 0x0D0C }, { 0x0D0E, 0x0D10 },
   { 0x0D12, 0x0D28 }, { 0x0D2A, 0x0D39 }, { 0x0D3E, 0x0D43 },
-  { 0x0D46, 0x0D48 }, { 0x0D4A, 0x0D4D }, { 0x0D60, 0x0D60 },
+  { 0x0D46, 0x0D48 }, { 0x0D4A, 0x0D4D }, { 0x0D60, 0x0D61 },
 
   // Digits (11)
   { 0x0D66, 0x0D6F },
diff --git a/lib/Parse/Android.mk b/lib/Parse/Android.mk
index 3f8f456..5bc2200 100644
--- a/lib/Parse/Android.mk
+++ b/lib/Parse/Android.mk
@@ -6,13 +6,11 @@
 include $(CLEAR_TBLGEN_VARS)
 
 TBLGEN_TABLES :=  \
-	AttrExprArgs.inc \
-	AttrIdentifierArg.inc \
 	AttrList.inc  \
-	AttrLateParsed.inc  \
 	AttrParsedAttrList.inc  \
-	AttrTypeArg.inc  \
 	Attrs.inc  \
+        AttrVisitor.inc \
+        AttrParserStringSwitches.inc \
 	CommentCommandList.inc \
 	CommentNodes.inc \
 	DeclNodes.inc  \
diff --git a/lib/Parse/CMakeLists.txt b/lib/Parse/CMakeLists.txt
index 08bf4e1..10b146f 100644
--- a/lib/Parse/CMakeLists.txt
+++ b/lib/Parse/CMakeLists.txt
@@ -1,3 +1,9 @@
+set(LLVM_LINK_COMPONENTS
+  MCParser
+  MC
+  Support
+  )
+
 add_clang_library(clangParse
   ParseAST.cpp
   ParseCXXInlineMethods.cpp
@@ -13,23 +19,8 @@
   ParseTemplate.cpp
   ParseTentative.cpp
   Parser.cpp
-  )
 
-add_dependencies(clangParse
-  ClangAttrClasses
-  ClangAttrIdentifierArg
-  ClangAttrLateParsed
-  ClangAttrList
-  ClangAttrParsedAttrList
-  ClangAttrTypeArg
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticParse
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangParse
+  LINK_LIBS
   clangBasic
   clangAST
   clangLex
diff --git a/lib/Parse/ParseAST.cpp b/lib/Parse/ParseAST.cpp
index 5678ece..0f5a1b3 100644
--- a/lib/Parse/ParseAST.cpp
+++ b/lib/Parse/ParseAST.cpp
@@ -23,9 +23,9 @@
 #include "clang/Sema/ExternalSemaSource.h"
 #include "clang/Sema/Sema.h"
 #include "clang/Sema/SemaConsumer.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/Support/CrashRecoveryContext.h"
 #include <cstdio>
+#include <memory>
 
 using namespace clang;
 
@@ -36,7 +36,7 @@
   const Parser &P;
 public:
   PrettyStackTraceParserEntry(const Parser &p) : P(p) {}
-  virtual void print(raw_ostream &OS) const;
+  void print(raw_ostream &OS) const override;
 };
 
 /// If a crash happens while the parser is active, print out a line indicating
@@ -88,7 +88,8 @@
                      CodeCompleteConsumer *CompletionConsumer,
                      bool SkipFunctionBodies) {
 
-  OwningPtr<Sema> S(new Sema(PP, Ctx, *Consumer, TUKind, CompletionConsumer));
+  std::unique_ptr<Sema> S(
+      new Sema(PP, Ctx, *Consumer, TUKind, CompletionConsumer));
 
   // Recover resources if we crash before exiting this method.
   llvm::CrashRecoveryContextCleanupRegistrar<Sema> CleanupSema(S.get());
@@ -109,8 +110,8 @@
 
   ASTConsumer *Consumer = &S.getASTConsumer();
 
-  OwningPtr<Parser> ParseOP(new Parser(S.getPreprocessor(), S,
-                                       SkipFunctionBodies));
+  std::unique_ptr<Parser> ParseOP(
+      new Parser(S.getPreprocessor(), S, SkipFunctionBodies));
   Parser &P = *ParseOP.get();
 
   PrettyStackTraceParserEntry CrashInfo(P);
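
As elsewhere in this patch, ParseAST.cpp swaps llvm::OwningPtr for std::unique_ptr; the only mapping to remember is take() -> release(), while reset() and get() keep their meaning. A minimal standalone sketch of the pattern (Widget and transferOwnership are placeholder names):

#include <memory>

struct Widget {};

// Owns the Widget locally, then hands ownership to the caller, mirroring the
// OwningPtr::take() -> unique_ptr::release() rewrites in this change.
Widget *transferOwnership() {
  std::unique_ptr<Widget> P(new Widget()); // was: OwningPtr<Widget> P(...)
  // ... P.get() can be used while this function still owns the object ...
  return P.release();                      // was: return P.take();
}
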
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index 7792305..4cf87e5 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -19,13 +19,6 @@
 #include "clang/Sema/Scope.h"
 using namespace clang;
 
-/// Get the FunctionDecl for a function or function template decl.
-static FunctionDecl *getFunctionDecl(Decl *D) {
-  if (FunctionDecl *fn = dyn_cast<FunctionDecl>(D))
-    return fn;
-  return cast<FunctionTemplateDecl>(D)->getTemplatedDecl();
-}
-
 /// ParseCXXInlineMethodDef - We parsed and verified that the specified
 /// Declarator is a well formed C++ inline method definition. Now lex its body
 /// and store its tokens for parsing after the C++ class is complete.
@@ -69,9 +62,7 @@
 
   D.complete(FnD);
 
-  if (Tok.is(tok::equal)) {
-    ConsumeToken();
-
+  if (TryConsumeToken(tok::equal)) {
     if (!FnD) {
       SkipUntil(tok::semi);
       return 0;
@@ -79,20 +70,16 @@
 
     bool Delete = false;
     SourceLocation KWLoc;
-    if (Tok.is(tok::kw_delete)) {
-      Diag(Tok, getLangOpts().CPlusPlus11 ?
-           diag::warn_cxx98_compat_deleted_function :
-           diag::ext_deleted_function);
-
-      KWLoc = ConsumeToken();
+    if (TryConsumeToken(tok::kw_delete, KWLoc)) {
+      Diag(KWLoc, getLangOpts().CPlusPlus11
+                      ? diag::warn_cxx98_compat_deleted_function
+                      : diag::ext_deleted_function);
       Actions.SetDeclDeleted(FnD, KWLoc);
       Delete = true;
-    } else if (Tok.is(tok::kw_default)) {
-      Diag(Tok, getLangOpts().CPlusPlus11 ?
-           diag::warn_cxx98_compat_defaulted_function :
-           diag::ext_defaulted_function);
-
-      KWLoc = ConsumeToken();
+    } else if (TryConsumeToken(tok::kw_default, KWLoc)) {
+      Diag(KWLoc, getLangOpts().CPlusPlus11
+                      ? diag::warn_cxx98_compat_defaulted_function
+                      : diag::ext_defaulted_function);
       Actions.SetDeclDefaulted(FnD, KWLoc);
     } else {
       llvm_unreachable("function definition after = not 'delete' or 'default'");
@@ -102,9 +89,9 @@
       Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
         << Delete;
       SkipUntil(tok::semi);
-    } else {
-      ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
-                       Delete ? "delete" : "default", tok::semi);
+    } else if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+                                Delete ? "delete" : "default")) {
+      SkipUntil(tok::semi);
     }
 
     return FnD;
@@ -115,9 +102,9 @@
   // the tokens and store them for parsing at the end of the translation unit.
   if (getLangOpts().DelayedTemplateParsing &&
       DefinitionKind == FDK_Definition &&
-      !D.getDeclSpec().isConstexprSpecified() && 
-      !(FnD && getFunctionDecl(FnD) && 
-          getFunctionDecl(FnD)->getResultType()->getContainedAutoType()) &&
+      !D.getDeclSpec().isConstexprSpecified() &&
+      !(FnD && FnD->getAsFunction() &&
+        FnD->getAsFunction()->getReturnType()->getContainedAutoType()) &&
       ((Actions.CurContext->isDependentContext() ||
         (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
          TemplateInfo.Kind != ParsedTemplateInfo::ExplicitSpecialization)) &&
@@ -127,7 +114,7 @@
     LexTemplateFunctionForLateParsing(Toks);
 
     if (FnD) {
-      FunctionDecl *FD = getFunctionDecl(FnD);
+      FunctionDecl *FD = FnD->getAsFunction();
       Actions.CheckForFunctionRedefinition(FD);
       Actions.MarkAsLateParsedTemplate(FD, FnD, Toks);
     }
@@ -179,7 +166,7 @@
     // If you remove this, you can remove the code that clears the flag
     // after parsing the member.
     if (D.getDeclSpec().isFriendSpecified()) {
-      FunctionDecl *FD = getFunctionDecl(FnD);
+      FunctionDecl *FD = FnD->getAsFunction();
       Actions.CheckForFunctionRedefinition(FD);
       FD->setLateTemplateParsed(true);
     }
@@ -348,9 +335,7 @@
       if (DefArgResult.isInvalid())
         Actions.ActOnParamDefaultArgumentError(LM.DefaultArgs[I].Param);
       else {
-        if (Tok.is(tok::cxx_defaultarg_end))
-          ConsumeToken();
-        else {
+        if (!TryConsumeToken(tok::cxx_defaultarg_end)) {
           // The last two tokens are the terminator and the saved value of
           // Tok; the last token in the default argument is the one before
           // those.
@@ -464,7 +449,8 @@
   ParseFunctionStatementBody(LM.D, FnScope);
 
   // Clear the late-template-parsed bit if we set it before.
-  if (LM.D) getFunctionDecl(LM.D)->setLateTemplateParsed(false);
+  if (LM.D)
+    LM.D->getAsFunction()->setLateTemplateParsed(false);
 
   if (Tok.getLocation() != origLoc) {
     // Due to parsing error, we either went over the cached tokens or
@@ -536,10 +522,13 @@
 
   SourceLocation EqualLoc;
 
+  Actions.ActOnStartCXXInClassMemberInitializer();
+
   ExprResult Init = ParseCXXMemberInitializer(MI.Field, /*IsFunction=*/false, 
                                               EqualLoc);
 
-  Actions.ActOnCXXInClassMemberInitializer(MI.Field, EqualLoc, Init.release());
+  Actions.ActOnFinishCXXInClassMemberInitializer(MI.Field, EqualLoc,
+                                                 Init.release());
 
   // The next token should be our artificial terminating EOF token.
   if (Tok.isNot(tok::eof)) {
@@ -580,6 +569,9 @@
 
     switch (Tok.getKind()) {
     case tok::eof:
+    case tok::annot_module_begin:
+    case tok::annot_module_end:
+    case tok::annot_module_include:
       // Ran out of tokens.
       return false;
 
@@ -675,7 +667,7 @@
                          /*StopAtSemi=*/true,
                          /*ConsumeFinalToken=*/false);
     if (Tok.isNot(tok::l_brace))
-      return Diag(Tok.getLocation(), diag::err_expected_lbrace);
+      return Diag(Tok.getLocation(), diag::err_expected) << tok::l_brace;
 
     Toks.push_back(Tok);
     ConsumeBrace();
@@ -708,8 +700,8 @@
       Toks.push_back(Tok);
       ConsumeParen();
       if (!ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/true)) {
-        Diag(Tok.getLocation(), diag::err_expected_rparen);
-        Diag(OpenLoc, diag::note_matching) << "(";
+        Diag(Tok.getLocation(), diag::err_expected) << tok::r_paren;
+        Diag(OpenLoc, diag::note_matching) << tok::l_paren;
         return true;
       }
     }
@@ -756,13 +748,15 @@
                                 /*ConsumeFinalToken=*/false)) {
         // We're not just missing the initializer, we're also missing the
         // function body!
-        return Diag(Tok.getLocation(), diag::err_expected_lbrace);
+        return Diag(Tok.getLocation(), diag::err_expected) << tok::l_brace;
       }
     } else if (Tok.isNot(tok::l_paren) && Tok.isNot(tok::l_brace)) {
       // We found something weird in a mem-initializer-id.
-      return Diag(Tok.getLocation(), getLangOpts().CPlusPlus11
-                                         ? diag::err_expected_lparen_or_lbrace
-                                         : diag::err_expected_lparen);
+      if (getLangOpts().CPlusPlus11)
+        return Diag(Tok.getLocation(), diag::err_expected_either)
+               << tok::l_paren << tok::l_brace;
+      else
+        return Diag(Tok.getLocation(), diag::err_expected) << tok::l_paren;
     }
 
     tok::TokenKind kind = Tok.getKind();
@@ -784,11 +778,10 @@
     // Grab the initializer (or the subexpression of the template argument).
     // FIXME: If we support lambdas here, we'll need to set StopAtSemi to false
     //        if we might be inside the braces of a lambda-expression.
-    if (!ConsumeAndStoreUntil(IsLParen ? tok::r_paren : tok::r_brace,
-                              Toks, /*StopAtSemi=*/true)) {
-      Diag(Tok, IsLParen ? diag::err_expected_rparen :
-                           diag::err_expected_rbrace);
-      Diag(OpenLoc, diag::note_matching) << (IsLParen ? "(" : "{");
+    tok::TokenKind CloseKind = IsLParen ? tok::r_paren : tok::r_brace;
+    if (!ConsumeAndStoreUntil(CloseKind, Toks, /*StopAtSemi=*/true)) {
+      Diag(Tok, diag::err_expected) << CloseKind;
+      Diag(OpenLoc, diag::note_matching) << kind;
       return true;
     }
 
@@ -822,7 +815,8 @@
       ConsumeBrace();
       return false;
     } else if (!MightBeTemplateArgument) {
-      return Diag(Tok.getLocation(), diag::err_expected_lbrace_or_comma);
+      return Diag(Tok.getLocation(), diag::err_expected_either) << tok::l_brace
+                                                                << tok::comma;
     }
   }
 }
@@ -965,6 +959,9 @@
       goto consume_token;
 
     case tok::eof:
+    case tok::annot_module_begin:
+    case tok::annot_module_end:
+    case tok::annot_module_include:
       // Ran out of tokens.
       return false;
 
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index 7b80934..331c5e4 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -13,10 +13,12 @@
 
 #include "clang/Parse/Parser.h"
 #include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/Attributes.h"
 #include "clang/Basic/CharInfo.h"
-#include "clang/Basic/OpenCL.h"
+#include "clang/Basic/TargetInfo.h"
 #include "clang/Parse/ParseDiagnostic.h"
 #include "clang/Sema/Lookup.h"
 #include "clang/Sema/ParsedTemplate.h"
@@ -69,9 +71,11 @@
 /// isAttributeLateParsed - Return true if the attribute has arguments that
 /// require late parsing.
 static bool isAttributeLateParsed(const IdentifierInfo &II) {
+#define CLANG_ATTR_LATE_PARSED_LIST
     return llvm::StringSwitch<bool>(II.getName())
-#include "clang/Parse/AttrLateParsed.inc"
+#include "clang/Parse/AttrParserStringSwitches.inc"
         .Default(false);
+#undef CLANG_ATTR_LATE_PARSED_LIST
 }
 
 /// ParseGNUAttributes - Parse a non-empty attributes list.
@@ -117,7 +121,8 @@
 /// We follow the C++ model, but don't allow junk after the identifier.
 void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
                                 SourceLocation *endLoc,
-                                LateParsedAttrList *LateAttrs) {
+                                LateParsedAttrList *LateAttrs,
+                                Declarator *D) {
   assert(Tok.is(tok::kw___attribute) && "Not a GNU attribute list!");
 
   while (Tok.is(tok::kw___attribute)) {
@@ -132,49 +137,54 @@
       return;
     }
     // Parse the attribute-list. e.g. __attribute__(( weak, alias("__f") ))
-    while (Tok.is(tok::identifier) || isDeclarationSpecifier() ||
-           Tok.is(tok::comma)) {
-      if (Tok.is(tok::comma)) {
-        // allows for empty/non-empty attributes. ((__vector_size__(16),,,,))
-        ConsumeToken();
+    while (true) {
+      // Allow empty/non-empty attributes. ((__vector_size__(16),,,,))
+      if (TryConsumeToken(tok::comma))
         continue;
-      }
-      // we have an identifier or declaration specifier (const, int, etc.)
+
+      // Expect an identifier or declaration specifier (const, int, etc.)
+      if (Tok.isNot(tok::identifier) && !isDeclarationSpecifier())
+        break;
+
       IdentifierInfo *AttrName = Tok.getIdentifierInfo();
       SourceLocation AttrNameLoc = ConsumeToken();
 
-      if (Tok.is(tok::l_paren)) {
-        // handle "parameterized" attributes
-        if (LateAttrs && isAttributeLateParsed(*AttrName)) {
-          LateParsedAttribute *LA =
-            new LateParsedAttribute(this, *AttrName, AttrNameLoc);
-          LateAttrs->push_back(LA);
-
-          // Attributes in a class are parsed at the end of the class, along
-          // with other late-parsed declarations.
-          if (!ClassStack.empty() && !LateAttrs->parseSoon())
-            getCurrentClass().LateParsedDeclarations.push_back(LA);
-
-          // consume everything up to and including the matching right parens
-          ConsumeAndStoreUntil(tok::r_paren, LA->Toks, true, false);
-
-          Token Eof;
-          Eof.startToken();
-          Eof.setLocation(Tok.getLocation());
-          LA->Toks.push_back(Eof);
-        } else {
-          ParseGNUAttributeArgs(AttrName, AttrNameLoc, attrs, endLoc,
-                                0, SourceLocation(), AttributeList::AS_GNU);
-        }
-      } else {
+      if (Tok.isNot(tok::l_paren)) {
         attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0, 0,
                      AttributeList::AS_GNU);
+        continue;
       }
+
+      // Handle "parameterized" attributes
+      if (!LateAttrs || !isAttributeLateParsed(*AttrName)) {
+        ParseGNUAttributeArgs(AttrName, AttrNameLoc, attrs, endLoc, 0,
+                              SourceLocation(), AttributeList::AS_GNU, D);
+        continue;
+      }
+
+      // Handle attributes with arguments that require late parsing.
+      LateParsedAttribute *LA =
+          new LateParsedAttribute(this, *AttrName, AttrNameLoc);
+      LateAttrs->push_back(LA);
+
+      // Attributes in a class are parsed at the end of the class, along
+      // with other late-parsed declarations.
+      if (!ClassStack.empty() && !LateAttrs->parseSoon())
+        getCurrentClass().LateParsedDeclarations.push_back(LA);
+
+      // consume everything up to and including the matching right parens
+      ConsumeAndStoreUntil(tok::r_paren, LA->Toks, true, false);
+
+      Token Eof;
+      Eof.startToken();
+      Eof.setLocation(Tok.getLocation());
+      LA->Toks.push_back(Eof);
     }
-    if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
+
+    if (ExpectAndConsume(tok::r_paren))
       SkipUntil(tok::r_paren, StopAtSemi);
     SourceLocation Loc = Tok.getLocation();
-    if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
+    if (ExpectAndConsume(tok::r_paren))
       SkipUntil(tok::r_paren, StopAtSemi);
     if (endLoc)
       *endLoc = Loc;
@@ -190,16 +200,30 @@
 
 /// \brief Determine whether the given attribute has an identifier argument.
 static bool attributeHasIdentifierArg(const IdentifierInfo &II) {
+#define CLANG_ATTR_IDENTIFIER_ARG_LIST
   return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
-#include "clang/Parse/AttrIdentifierArg.inc"
+#include "clang/Parse/AttrParserStringSwitches.inc"
            .Default(false);
+#undef CLANG_ATTR_IDENTIFIER_ARG_LIST
 }
 
 /// \brief Determine whether the given attribute parses a type argument.
 static bool attributeIsTypeArgAttr(const IdentifierInfo &II) {
+#define CLANG_ATTR_TYPE_ARG_LIST
   return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
-#include "clang/Parse/AttrTypeArg.inc"
+#include "clang/Parse/AttrParserStringSwitches.inc"
            .Default(false);
+#undef CLANG_ATTR_TYPE_ARG_LIST
+}
+
+/// \brief Determine whether the given attribute requires parsing its arguments
+/// in an unevaluated context or not.
+static bool attributeParsedArgsUnevaluated(const IdentifierInfo &II) {
+#define CLANG_ATTR_ARG_CONTEXT_LIST
+  return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+           .Default(false);
+#undef CLANG_ATTR_ARG_CONTEXT_LIST
 }
 
 IdentifierLoc *Parser::ParseIdentifierLoc() {
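
The separate generated tables used above (AttrLateParsed.inc, AttrIdentifierArg.inc, AttrTypeArg.inc) are folded into a single AttrParserStringSwitches.inc whose sections are selected by guard macros such as CLANG_ATTR_LATE_PARSED_LIST. A hedged two-part sketch of that pattern, with hypothetical file, macro, and function names (switches.inc, DEMO_LATE_PARSED_LIST, isLateParsedDemo):

// ---- switches.inc (stand-in for the generated file) -----------------------
// Each section is emitted only when its guard macro is defined by the
// including translation unit, e.g.:
//
//   #if defined(DEMO_LATE_PARSED_LIST)
//   .Case("guarded_by", true)
//   .Case("lockable", true)
//   #endif

// ---- consumer --------------------------------------------------------------
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

static bool isLateParsedDemo(llvm::StringRef Name) {
#define DEMO_LATE_PARSED_LIST
  return llvm::StringSwitch<bool>(Name)
#include "switches.inc"
      .Default(false);
#undef DEMO_LATE_PARSED_LIST
}
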
@@ -237,54 +261,19 @@
                  0, AttrNameLoc, 0, 0, AttributeList::AS_GNU);
 }
 
-/// Parse the arguments to a parameterized GNU attribute or
-/// a C++11 attribute in "gnu" namespace.
-void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
-                                   SourceLocation AttrNameLoc,
-                                   ParsedAttributes &Attrs,
-                                   SourceLocation *EndLoc,
-                                   IdentifierInfo *ScopeName,
-                                   SourceLocation ScopeLoc,
-                                   AttributeList::Syntax Syntax) {
-
-  assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
-
-  AttributeList::Kind AttrKind =
-      AttributeList::getKind(AttrName, ScopeName, Syntax);
-
-  // Availability attributes have their own grammar.
-  // FIXME: All these cases fail to pass in the syntax and scope, and might be
-  // written as C++11 gnu:: attributes.
-  if (AttrKind == AttributeList::AT_Availability) {
-    ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
-    return;
-  }
-  // Thread safety attributes are parsed in an unevaluated context.
-  // FIXME: Share the bulk of the parsing code here and just pull out
-  // the unevaluated context.
-  if (IsThreadSafetyAttribute(AttrName->getName())) {
-    ParseThreadSafetyAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
-    return;
-  }
-  // Type safety attributes have their own grammar.
-  if (AttrKind == AttributeList::AT_TypeTagForDatatype) {
-    ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
-    return;
-  }
-  // Some attributes expect solely a type parameter.
-  if (attributeIsTypeArgAttr(*AttrName)) {
-    ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, EndLoc);
-    return;
-  }
-
+void Parser::ParseAttributeArgsCommon(
+    IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
+    ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+    SourceLocation ScopeLoc, AttributeList::Syntax Syntax) {
   // Ignore the left paren location for now.
   ConsumeParen();
 
   ArgsVector ArgExprs;
-
   if (Tok.is(tok::identifier)) {
     // If this attribute wants an 'identifier' argument, make it so.
     bool IsIdentifierArg = attributeHasIdentifierArg(*AttrName);
+    AttributeList::Kind AttrKind =
+        AttributeList::getKind(AttrName, ScopeName, Syntax);
 
     // If we don't know how to parse this attribute, but this is the only
     // token in this argument, assume it's meant to be an identifier.
@@ -304,112 +293,120 @@
       ConsumeToken();
 
     // Parse the non-empty comma-separated list of expressions.
-    while (1) {
+    do {
+      std::unique_ptr<EnterExpressionEvaluationContext> Unevaluated;
+      if (attributeParsedArgsUnevaluated(*AttrName))
+        Unevaluated.reset(
+            new EnterExpressionEvaluationContext(Actions, Sema::Unevaluated));
+
       ExprResult ArgExpr(ParseAssignmentExpression());
       if (ArgExpr.isInvalid()) {
         SkipUntil(tok::r_paren, StopAtSemi);
         return;
       }
       ArgExprs.push_back(ArgExpr.release());
-      if (Tok.isNot(tok::comma))
-        break;
-      ConsumeToken(); // Eat the comma, move to the next argument
-    }
+      // Eat the comma, move to the next argument
+    } while (TryConsumeToken(tok::comma));
   }
 
   SourceLocation RParen = Tok.getLocation();
-  if (!ExpectAndConsume(tok::r_paren, diag::err_expected_rparen)) {
+  if (!ExpectAndConsume(tok::r_paren)) {
     SourceLocation AttrLoc = ScopeLoc.isValid() ? ScopeLoc : AttrNameLoc;
     Attrs.addNew(AttrName, SourceRange(AttrLoc, RParen), ScopeName, ScopeLoc,
                  ArgExprs.data(), ArgExprs.size(), Syntax);
   }
+
+  if (EndLoc)
+    *EndLoc = RParen;
 }
 
-/// \brief Parses a single argument for a declspec, including the
-/// surrounding parens.
-void Parser::ParseMicrosoftDeclSpecWithSingleArg(IdentifierInfo *AttrName,
-                                                 SourceLocation AttrNameLoc,
-                                                 ParsedAttributes &Attrs)
-{
-  BalancedDelimiterTracker T(*this, tok::l_paren);
-  if (T.expectAndConsume(diag::err_expected_lparen_after,
-                         AttrName->getNameStart(), tok::r_paren))
-    return;
+/// Parse the arguments to a parameterized GNU attribute or
+/// a C++11 attribute in "gnu" namespace.
+void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
+                                   SourceLocation AttrNameLoc,
+                                   ParsedAttributes &Attrs,
+                                   SourceLocation *EndLoc,
+                                   IdentifierInfo *ScopeName,
+                                   SourceLocation ScopeLoc,
+                                   AttributeList::Syntax Syntax,
+                                   Declarator *D) {
 
-  ExprResult ArgExpr(ParseConstantExpression());
-  if (ArgExpr.isInvalid()) {
-    T.skipToEnd();
+  assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
+
+  AttributeList::Kind AttrKind =
+      AttributeList::getKind(AttrName, ScopeName, Syntax);
+
+  // Availability attributes have their own grammar.
+  // FIXME: All these cases fail to pass in the syntax and scope, and might be
+  // written as C++11 gnu:: attributes.
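+  // For illustration only (hypothetical usage, not added by this change), a
+  // typical availability spelling handled by this path looks like:
+  //   __attribute__((availability(macosx, introduced=10.4, deprecated=10.6,
+  //                               obsoleted=10.7)))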
+  if (AttrKind == AttributeList::AT_Availability) {
+    ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
     return;
   }
-  ArgsUnion ExprList = ArgExpr.take();
-  Attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, &ExprList, 1,
-               AttributeList::AS_Declspec);
 
-  T.consumeClose();
+  if (AttrKind == AttributeList::AT_ObjCBridgeRelated) {
+    ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
+    return;
+  }
+
+  // Type safety attributes have their own grammar.
+  if (AttrKind == AttributeList::AT_TypeTagForDatatype) {
+    ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
+    return;
+  }
+
+  // Some attributes expect solely a type parameter.
+  if (attributeIsTypeArgAttr(*AttrName)) {
+    ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, EndLoc);
+    return;
+  }
+
+  // These may refer to the function arguments, but need to be parsed early to
+  // participate in determining whether it's a redeclaration.
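+  // For illustration (hypothetical declaration, not part of this change), the
+  // enable_if condition may name a parameter of the function it appertains to:
+  //   void put(int n) __attribute__((enable_if(n > 0, "n must be positive")));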
+  std::unique_ptr<ParseScope> PrototypeScope;
+  if (AttrName->isStr("enable_if") && D && D->isFunctionDeclarator()) {
+    DeclaratorChunk::FunctionTypeInfo FTI = D->getFunctionTypeInfo();
+    PrototypeScope.reset(new ParseScope(this, Scope::FunctionPrototypeScope |
+                                        Scope::FunctionDeclarationScope |
+                                        Scope::DeclScope));
+    for (unsigned i = 0; i != FTI.NumParams; ++i) {
+      ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
+      Actions.ActOnReenterCXXMethodParameter(getCurScope(), Param);
+    }
+  }
+
+  ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+                           ScopeLoc, Syntax);
 }
 
-/// \brief Determines whether a declspec is a "simple" one requiring no
-/// arguments.
-bool Parser::IsSimpleMicrosoftDeclSpec(IdentifierInfo *Ident) {
-  return llvm::StringSwitch<bool>(Ident->getName())
-    .Case("dllimport", true)
-    .Case("dllexport", true)
-    .Case("noreturn", true)
-    .Case("nothrow", true)
-    .Case("noinline", true)
-    .Case("naked", true)
-    .Case("appdomain", true)
-    .Case("process", true)
-    .Case("jitintrinsic", true)
-    .Case("noalias", true)
-    .Case("restrict", true)
-    .Case("novtable", true)
-    .Case("selectany", true)
-    .Case("thread", true)
-    .Case("safebuffers", true )
-    .Default(false);
-}
+bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
+                                        SourceLocation AttrNameLoc,
+                                        ParsedAttributes &Attrs) {
+  // If the attribute isn't known, we will not attempt to parse any
+  // arguments.
+  if (!hasAttribute(AttrSyntax::Declspec, nullptr, AttrName,
+                    getTargetInfo().getTriple(), getLangOpts())) {
+    // Eat the left paren, then skip to the ending right paren.
+    ConsumeParen();
+    SkipUntil(tok::r_paren);
+    return false;
+  }
 
-/// \brief Attempts to parse a declspec which is not simple (one that takes
-/// parameters).  Will return false if we properly handled the declspec, or
-/// true if it is an unknown declspec.
-void Parser::ParseComplexMicrosoftDeclSpec(IdentifierInfo *Ident,
-                                           SourceLocation Loc,
-                                           ParsedAttributes &Attrs) {
-  // Try to handle the easy case first -- these declspecs all take a single
-  // parameter as their argument.
-  if (llvm::StringSwitch<bool>(Ident->getName())
-      .Case("uuid", true)
-      .Case("align", true)
-      .Case("allocate", true)
-      .Default(false)) {
-    ParseMicrosoftDeclSpecWithSingleArg(Ident, Loc, Attrs);
-  } else if (Ident->getName() == "deprecated") {
-    // The deprecated declspec has an optional single argument, so we will
-    // check for a l-paren to decide whether we should parse an argument or
-    // not.
-    if (Tok.getKind() == tok::l_paren)
-      ParseMicrosoftDeclSpecWithSingleArg(Ident, Loc, Attrs);
-    else
-      Attrs.addNew(Ident, Loc, 0, Loc, 0, 0, AttributeList::AS_Declspec);
-  } else if (Ident->getName() == "property") {
+  if (AttrName->getName() == "property") {
     // The property declspec is more complex in that it can take one or two
     // assignment expressions as a parameter, but the lhs of the assignment
     // must be named get or put.
-    if (Tok.isNot(tok::l_paren)) {
-      Diag(Tok.getLocation(), diag::err_expected_lparen_after)
-        << Ident->getNameStart();
-      return;
-    }
+
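+    // For illustration (hypothetical declaration, not part of this change),
+    // the accepted form is roughly:
+    //   __declspec(property(get=GetX, put=PutX)) int X;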
     BalancedDelimiterTracker T(*this, tok::l_paren);
     T.expectAndConsume(diag::err_expected_lparen_after,
-                       Ident->getNameStart(), tok::r_paren);
+                       AttrName->getNameStart(), tok::r_paren);
 
     enum AccessorKind {
       AK_Invalid = -1,
-      AK_Put = 0, AK_Get = 1 // indices into AccessorNames
+      AK_Put = 0,
+      AK_Get = 1 // indices into AccessorNames
     };
-    IdentifierInfo *AccessorNames[] = { 0, 0 };
+    IdentifierInfo *AccessorNames[] = {0, 0};
     bool HasInvalidAccessor = false;
 
     // Parse the accessor specifications.
@@ -419,7 +416,7 @@
         // If the user wrote a completely empty list, use a special diagnostic.
         if (Tok.is(tok::r_paren) && !HasInvalidAccessor &&
             AccessorNames[AK_Put] == 0 && AccessorNames[AK_Get] == 0) {
-          Diag(Loc, diag::err_ms_property_no_getter_or_putter);
+          Diag(AttrNameLoc, diag::err_ms_property_no_getter_or_putter);
           break;
         }
 
@@ -435,39 +432,38 @@
       } else if (KindStr == "put") {
         Kind = AK_Put;
 
-      // Recover from the common mistake of using 'set' instead of 'put'.
+        // Recover from the common mistake of using 'set' instead of 'put'.
       } else if (KindStr == "set") {
         Diag(KindLoc, diag::err_ms_property_has_set_accessor)
-          << FixItHint::CreateReplacement(KindLoc, "put");
+            << FixItHint::CreateReplacement(KindLoc, "put");
         Kind = AK_Put;
 
-      // Handle the mistake of forgetting the accessor kind by skipping
-      // this accessor.
+        // Handle the mistake of forgetting the accessor kind by skipping
+        // this accessor.
       } else if (NextToken().is(tok::comma) || NextToken().is(tok::r_paren)) {
         Diag(KindLoc, diag::err_ms_property_missing_accessor_kind);
         ConsumeToken();
         HasInvalidAccessor = true;
         goto next_property_accessor;
 
-      // Otherwise, complain about the unknown accessor kind.
+        // Otherwise, complain about the unknown accessor kind.
       } else {
         Diag(KindLoc, diag::err_ms_property_unknown_accessor);
         HasInvalidAccessor = true;
         Kind = AK_Invalid;
 
         // Try to keep parsing unless it doesn't look like an accessor spec.
-        if (!NextToken().is(tok::equal)) break;
+        if (!NextToken().is(tok::equal))
+          break;
       }
 
       // Consume the identifier.
       ConsumeToken();
 
       // Consume the '='.
-      if (Tok.is(tok::equal)) {
-        ConsumeToken();
-      } else {
+      if (!TryConsumeToken(tok::equal)) {
         Diag(Tok.getLocation(), diag::err_ms_property_expected_equal)
-          << KindStr;
+            << KindStr;
         break;
       }
 
@@ -489,41 +485,29 @@
 
     next_property_accessor:
       // Keep processing accessors until we run out.
-      if (Tok.is(tok::comma)) {
-        ConsumeAnyToken();
+      if (TryConsumeToken(tok::comma))
         continue;
 
       // If we run into the ')', stop without consuming it.
-      } else if (Tok.is(tok::r_paren)) {
+      if (Tok.is(tok::r_paren))
         break;
-      } else {
-        Diag(Tok.getLocation(), diag::err_ms_property_expected_comma_or_rparen);
-        break;
-      }
+
+      Diag(Tok.getLocation(), diag::err_ms_property_expected_comma_or_rparen);
+      break;
     }
 
     // Only add the property attribute if it was well-formed.
-    if (!HasInvalidAccessor) {
-      Attrs.addNewPropertyAttr(Ident, Loc, 0, SourceLocation(),
+    if (!HasInvalidAccessor)
+      Attrs.addNewPropertyAttr(AttrName, AttrNameLoc, 0, SourceLocation(),
                                AccessorNames[AK_Get], AccessorNames[AK_Put],
                                AttributeList::AS_Declspec);
-    }
     T.skipToEnd();
-  } else {
-    // We don't recognize this as a valid declspec, but instead of creating the
-    // attribute and allowing sema to warn about it, we will warn here instead.
-    // This is because some attributes have multiple spellings, but we need to
-    // disallow that for declspecs (such as align vs aligned).  If we made the
-    // attribute, we'd have to split the valid declspec spelling logic into
-    // both locations.
-    Diag(Loc, diag::warn_ms_declspec_unknown) << Ident;
-
-    // If there's an open paren, we should eat the open and close parens under
-    // the assumption that this unknown declspec has parameters.
-    BalancedDelimiterTracker T(*this, tok::l_paren);
-    if (!T.consumeOpen())
-      T.skipToEnd();
+    return !HasInvalidAccessor;
   }
+
+  ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, nullptr, nullptr,
+                           SourceLocation(), AttributeList::AS_Declspec);
+  return true;
 }
 
 /// [MS] decl-specifier:
@@ -543,7 +527,11 @@
 
   // An empty declspec is perfectly legal and should not warn.  Additionally,
   // you can specify multiple attributes per declspec.
-  while (Tok.getKind() != tok::r_paren) {
+  while (Tok.isNot(tok::r_paren)) {
+    // An attribute is not present here (e.g. a stray comma); skip it.
+    if (TryConsumeToken(tok::comma))
+      continue;
+
     // We expect either a well-known identifier or a generic string.  Anything
     // else is a malformed declspec.
     bool IsString = Tok.getKind() == tok::string_literal ? true : false;
@@ -571,17 +559,19 @@
       AttrNameLoc = ConsumeToken();
     }
 
-    if (IsString || IsSimpleMicrosoftDeclSpec(AttrName))
-      // If we have a generic string, we will allow it because there is no
-      // documented list of allowable string declspecs, but we know they exist
-      // (for instance, SAL declspecs in older versions of MSVC).
-      //
-      // Alternatively, if the identifier is a simple one, then it requires no
-      // arguments and can be turned into an attribute directly.
+    bool AttrHandled = false;
+
+    // Parse attribute arguments.
+    if (Tok.is(tok::l_paren))
+      AttrHandled = ParseMicrosoftDeclSpecArgs(AttrName, AttrNameLoc, Attrs);
+    else if (AttrName->getName() == "property")
+      // The property attribute must have an argument list.
+      Diag(Tok.getLocation(), diag::err_expected_lparen_after)
+          << AttrName->getName();
+
+    if (!AttrHandled)
       Attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0, 0,
                    AttributeList::AS_Declspec);
-    else
-      ParseComplexMicrosoftDeclSpec(AttrName, AttrNameLoc, Attrs);
   }
   T.consumeClose();
 }
@@ -620,56 +610,11 @@
   }
 }
 
-void Parser::ParseOpenCLQualifiers(DeclSpec &DS) {
-  // FIXME: The mapping from attribute spelling to semantics should be
-  //        performed in Sema, not here.
-  SourceLocation Loc = Tok.getLocation();
-  switch(Tok.getKind()) {
-    // OpenCL qualifiers:
-    case tok::kw___private:
-    case tok::kw_private:
-      DS.getAttributes().addNewInteger(
-          Actions.getASTContext(),
-          PP.getIdentifierInfo("address_space"), Loc, 0);
-      break;
-
-    case tok::kw___global:
-      DS.getAttributes().addNewInteger(
-          Actions.getASTContext(),
-          PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_global);
-      break;
-
-    case tok::kw___local:
-      DS.getAttributes().addNewInteger(
-          Actions.getASTContext(),
-          PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_local);
-      break;
-
-    case tok::kw___constant:
-      DS.getAttributes().addNewInteger(
-          Actions.getASTContext(),
-          PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_constant);
-      break;
-
-    case tok::kw___read_only:
-      DS.getAttributes().addNewInteger(
-          Actions.getASTContext(),
-          PP.getIdentifierInfo("opencl_image_access"), Loc, CLIA_read_only);
-      break;
-
-    case tok::kw___write_only:
-      DS.getAttributes().addNewInteger(
-          Actions.getASTContext(),
-          PP.getIdentifierInfo("opencl_image_access"), Loc, CLIA_write_only);
-      break;
-
-    case tok::kw___read_write:
-      DS.getAttributes().addNewInteger(
-          Actions.getASTContext(),
-          PP.getIdentifierInfo("opencl_image_access"), Loc, CLIA_read_write);
-      break;
-    default: break;
-  }
+void Parser::ParseOpenCLQualifiers(ParsedAttributes &Attrs) {
+  IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+  SourceLocation AttrNameLoc = Tok.getLocation();
+  Attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0, 0,
+               AttributeList::AS_Keyword);
 }
 
 /// \brief Parse a version number.
@@ -812,7 +757,7 @@
   // Opening '('.
   BalancedDelimiterTracker T(*this, tok::l_paren);
   if (T.consumeOpen()) {
-    Diag(Tok, diag::err_expected_lparen);
+    Diag(Tok, diag::err_expected) << tok::l_paren;
     return;
   }
 
@@ -825,8 +770,10 @@
   IdentifierLoc *Platform = ParseIdentifierLoc();
 
   // Parse the ',' following the platform name.
-  if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "", tok::r_paren))
+  if (ExpectAndConsume(tok::comma)) {
+    SkipUntil(tok::r_paren, StopAtSemi);
     return;
+  }
 
   // If we haven't grabbed the pointers for the identifiers
   // "introduced", "deprecated", and "obsoleted", do so now.
@@ -855,17 +802,11 @@
           << Keyword << SourceRange(UnavailableLoc);
       }
       UnavailableLoc = KeywordLoc;
-
-      if (Tok.isNot(tok::comma))
-        break;
-
-      ConsumeToken();
       continue;
     }
 
     if (Tok.isNot(tok::equal)) {
-      Diag(Tok, diag::err_expected_equal_after)
-        << Keyword;
+      Diag(Tok, diag::err_expected_after) << Keyword << tok::equal;
       SkipUntil(tok::r_paren, StopAtSemi);
       return;
     }
@@ -915,11 +856,7 @@
         << Keyword << VersionRange;
     }
 
-    if (Tok.isNot(tok::comma))
-      break;
-
-    ConsumeToken();
-  } while (true);
+  } while (TryConsumeToken(tok::comma));
 
   // Closing ')'.
   if (T.consumeClose())
@@ -959,6 +896,86 @@
                AttributeList::AS_GNU);
 }
 
+/// \brief Parse the contents of the "objc_bridge_related" attribute.
+/// objc_bridge_related '(' related_class ',' opt-class_method ',' opt-instance_method ')'
+/// related_class:
+///     Identifier
+///
+/// opt-class_method:
+///     Identifier: | <empty>
+///
+/// opt-instance_method:
+///     Identifier | <empty>
+///
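+/// For illustration (hypothetical usage, not introduced by this change), a
+/// spelling matching the grammar above is:
+///   __attribute__((objc_bridge_related(NSColor, colorWithCGColor:, CGColor)))
+/// where the class method name ends in ':' and the instance method name is a
+/// bare identifier.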
+void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
+                                SourceLocation ObjCBridgeRelatedLoc,
+                                ParsedAttributes &attrs,
+                                SourceLocation *endLoc) {
+  // Opening '('.
+  BalancedDelimiterTracker T(*this, tok::l_paren);
+  if (T.consumeOpen()) {
+    Diag(Tok, diag::err_expected) << tok::l_paren;
+    return;
+  }
+  
+  // Parse the related class name.
+  if (Tok.isNot(tok::identifier)) {
+    Diag(Tok, diag::err_objcbridge_related_expected_related_class);
+    SkipUntil(tok::r_paren, StopAtSemi);
+    return;
+  }
+  IdentifierLoc *RelatedClass = ParseIdentifierLoc();
+  if (ExpectAndConsume(tok::comma)) {
+    SkipUntil(tok::r_paren, StopAtSemi);
+    return;
+  }
+
+  // Parse optional class method name.
+  IdentifierLoc *ClassMethod = 0;
+  if (Tok.is(tok::identifier)) {
+    ClassMethod = ParseIdentifierLoc();
+    if (!TryConsumeToken(tok::colon)) {
+      Diag(Tok, diag::err_objcbridge_related_selector_name);
+      SkipUntil(tok::r_paren, StopAtSemi);
+      return;
+    }
+  }
+  if (!TryConsumeToken(tok::comma)) {
+    if (Tok.is(tok::colon))
+      Diag(Tok, diag::err_objcbridge_related_selector_name);
+    else
+      Diag(Tok, diag::err_expected) << tok::comma;
+    SkipUntil(tok::r_paren, StopAtSemi);
+    return;
+  }
+  
+  // Parse optional instance method name.
+  IdentifierLoc *InstanceMethod = 0;
+  if (Tok.is(tok::identifier))
+    InstanceMethod = ParseIdentifierLoc();
+  else if (Tok.isNot(tok::r_paren)) {
+    Diag(Tok, diag::err_expected) << tok::r_paren;
+    SkipUntil(tok::r_paren, StopAtSemi);
+    return;
+  }
+  
+  // Closing ')'.
+  if (T.consumeClose())
+    return;
+  
+  if (endLoc)
+    *endLoc = T.getCloseLocation();
+  
+  // Record this attribute
+  attrs.addNew(&ObjCBridgeRelated,
+               SourceRange(ObjCBridgeRelatedLoc, T.getCloseLocation()),
+               0, ObjCBridgeRelatedLoc,
+               RelatedClass,
+               ClassMethod,
+               InstanceMethod,
+               AttributeList::AS_GNU);
+  
+}
 
 // Late Parsed Attributes:
 // See other examples of late parsing in lib/Parse/ParseCXXInlineMethods
@@ -1038,13 +1055,6 @@
   // Consume the previously pushed token.
   ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
 
-  if (OnDefinition && !IsThreadSafetyAttribute(LA.AttrName.getName())) {
-    // FIXME: Do not warn on C++11 attributes, once we start supporting
-    // them here.
-    Diag(Tok, diag::warn_attribute_on_function_definition)
-      << LA.AttrName.getName();
-  }
-
   ParsedAttributes Attrs(AttrFactory);
   SourceLocation endLoc;
 
@@ -1071,7 +1081,7 @@
         Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
 
       ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
-                            0, SourceLocation(), AttributeList::AS_GNU);
+                            0, SourceLocation(), AttributeList::AS_GNU, 0);
 
       if (HasFunScope) {
         Actions.ActOnExitFunctionContext();
@@ -1084,15 +1094,20 @@
       // If there are multiple decls, then the decl cannot be within the
       // function scope.
       ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
-                            0, SourceLocation(), AttributeList::AS_GNU);
+                            0, SourceLocation(), AttributeList::AS_GNU, 0);
     }
   } else {
     Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
   }
 
-  for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i) {
+  const AttributeList *AL = Attrs.getList();
+  if (OnDefinition && AL && !AL->isCXX11Attribute() &&
+      AL->isKnownToGCC())
+    Diag(Tok, diag::warn_attribute_on_function_definition)
+      << &LA.AttrName;
+
+  for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i)
     Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
-  }
 
   if (Tok.getLocation() != OrigLoc) {
     // Due to a parsing error, we either went over the cached tokens or
@@ -1106,79 +1121,6 @@
   }
 }
 
-/// \brief Wrapper around a case statement checking if AttrName is
-/// one of the thread safety attributes
-bool Parser::IsThreadSafetyAttribute(StringRef AttrName) {
-  return llvm::StringSwitch<bool>(AttrName)
-      .Case("guarded_by", true)
-      .Case("guarded_var", true)
-      .Case("pt_guarded_by", true)
-      .Case("pt_guarded_var", true)
-      .Case("lockable", true)
-      .Case("scoped_lockable", true)
-      .Case("no_thread_safety_analysis", true)
-      .Case("acquired_after", true)
-      .Case("acquired_before", true)
-      .Case("exclusive_lock_function", true)
-      .Case("shared_lock_function", true)
-      .Case("exclusive_trylock_function", true)
-      .Case("shared_trylock_function", true)
-      .Case("unlock_function", true)
-      .Case("lock_returned", true)
-      .Case("locks_excluded", true)
-      .Case("exclusive_locks_required", true)
-      .Case("shared_locks_required", true)
-      .Default(false);
-}
-
-/// \brief Parse the contents of thread safety attributes. These
-/// should always be parsed as an expression list.
-///
-/// We need to special case the parsing due to the fact that if the first token
-/// of the first argument is an identifier, the main parse loop will store
-/// that token as a "parameter" and the rest of
-/// the arguments will be added to a list of "arguments". However,
-/// subsequent tokens in the first argument are lost. We instead parse each
-/// argument as an expression and add all arguments to the list of "arguments".
-/// In future, we will take advantage of this special case to also
-/// deal with some argument scoping issues here (for example, referring to a
-/// function parameter in the attribute on that function).
-void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
-                                        SourceLocation AttrNameLoc,
-                                        ParsedAttributes &Attrs,
-                                        SourceLocation *EndLoc) {
-  assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
-
-  BalancedDelimiterTracker T(*this, tok::l_paren);
-  T.consumeOpen();
-
-  ArgsVector ArgExprs;
-  bool ArgExprsOk = true;
-
-  // now parse the list of expressions
-  while (Tok.isNot(tok::r_paren)) {
-    EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
-    ExprResult ArgExpr(ParseAssignmentExpression());
-    if (ArgExpr.isInvalid()) {
-      ArgExprsOk = false;
-      T.consumeClose();
-      break;
-    } else {
-      ArgExprs.push_back(ArgExpr.release());
-    }
-    if (Tok.isNot(tok::comma))
-      break;
-    ConsumeToken(); // Eat the comma, move to the next argument
-  }
-  // Match the ')'.
-  if (ArgExprsOk && !T.consumeClose()) {
-    Attrs.addNew(&AttrName, AttrNameLoc, 0, AttrNameLoc, ArgExprs.data(),
-                 ArgExprs.size(), AttributeList::AS_GNU);
-  }
-  if (EndLoc)
-    *EndLoc = T.getCloseLocation();
-}
-
 void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                               SourceLocation AttrNameLoc,
                                               ParsedAttributes &Attrs,
@@ -1189,18 +1131,16 @@
   T.consumeOpen();
 
   if (Tok.isNot(tok::identifier)) {
-    Diag(Tok, diag::err_expected_ident);
+    Diag(Tok, diag::err_expected) << tok::identifier;
     T.skipToEnd();
     return;
   }
   IdentifierLoc *ArgumentKind = ParseIdentifierLoc();
 
-  if (Tok.isNot(tok::comma)) {
-    Diag(Tok, diag::err_expected_comma);
+  if (ExpectAndConsume(tok::comma)) {
     T.skipToEnd();
     return;
   }
-  ConsumeToken();
 
   SourceRange MatchingCTypeRange;
   TypeResult MatchingCType = ParseTypeName(&MatchingCTypeRange);
@@ -1211,10 +1151,9 @@
 
   bool LayoutCompatible = false;
   bool MustBeNull = false;
-  while (Tok.is(tok::comma)) {
-    ConsumeToken();
+  while (TryConsumeToken(tok::comma)) {
     if (Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident);
+      Diag(Tok, diag::err_expected) << tok::identifier;
       T.skipToEnd();
       return;
     }
@@ -1508,8 +1447,7 @@
         // This declaration isn't over yet. Keep skipping.
         continue;
       }
-      if (Tok.is(tok::semi))
-        ConsumeToken();
+      TryConsumeToken(tok::semi);
       return;
 
     case tok::l_square:
@@ -1562,6 +1500,9 @@
       break;
 
     case tok::eof:
+    case tok::annot_module_begin:
+    case tok::annot_module_end:
+    case tok::annot_module_include:
       return;
 
     default:
@@ -1632,7 +1573,8 @@
     } else {
       if (Tok.is(tok::l_brace)) {
         Diag(Tok, diag::err_function_definition_not_allowed);
-        SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+        SkipMalformedDecl();
+        return DeclGroupPtrTy();
       }
     }
   }
@@ -1648,9 +1590,8 @@
   // don't need to parse the container in advance.
   if (FRI && (Tok.is(tok::colon) || isTokIdentifier_in())) {
     bool IsForRangeLoop = false;
-    if (Tok.is(tok::colon)) {
+    if (TryConsumeToken(tok::colon, FRI->ColonLoc)) {
       IsForRangeLoop = true;
-      FRI->ColonLoc = ConsumeToken();
       if (Tok.is(tok::l_brace))
         FRI->RangeExpr = ParseBraceInitializer();
       else
@@ -1677,9 +1618,8 @@
   
   // If we don't have a comma, it is either the end of the list (a ';') or an
   // error, bail out.
-  while (Tok.is(tok::comma)) {
-    SourceLocation CommaLoc = ConsumeToken();
-
+  SourceLocation CommaLoc;
+  while (TryConsumeToken(tok::comma, CommaLoc)) {
     if (Tok.isAtStartOfLine() && ExpectSemi && !MightBeDeclarator(Context)) {
       // This comma was followed by a line-break and something which can't be
       // the start of a declarator. The comma was probably a typo for a
@@ -1724,8 +1664,7 @@
     // Otherwise things are very confused and we skip to recover.
     if (!isDeclarationSpecifier()) {
       SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
-      if (Tok.is(tok::semi))
-        ConsumeToken();
+      TryConsumeToken(tok::semi);
     }
   }
 
@@ -1975,8 +1914,7 @@
 
   // Validate declspec for type-name.
   unsigned Specs = DS.getParsedSpecifiers();
-  if ((DSC == DSC_type_specifier || DSC == DSC_trailing) &&
-      !DS.hasTypeSpecifier()) {
+  if (isTypeSpecifier(DSC) && !DS.hasTypeSpecifier()) {
     Diag(Tok, diag::err_expected_type);
     DS.SetTypeSpecError();
   } else if (Specs == DeclSpec::PQ_None && !DS.getNumProtocolQualifiers() &&
@@ -2074,8 +2012,7 @@
   // within a type specifier. Outside of C++, we allow this even if the
   // language doesn't "officially" support implicit int -- we support
   // implicit int as an extension in C99 and C11.
-  if (DSC != DSC_type_specifier && DSC != DSC_trailing &&
-      !getLangOpts().CPlusPlus &&
+  if (!isTypeSpecifier(DSC) && !getLangOpts().CPlusPlus &&
       isValidAfterIdentifierInDeclarator(NextToken())) {
     // If this token is valid for implicit int, e.g. "static x = 4", then
     // we just avoid eating the identifier, so it will be parsed as the
@@ -2145,7 +2082,7 @@
 
   // Determine whether this identifier could plausibly be the name of something
   // being declared (with a missing type).
-  if (DSC != DSC_type_specifier && DSC != DSC_trailing &&
+  if (!isTypeSpecifier(DSC) &&
       (!SS || DSC == DSC_top_level || DSC == DSC_class)) {
     // Look ahead to the next token to try to figure out what this declaration
     // was supposed to be.
@@ -2206,7 +2143,9 @@
   // diagnostic and attempt to recover.
   ParsedType T;
   IdentifierInfo *II = Tok.getIdentifierInfo();
-  if (Actions.DiagnoseUnknownTypeName(II, Loc, getCurScope(), SS, T)) {
+  if (Actions.DiagnoseUnknownTypeName(II, Loc, getCurScope(), SS, T,
+                                      getLangOpts().CPlusPlus &&
+                                          NextToken().is(tok::less))) {
     // The action emitted a diagnostic, so we don't have to.
     if (T) {
       // The action has suggested that the type T could be used. Set that as
@@ -2214,7 +2153,8 @@
       // name token, and we're done.
       const char *PrevSpec;
       unsigned DiagID;
-      DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID, T);
+      DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID, T,
+                         Actions.getASTContext().getPrintingPolicy());
       DS.SetRangeEnd(Tok.getLocation());
       ConsumeToken();
       // There may be other declaration specifiers after this.
@@ -2258,6 +2198,9 @@
     return DSC_top_level;
   if (Context == Declarator::TrailingReturnContext)
     return DSC_trailing;
+  if (Context == Declarator::AliasDeclContext ||
+      Context == Declarator::AliasTemplateContext)
+    return DSC_alias_declaration;
   return DSC_normal;
 }
 
@@ -2282,8 +2225,8 @@
   } else
     ER = ParseConstantExpression();
 
-  if (getLangOpts().CPlusPlus11 && Tok.is(tok::ellipsis))
-    EllipsisLoc = ConsumeToken();
+  if (getLangOpts().CPlusPlus11)
+    TryConsumeToken(tok::ellipsis, EllipsisLoc);
 
   return ER;
 }
@@ -2305,7 +2248,7 @@
   SourceLocation KWLoc = ConsumeToken();
 
   BalancedDelimiterTracker T(*this, tok::l_paren);
-  if (T.expectAndConsume(diag::err_expected_lparen))
+  if (T.expectAndConsume())
     return;
 
   SourceLocation EllipsisLoc;
@@ -2339,7 +2282,6 @@
   assert(DS.hasTagDefinition() && "shouldn't call this");
 
   bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
-  bool HasMissingSemi = false;
 
   if (getLangOpts().CPlusPlus &&
       (Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
@@ -2349,63 +2291,77 @@
     return true;
   }
 
+  bool HasScope = Tok.is(tok::annot_cxxscope);
+  // Make a copy in case GetLookAheadToken invalidates the result of NextToken.
+  Token AfterScope = HasScope ? NextToken() : Tok;
+
   // Determine whether the following tokens could possibly be a
   // declarator.
-  if (Tok.is(tok::identifier) || Tok.is(tok::annot_template_id)) {
-    const Token &Next = NextToken();
+  bool MightBeDeclarator = true;
+  if (Tok.is(tok::kw_typename) || Tok.is(tok::annot_typename)) {
+    // A declarator-id can't start with 'typename'.
+    MightBeDeclarator = false;
+  } else if (AfterScope.is(tok::annot_template_id)) {
+    // If we have a type expressed as a template-id, this cannot be a
+    // declarator-id (such a type cannot be redeclared in a simple-declaration).
+    TemplateIdAnnotation *Annot =
+        static_cast<TemplateIdAnnotation *>(AfterScope.getAnnotationValue());
+    if (Annot->Kind == TNK_Type_template)
+      MightBeDeclarator = false;
+  } else if (AfterScope.is(tok::identifier)) {
+    const Token &Next = HasScope ? GetLookAheadToken(2) : NextToken();
+
     // These tokens cannot come after the declarator-id in a
     // simple-declaration, and are likely to come after a type-specifier.
-    HasMissingSemi = Next.is(tok::star) || Next.is(tok::amp) ||
-                     Next.is(tok::ampamp) || Next.is(tok::identifier) ||
-                     Next.is(tok::annot_cxxscope) ||
-                     Next.is(tok::coloncolon);
-  } else if (Tok.is(tok::annot_cxxscope) &&
-             NextToken().is(tok::identifier) &&
-             DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
-    // We almost certainly have a missing semicolon. Look up the name and
-    // check; if it names a type, we're missing a semicolon.
-    CXXScopeSpec SS;
-    Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
-                                                 Tok.getAnnotationRange(), SS);
-    const Token &Next = NextToken();
-    IdentifierInfo *Name = Next.getIdentifierInfo();
-    Sema::NameClassification Classification =
-        Actions.ClassifyName(getCurScope(), SS, Name, Next.getLocation(),
-                             NextToken(), /*IsAddressOfOperand*/false);
-    switch (Classification.getKind()) {
-    case Sema::NC_Error:
-      SkipMalformedDecl();
-      return true;
+    if (Next.is(tok::star) || Next.is(tok::amp) || Next.is(tok::ampamp) ||
+        Next.is(tok::identifier) || Next.is(tok::annot_cxxscope) ||
+        Next.is(tok::coloncolon)) {
+      // Missing a semicolon.
+      MightBeDeclarator = false;
+    } else if (HasScope) {
+      // If the declarator-id has a scope specifier, it must redeclare a
+      // previously-declared entity. If that's a type (and this is not a
+      // typedef), that's an error.
+      CXXScopeSpec SS;
+      Actions.RestoreNestedNameSpecifierAnnotation(
+          Tok.getAnnotationValue(), Tok.getAnnotationRange(), SS);
+      IdentifierInfo *Name = AfterScope.getIdentifierInfo();
+      Sema::NameClassification Classification = Actions.ClassifyName(
+          getCurScope(), SS, Name, AfterScope.getLocation(), Next,
+          /*IsAddressOfOperand*/false);
+      switch (Classification.getKind()) {
+      case Sema::NC_Error:
+        SkipMalformedDecl();
+        return true;
 
-    case Sema::NC_Keyword:
-    case Sema::NC_NestedNameSpecifier:
-      llvm_unreachable("typo correction and nested name specifiers not "
-                       "possible here");
+      case Sema::NC_Keyword:
+      case Sema::NC_NestedNameSpecifier:
+        llvm_unreachable("typo correction and nested name specifiers not "
+                         "possible here");
 
-    case Sema::NC_Type:
-    case Sema::NC_TypeTemplate:
-      // Not a previously-declared non-type entity.
-      HasMissingSemi = true;
-      break;
+      case Sema::NC_Type:
+      case Sema::NC_TypeTemplate:
+        // Not a previously-declared non-type entity.
+        MightBeDeclarator = false;
+        break;
 
-    case Sema::NC_Unknown:
-    case Sema::NC_Expression:
-    case Sema::NC_VarTemplate:
-    case Sema::NC_FunctionTemplate:
-      // Might be a redeclaration of a prior entity.
-      HasMissingSemi = false;
-      break;
+      case Sema::NC_Unknown:
+      case Sema::NC_Expression:
+      case Sema::NC_VarTemplate:
+      case Sema::NC_FunctionTemplate:
+        // Might be a redeclaration of a prior entity.
+        break;
+      }
     }
-  } else if (Tok.is(tok::kw_typename) || Tok.is(tok::annot_typename)) {
-    HasMissingSemi = true;
   }
 
-  if (!HasMissingSemi)
+  if (MightBeDeclarator)
     return false;
 
+  const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
   Diag(PP.getLocForEndOfToken(DS.getRepAsDecl()->getLocEnd()),
-       diag::err_expected_semi_after_tagdecl)
-    << DeclSpec::getSpecifierName(DS.getTypeSpecType());
+       diag::err_expected_after)
+      << DeclSpec::getSpecifierName(DS.getTypeSpecType(), PPol) << tok::semi;
 
   // Try to recover from the typo, by dropping the tag definition and parsing
   // the problematic tokens as a type.
@@ -2461,6 +2417,7 @@
   bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
   bool AttrsLastTime = false;
   ParsedAttributesWithRange attrs(AttrFactory);
+  const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
   while (1) {
     bool isInvalid = false;
     const char *PrevSpec = 0;
@@ -2484,7 +2441,7 @@
 
       // If this is not a declaration specifier token, we're done reading decl
       // specifiers.  First verify that DeclSpec's are consistent.
-      DS.Finish(Diags, PP);
+      DS.Finish(Diags, PP, Policy);
       return;
 
     case tok::l_square:
@@ -2586,7 +2543,7 @@
         if ((DSContext == DSC_top_level || DSContext == DSC_class) &&
             TemplateId->Name &&
             Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
-          if (isConstructorDeclarator()) {
+          if (isConstructorDeclarator(/*Unqualified*/false)) {
             // The user meant this to be an out-of-line constructor
             // definition, but template arguments are not allowed
             // there.  Just allow this as a constructor; we'll
@@ -2618,7 +2575,7 @@
           ParsedType T = getTypeAnnotation(Tok);
           isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
                                          Tok.getAnnotationEndLoc(),
-                                         PrevSpec, DiagID, T);
+                                         PrevSpec, DiagID, T, Policy);
           if (isInvalid)
             break;
         }
@@ -2636,7 +2593,7 @@
       if ((DSContext == DSC_top_level || DSContext == DSC_class) &&
           Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(),
                                      &SS)) {
-        if (isConstructorDeclarator())
+        if (isConstructorDeclarator(/*Unqualified*/false))
           goto DoneWithDeclSpec;
 
         // As noted in C++ [class.qual]p2 (cited above), when the name
@@ -2677,7 +2634,7 @@
       ConsumeToken(); // The C++ scope.
 
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
-                                     DiagID, TypeRep);
+                                     DiagID, TypeRep, Policy);
       if (isInvalid)
         break;
 
@@ -2696,7 +2653,7 @@
       if (Tok.getAnnotationValue()) {
         ParsedType T = getTypeAnnotation(Tok);
         isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
-                                       DiagID, T);
+                                       DiagID, T, Policy);
       } else
         DS.SetTypeSpecError();
 
@@ -2783,11 +2740,11 @@
       // check whether this is a constructor declaration.
       if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
           Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) &&
-          isConstructorDeclarator())
+          isConstructorDeclarator(/*Unqualified*/true))
         goto DoneWithDeclSpec;
 
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
-                                     DiagID, TypeRep);
+                                     DiagID, TypeRep, Policy);
       if (isInvalid)
         break;
 
@@ -2819,7 +2776,7 @@
       // constructor declaration.
       if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
           Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) &&
-          isConstructorDeclarator())
+          isConstructorDeclarator(TemplateId->SS.isEmpty()))
         goto DoneWithDeclSpec;
 
       // Turn the template-id annotation token into a type annotation
@@ -2843,10 +2800,8 @@
       isInvalid = DS.setFunctionSpecForceInline(Loc, PrevSpec, DiagID);
       IdentifierInfo *AttrName = Tok.getIdentifierInfo();
       SourceLocation AttrNameLoc = Tok.getLocation();
-      // FIXME: This does not work correctly if it is set to be a declspec
-      //        attribute, and a GNU attribute is simply incorrect.
       DS.getAttributes().addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0, 0,
-                                AttributeList::AS_GNU);
+                                AttributeList::AS_Keyword);
       break;
     }
 
@@ -2876,46 +2831,46 @@
     // storage-class-specifier
     case tok::kw_typedef:
       isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_typedef, Loc,
-                                         PrevSpec, DiagID);
+                                         PrevSpec, DiagID, Policy);
       break;
     case tok::kw_extern:
       if (DS.getThreadStorageClassSpec() == DeclSpec::TSCS___thread)
         Diag(Tok, diag::ext_thread_before) << "extern";
       isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_extern, Loc,
-                                         PrevSpec, DiagID);
+                                         PrevSpec, DiagID, Policy);
       break;
     case tok::kw___private_extern__:
       isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_private_extern,
-                                         Loc, PrevSpec, DiagID);
+                                         Loc, PrevSpec, DiagID, Policy);
       break;
     case tok::kw_static:
       if (DS.getThreadStorageClassSpec() == DeclSpec::TSCS___thread)
         Diag(Tok, diag::ext_thread_before) << "static";
       isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_static, Loc,
-                                         PrevSpec, DiagID);
+                                         PrevSpec, DiagID, Policy);
       break;
     case tok::kw_auto:
       if (getLangOpts().CPlusPlus11) {
         if (isKnownToBeTypeSpecifier(GetLookAheadToken(1))) {
           isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
-                                             PrevSpec, DiagID);
+                                             PrevSpec, DiagID, Policy);
           if (!isInvalid)
             Diag(Tok, diag::ext_auto_storage_class)
               << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
         } else
           isInvalid = DS.SetTypeSpecType(DeclSpec::TST_auto, Loc, PrevSpec,
-                                         DiagID);
+                                         DiagID, Policy);
       } else
         isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
-                                           PrevSpec, DiagID);
+                                           PrevSpec, DiagID, Policy);
       break;
     case tok::kw_register:
       isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_register, Loc,
-                                         PrevSpec, DiagID);
+                                         PrevSpec, DiagID, Policy);
       break;
     case tok::kw_mutable:
       isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_mutable, Loc,
-                                         PrevSpec, DiagID);
+                                         PrevSpec, DiagID, Policy);
       break;
     case tok::kw___thread:
       isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS___thread, Loc,
@@ -2977,19 +2932,19 @@
     // type-specifier
     case tok::kw_short:
       isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec,
-                                      DiagID);
+                                      DiagID, Policy);
       break;
     case tok::kw_long:
       if (DS.getTypeSpecWidth() != DeclSpec::TSW_long)
         isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec,
-                                        DiagID);
+                                        DiagID, Policy);
       else
         isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
-                                        DiagID);
+                                        DiagID, Policy);
       break;
     case tok::kw___int64:
         isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
-                                        DiagID);
+                                        DiagID, Policy);
       break;
     case tok::kw_signed:
       isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec,
@@ -3009,43 +2964,43 @@
       break;
     case tok::kw_void:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_char:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_int:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw___int128:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_half:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_float:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_double:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_wchar_t:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_char16_t:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_char32_t:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char32, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw_bool:
     case tok::kw__Bool:
@@ -3059,62 +3014,30 @@
         isInvalid = true;
       } else {
         isInvalid = DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec,
-                                       DiagID);
+                                       DiagID, Policy);
       }
       break;
     case tok::kw__Decimal32:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal32, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw__Decimal64:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal64, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw__Decimal128:
       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal128, Loc, PrevSpec,
-                                     DiagID);
+                                     DiagID, Policy);
       break;
     case tok::kw___vector:
-      isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+      isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID, Policy);
       break;
     case tok::kw___pixel:
-      isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID);
-      break;
-    case tok::kw_image1d_t:
-       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_image1d_t, Loc,
-                                      PrevSpec, DiagID);
-      break;
-    case tok::kw_image1d_array_t:
-       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_image1d_array_t, Loc,
-                                      PrevSpec, DiagID);
-      break;
-    case tok::kw_image1d_buffer_t:
-       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_image1d_buffer_t, Loc,
-                                      PrevSpec, DiagID);
-      break;
-    case tok::kw_image2d_t:
-       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_image2d_t, Loc,
-                                      PrevSpec, DiagID);
-      break;
-    case tok::kw_image2d_array_t:
-       isInvalid = DS.SetTypeSpecType(DeclSpec::TST_image2d_array_t, Loc,
-                                      PrevSpec, DiagID);
-      break;
-    case tok::kw_image3d_t:
-      isInvalid = DS.SetTypeSpecType(DeclSpec::TST_image3d_t, Loc,
-                                     PrevSpec, DiagID);
-      break;
-    case tok::kw_sampler_t:
-      isInvalid = DS.SetTypeSpecType(DeclSpec::TST_sampler_t, Loc,
-                                     PrevSpec, DiagID);
-      break;
-    case tok::kw_event_t:
-      isInvalid = DS.SetTypeSpecType(DeclSpec::TST_event_t, Loc,
-                                     PrevSpec, DiagID);
+      isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID, Policy);
       break;
     case tok::kw___unknown_anytype:
       isInvalid = DS.SetTypeSpecType(TST_unknown_anytype, Loc,
-                                     PrevSpec, DiagID);
+                                     PrevSpec, DiagID, Policy);
       break;
 
     // class-specifier:
@@ -3198,9 +3121,6 @@
       break;
 
     // OpenCL qualifiers:
-    case tok::kw_private:
-      if (!getLangOpts().OpenCL)
-        goto DoneWithDeclSpec;
     case tok::kw___private:
     case tok::kw___global:
     case tok::kw___local:
@@ -3208,7 +3128,7 @@
     case tok::kw___read_only:
     case tok::kw___write_only:
     case tok::kw___read_write:
-      ParseOpenCLQualifiers(DS);
+      ParseOpenCLQualifiers(DS.getAttributes());
       break;
 
     case tok::less:
@@ -3305,8 +3225,7 @@
       ParseDeclarator(DeclaratorInfo.D);
     }
 
-    if (Tok.is(tok::colon)) {
-      ConsumeToken();
+    if (TryConsumeToken(tok::colon)) {
       ExprResult Res(ParseConstantExpression());
       if (Res.isInvalid())
         SkipUntil(tok::semi, StopBeforeMatch);
@@ -3322,12 +3241,9 @@
 
     // If we don't have a comma, it is either the end of the list (a ';')
     // or an error, bail out.
-    if (Tok.isNot(tok::comma))
+    if (!TryConsumeToken(tok::comma, CommaLoc))
       return;
 
-    // Consume the comma.
-    CommaLoc = ConsumeToken();
-
     FirstDeclarator = false;
   }
 }
@@ -3358,7 +3274,7 @@
   SmallVector<Decl *, 32> FieldDecls;
 
   // While we still have something to read, read the declarations in the struct.
-  while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+  while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
     // Each iteration of this loop reads one struct-declaration.
 
     // Check for extraneous top-level semicolon.
@@ -3394,7 +3310,7 @@
                        SmallVectorImpl<Decl *> &FieldDecls) :
           P(P), TagDecl(TagDecl), FieldDecls(FieldDecls) {}
 
-        void invoke(ParsingFieldDeclarator &FD) {
+        void invoke(ParsingFieldDeclarator &FD) override {
           // Install the declarator into the current TagDecl.
           Decl *Field = P.Actions.ActOnField(P.getCurScope(), TagDecl,
                               FD.D.getDeclSpec().getSourceRange().getBegin(),
@@ -3415,9 +3331,9 @@
         continue;
       }
       ConsumeToken();
-      ExpectAndConsume(tok::l_paren, diag::err_expected_lparen);
+      ExpectAndConsume(tok::l_paren);
       if (!Tok.is(tok::identifier)) {
-        Diag(Tok, diag::err_expected_ident);
+        Diag(Tok, diag::err_expected) << tok::identifier;
         SkipUntil(tok::semi);
         continue;
       }
@@ -3426,21 +3342,22 @@
                         Tok.getIdentifierInfo(), Fields);
       FieldDecls.insert(FieldDecls.end(), Fields.begin(), Fields.end());
       ConsumeToken();
-      ExpectAndConsume(tok::r_paren, diag::err_expected_rparen);
+      ExpectAndConsume(tok::r_paren);
     }
 
-    if (Tok.is(tok::semi)) {
-      ConsumeToken();
-    } else if (Tok.is(tok::r_brace)) {
+    if (TryConsumeToken(tok::semi))
+      continue;
+
+    if (Tok.is(tok::r_brace)) {
       ExpectAndConsume(tok::semi, diag::ext_expected_semi_decl_list);
       break;
-    } else {
-      ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list);
-      // Skip to end of block or statement to avoid ext-warning on extra ';'.
-      SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
-      // If we stopped at a ';', eat it.
-      if (Tok.is(tok::semi)) ConsumeToken();
     }
+
+    ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list);
+    // Skip to end of block or statement to avoid ext-warning on extra ';'.
+    SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+    // If we stopped at a ';', eat it.
+    TryConsumeToken(tok::semi);
   }
 
   T.consumeClose();
@@ -3557,7 +3474,7 @@
       return;
 
     if (SS.isSet() && Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident);
+      Diag(Tok, diag::err_expected) << tok::identifier;
       if (Tok.isNot(tok::l_brace)) {
         // Has no name and is not a definition.
         // Skip the rest of this declarator, up until the comma or semicolon.
@@ -3570,7 +3487,7 @@
   // Must have either 'enum name' or 'enum {...}'.
   if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace) &&
       !(AllowFixedUnderlyingType && Tok.is(tok::colon))) {
-    Diag(Tok, diag::err_expected_ident_lbrace);
+    Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
 
     // Skip the rest of this declarator, up until the comma or semicolon.
     SkipUntil(tok::comma, StopAtSemi);
@@ -3696,15 +3613,14 @@
     } else {
       TUK = Sema::TUK_Definition;
     }
-  } else if (DSC != DSC_type_specifier &&
+  } else if (!isTypeSpecifier(DSC) &&
              (Tok.is(tok::semi) ||
               (Tok.isAtStartOfLine() &&
                !isValidAfterTypeSpecifier(CanBeBitfield)))) {
     TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
     if (Tok.isNot(tok::semi)) {
       // A semicolon was missing after this declaration. Diagnose and recover.
-      ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
-                       "enum");
+      ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
       PP.EnterToken(Tok);
       Tok.setKind(tok::semi);
     }
@@ -3759,7 +3675,8 @@
                                    StartLoc, SS, Name, NameLoc, attrs.getList(),
                                    AS, DS.getModulePrivateSpecLoc(), TParams,
                                    Owned, IsDependent, ScopedEnumKWLoc,
-                                   IsScopedUsingClassTag, BaseType);
+                                   IsScopedUsingClassTag, BaseType,
+                                   DSC == DSC_type_specifier);
 
   if (IsDependent) {
     // This enum has a dependent nested-name-specifier. Handle it as a
@@ -3780,7 +3697,8 @@
 
     if (DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc,
                            NameLoc.isValid() ? NameLoc : StartLoc,
-                           PrevSpec, DiagID, Type.get()))
+                           PrevSpec, DiagID, Type.get(),
+                           Actions.getASTContext().getPrintingPolicy()))
       Diag(StartLoc, DiagID) << PrevSpec;
 
     return;
@@ -3803,7 +3721,8 @@
 
   if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc,
                          NameLoc.isValid() ? NameLoc : StartLoc,
-                         PrevSpec, DiagID, TagDecl, Owned))
+                         PrevSpec, DiagID, TagDecl, Owned,
+                         Actions.getASTContext().getPrintingPolicy()))
     Diag(StartLoc, DiagID) << PrevSpec;
 }
 
@@ -3834,7 +3753,16 @@
   Decl *LastEnumConstDecl = 0;
 
   // Parse the enumerator-list.
-  while (Tok.is(tok::identifier)) {
+  while (Tok.isNot(tok::r_brace)) {
+    // Parse an enumerator. If this fails, try skipping to the start of the
+    // next enumerator definition.
+    if (Tok.isNot(tok::identifier)) {
+      Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+      if (SkipUntil(tok::comma, tok::r_brace, StopBeforeMatch) &&
+          TryConsumeToken(tok::comma))
+        continue;
+      break;
+    }
     IdentifierInfo *Ident = Tok.getIdentifierInfo();
     SourceLocation IdentLoc = ConsumeToken();
 
@@ -3848,11 +3776,10 @@
     ExprResult AssignedVal;
     ParsingDeclRAIIObject PD(*this, ParsingDeclRAIIObject::NoParent);
 
-    if (Tok.is(tok::equal)) {
-      EqualLoc = ConsumeToken();
+    if (TryConsumeToken(tok::equal, EqualLoc)) {
       AssignedVal = ParseConstantExpression();
       if (AssignedVal.isInvalid())
-        SkipUntil(tok::comma, tok::r_brace, StopAtSemi | StopBeforeMatch);
+        SkipUntil(tok::comma, tok::r_brace, StopBeforeMatch);
     }
 
     // Install the enumerator constant into EnumDecl.
@@ -3874,11 +3801,25 @@
       continue;
     }
 
-    if (Tok.isNot(tok::comma))
-      break;
-    SourceLocation CommaLoc = ConsumeToken();
+    // Enumerator definition must be finished; only a comma or r_brace is
+    // allowed here.
+    SourceLocation CommaLoc;
+    if (Tok.isNot(tok::r_brace) && !TryConsumeToken(tok::comma, CommaLoc)) {
+      if (EqualLoc.isValid())
+        Diag(Tok.getLocation(), diag::err_expected_either) << tok::r_brace
+                                                           << tok::comma;
+      else
+        Diag(Tok.getLocation(), diag::err_expected_end_of_enumerator);
+      if (SkipUntil(tok::comma, tok::r_brace, StopBeforeMatch)) {
+        if (TryConsumeToken(tok::comma, CommaLoc))
+          continue;
+      } else {
+        break;
+      }
+    }
 
-    if (Tok.isNot(tok::identifier)) {
+    // If the comma was followed by r_brace, emit the appropriate warning.
+    if (Tok.is(tok::r_brace) && CommaLoc.isValid()) {
       if (!getLangOpts().C99 && !getLangOpts().CPlusPlus11)
         Diag(CommaLoc, getLangOpts().CPlusPlus ?
                diag::ext_enumerator_list_comma_cxx :
@@ -3887,6 +3828,7 @@
       else if (getLangOpts().CPlusPlus11)
         Diag(CommaLoc, diag::warn_cxx98_compat_enumerator_list_comma)
           << FixItHint::CreateRemoval(CommaLoc);
+      break;
     }
   }
 
@@ -3910,7 +3852,7 @@
   // was probably forgotten.
   bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
   if (!isValidAfterTypeSpecifier(CanBeBitfield)) {
-    ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl, "enum");
+    ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
     // Push this token back into the preprocessor and change our current token
     // to ';' so that the rest of the code recovers as though there were an
     // ';' after the definition.
@@ -3924,12 +3866,7 @@
 bool Parser::isTypeQualifier() const {
   switch (Tok.getKind()) {
   default: return false;
-
-    // type-qualifier only in OpenCL
-  case tok::kw_private:
-    return getLangOpts().OpenCL;
-
-    // type-qualifier
+  // type-qualifier
   case tok::kw_const:
   case tok::kw_volatile:
   case tok::kw_restrict:
@@ -3975,16 +3912,6 @@
   case tok::kw__Decimal128:
   case tok::kw___vector:
 
-    // OpenCL specific types:
-  case tok::kw_image1d_t:
-  case tok::kw_image1d_array_t:
-  case tok::kw_image1d_buffer_t:
-  case tok::kw_image2d_t:
-  case tok::kw_image2d_array_t:
-  case tok::kw_image3d_t:
-  case tok::kw_sampler_t:
-  case tok::kw_event_t:
-
     // struct-or-union-specifier (C99) or class-specifier (C++)
   case tok::kw_class:
   case tok::kw_struct:
@@ -4057,16 +3984,6 @@
   case tok::kw__Decimal128:
   case tok::kw___vector:
 
-    // OpenCL specific types:
-  case tok::kw_image1d_t:
-  case tok::kw_image1d_array_t:
-  case tok::kw_image1d_buffer_t:
-  case tok::kw_image2d_t:
-  case tok::kw_image2d_array_t:
-  case tok::kw_image3d_t:
-  case tok::kw_sampler_t:
-  case tok::kw_event_t:
-
     // struct-or-union-specifier (C99) or class-specifier (C++)
   case tok::kw_class:
   case tok::kw_struct:
@@ -4111,9 +4028,6 @@
 
     return true;
 
-  case tok::kw_private:
-    return getLangOpts().OpenCL;
-
   // C11 _Atomic
   case tok::kw__Atomic:
     return true;
@@ -4129,9 +4043,6 @@
   switch (Tok.getKind()) {
   default: return false;
 
-  case tok::kw_private:
-    return getLangOpts().OpenCL;
-
   case tok::identifier:   // foo::bar
     // Unfortunate hack to support "Class.factoryMethod" notation.
     if (getLangOpts().ObjC1 && NextToken().is(tok::period))
@@ -4213,16 +4124,6 @@
   case tok::kw__Decimal128:
   case tok::kw___vector:
 
-    // OpenCL specific types:
-  case tok::kw_image1d_t:
-  case tok::kw_image1d_array_t:
-  case tok::kw_image1d_buffer_t:
-  case tok::kw_image2d_t:
-  case tok::kw_image2d_array_t:
-  case tok::kw_image3d_t:
-  case tok::kw_sampler_t:
-  case tok::kw_event_t:
-
     // struct-or-union-specifier (C99) or class-specifier (C++)
   case tok::kw_class:
   case tok::kw_struct:
@@ -4300,7 +4201,7 @@
   }
 }
 
-bool Parser::isConstructorDeclarator() {
+bool Parser::isConstructorDeclarator(bool IsUnqualified) {
   TentativeParsingAction TPA(*this);
 
   // Parse the C++ scope specifier.
@@ -4382,12 +4283,35 @@
     case tok::coloncolon:
       // C(X   ::   Y);
       // C(X   ::   *p);
-    case tok::r_paren:
-      // C(X   )
       // Assume this isn't a constructor, rather than assuming it's a
       // constructor with an unnamed parameter of an ill-formed type.
       break;
 
+    case tok::r_paren:
+      // C(X   )
+      if (NextToken().is(tok::colon) || NextToken().is(tok::kw_try)) {
+        // Assume these were meant to be constructors:
+        //   C(X)   :    (the name of a bit-field cannot be parenthesized).
+        //   C(X)   try  (this is otherwise ill-formed).
+        IsConstructor = true;
+      }
+      if (NextToken().is(tok::semi) || NextToken().is(tok::l_brace)) {
+        // If we have a constructor name within the class definition,
+        // assume these were meant to be constructors:
+        //   C(X)   {
+        //   C(X)   ;
+        // ... because otherwise we would be declaring a non-static data
+        // member that is ill-formed because it's of the same type as its
+        // surrounding class.
+        //
+        // FIXME: We can actually do this whether or not the name is qualified,
+        // because if it is qualified in this context it must be being used as
+        // a constructor name. However, we do not currently implement that rule
+        // correctly, so we're somewhat conservative here.
+        IsConstructor = IsUnqualified;
+      }
+      break;
+
     default:
       IsConstructor = true;
       break;
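
Illustration (not part of the diff; X, C and the member m are hypothetical): member declarations that the extended r_paren handling above now classifies as constructors rather than as ill-formed data members of the surrounding class type:

  struct X {};
  struct C {
    C(X x) : m(x) {}   // ')' followed by ':' can only begin a ctor-initializer
    C(X *);            // ')' followed by ';' inside the class body is taken as a constructor declaration
    X m;
  };
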
@@ -4455,9 +4379,6 @@
       break;
 
     // OpenCL qualifiers:
-    case tok::kw_private:
-      if (!getLangOpts().OpenCL)
-        goto DoneWithTypeQuals;
     case tok::kw___private:
     case tok::kw___global:
     case tok::kw___local:
@@ -4465,7 +4386,7 @@
     case tok::kw___read_only:
     case tok::kw___write_only:
     case tok::kw___read_write:
-      ParseOpenCLQualifiers(DS);
+      ParseOpenCLQualifiers(DS.getAttributes());
       break;
 
     case tok::kw___uptr:
@@ -4506,7 +4427,7 @@
       DoneWithTypeQuals:
       // If this is not a type-qualifier token, we're done reading type
       // qualifiers.  First verify that DeclSpec's are consistent.
-      DS.Finish(Diags, PP);
+      DS.Finish(Diags, PP, Actions.getASTContext().getPrintingPolicy());
       if (EndLoc.isValid())
         DS.SetRangeEnd(EndLoc);
       return;
@@ -4914,7 +4835,7 @@
               << getLangOpts().CPlusPlus;
       }
     } else
-      Diag(Tok, diag::err_expected_ident_lparen);
+      Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_paren;
     D.SetIdentifier(0, Tok.getLocation());
     D.setInvalidType(true);
   }
@@ -5122,7 +5043,6 @@
   ParsedAttributes FnAttrs(AttrFactory);
   TypeResult TrailingReturnType;
 
-  Actions.ActOnStartFunctionDeclarator();
   /* LocalEndLoc is the end location for the local FunctionTypeLoc.
      EndLoc is the end location for the function declarator.
      They differ for trailing return types. */
@@ -5191,6 +5111,7 @@
       // FIXME: currently, "static" case isn't handled correctly.
       bool IsCXX11MemberFunction =
         getLangOpts().CPlusPlus11 &&
+        D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
         (D.getContext() == Declarator::MemberContext
          ? !D.getDeclSpec().isFriendSpecified()
          : D.getContext() == Declarator::FileContext &&
@@ -5250,8 +5171,6 @@
                                              StartLoc, LocalEndLoc, D,
                                              TrailingReturnType),
                 FnAttrs, EndLoc);
-
-  Actions.ActOnEndFunctionDeclarator();
 }
 
 /// isFunctionDeclaratorIdentifierList - This parameter list may have an
@@ -5303,10 +5222,10 @@
   // Maintain an efficient lookup of params we have seen so far.
   llvm::SmallSet<const IdentifierInfo*, 16> ParamsSoFar;
 
-  while (1) {
+  do {
     // If this isn't an identifier, report the error and skip until ')'.
     if (Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident);
+      Diag(Tok, diag::err_expected) << tok::identifier;
       SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
       // Forget we parsed anything.
       ParamInfo.clear();
@@ -5331,12 +5250,8 @@
 
     // Eat the identifier.
     ConsumeToken();
-
     // The list continues if we see a comma.
-    if (Tok.isNot(tok::comma))
-      break;
-    ConsumeToken();
-  }
+  } while (TryConsumeToken(tok::comma));
 }
 
 /// ParseParameterDeclarationClause - Parse a (possibly empty) parameter-list
@@ -5375,13 +5290,11 @@
        ParsedAttributes &FirstArgAttrs,
        SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
        SourceLocation &EllipsisLoc) {
-  while (1) {
-    if (Tok.is(tok::ellipsis)) {
-      // FIXME: Issue a diagnostic if we parsed an attribute-specifier-seq
-      // before deciding this was a parameter-declaration-clause.
-      EllipsisLoc = ConsumeToken();     // Consume the ellipsis.
+  do {
+    // FIXME: Issue a diagnostic if we parsed an attribute-specifier-seq
+    // before deciding this was a parameter-declaration-clause.
+    if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
       break;
-    }
 
     // Parse the declaration-specifiers.
     // Just use the ParsingDeclaration "scope" of the declarator.
@@ -5500,26 +5413,17 @@
                                           Param, DefArgToks));
     }
 
-    // If the next token is a comma, consume it and keep reading arguments.
-    if (Tok.isNot(tok::comma)) {
-      if (Tok.is(tok::ellipsis)) {
-        EllipsisLoc = ConsumeToken();     // Consume the ellipsis.
-
-        if (!getLangOpts().CPlusPlus) {
-          // We have ellipsis without a preceding ',', which is ill-formed
-          // in C. Complain and provide the fix.
-          Diag(EllipsisLoc, diag::err_missing_comma_before_ellipsis)
-            << FixItHint::CreateInsertion(EllipsisLoc, ", ");
-        }
-      }
-
+    if (TryConsumeToken(tok::ellipsis, EllipsisLoc) &&
+        !getLangOpts().CPlusPlus) {
+      // We have ellipsis without a preceding ',', which is ill-formed
+      // in C. Complain and provide the fix.
+      Diag(EllipsisLoc, diag::err_missing_comma_before_ellipsis)
+          << FixItHint::CreateInsertion(EllipsisLoc, ", ");
       break;
     }
 
-    // Consume the comma.
-    ConsumeToken();
-  }
-
+    // If the next token is a comma, consume it and keep reading arguments.
+  } while (TryConsumeToken(tok::comma));
 }
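
Illustration (not part of the diff; 'sum' and 'sum_c' are hypothetical): the variadic parameter forms covered by the simplified ellipsis handling above:

  int sum(int count, ...);   // portable form, valid in both C and C++
  int sum_c(int count...);   // valid C++; in C this is diagnosed, with a fix-it inserting ", " before '...'
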
 
 /// [C90]   direct-declarator '[' constant-expression[opt] ']'
@@ -5544,7 +5448,6 @@
     MaybeParseCXX11Attributes(attrs);
 
     // Remember that we parsed the empty array type.
-    ExprResult NumElements;
     D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false, 0,
                                             T.getOpenLocation(),
                                             T.getCloseLocation()),
@@ -5571,8 +5474,7 @@
 
   // If valid, this location is the position where we read the 'static' keyword.
   SourceLocation StaticLoc;
-  if (Tok.is(tok::kw_static))
-    StaticLoc = ConsumeToken();
+  TryConsumeToken(tok::kw_static, StaticLoc);
 
   // If there is a type-qualifier-list, read it now.
   // Type qualifiers in an array subscript are a C99 feature.
@@ -5581,8 +5483,8 @@
 
   // If we haven't already read 'static', check to see if there is one after the
   // type-qualifier-list.
-  if (!StaticLoc.isValid() && Tok.is(tok::kw_static))
-    StaticLoc = ConsumeToken();
+  if (!StaticLoc.isValid())
+    TryConsumeToken(tok::kw_static, StaticLoc);
 
   // Handle "direct-declarator [ type-qual-list[opt] * ]".
   bool isStar = false;
@@ -5678,7 +5580,8 @@
     unsigned DiagID;
     // Check for duplicate type specifiers (e.g. "int typeof(int)").
     if (DS.SetTypeSpecType(DeclSpec::TST_typeofType, StartLoc, PrevSpec,
-                           DiagID, CastTy))
+                           DiagID, CastTy,
+                           Actions.getASTContext().getPrintingPolicy()))
       Diag(StartLoc, DiagID) << PrevSpec;
     return;
   }
@@ -5700,7 +5603,8 @@
   unsigned DiagID;
   // Check for duplicate type specifiers (e.g. "int typeof(int)").
   if (DS.SetTypeSpecType(DeclSpec::TST_typeofExpr, StartLoc, PrevSpec,
-                         DiagID, Operand.get()))
+                         DiagID, Operand.get(),
+                         Actions.getASTContext().getPrintingPolicy()))
     Diag(StartLoc, DiagID) << PrevSpec;
 }
 
@@ -5734,7 +5638,8 @@
   const char *PrevSpec = 0;
   unsigned DiagID;
   if (DS.SetTypeSpecType(DeclSpec::TST_atomic, StartLoc, PrevSpec,
-                         DiagID, Result.release()))
+                         DiagID, Result.release(),
+                         Actions.getASTContext().getPrintingPolicy()))
     Diag(StartLoc, DiagID) << PrevSpec;
 }
 
@@ -5774,6 +5679,7 @@
 bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                                       const char *&PrevSpec, unsigned &DiagID,
                                       bool &isInvalid) {
+  const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
   if (Tok.getIdentifierInfo() == Ident_vector) {
     Token Next = NextToken();
     switch (Next.getKind()) {
@@ -5788,15 +5694,15 @@
     case tok::kw_double:
     case tok::kw_bool:
     case tok::kw___pixel:
-      isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+      isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID, Policy);
       return true;
     case tok::identifier:
       if (Next.getIdentifierInfo() == Ident_pixel) {
-        isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+        isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID,Policy);
         return true;
       }
       if (Next.getIdentifierInfo() == Ident_bool) {
-        isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+        isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID,Policy);
         return true;
       }
       break;
@@ -5805,11 +5711,11 @@
     }
   } else if ((Tok.getIdentifierInfo() == Ident_pixel) &&
              DS.isTypeAltiVecVector()) {
-    isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID);
+    isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID, Policy);
     return true;
   } else if ((Tok.getIdentifierInfo() == Ident_bool) &&
              DS.isTypeAltiVecVector()) {
-    isInvalid = DS.SetTypeAltiVecBool(true, Loc, PrevSpec, DiagID);
+    isInvalid = DS.SetTypeAltiVecBool(true, Loc, PrevSpec, DiagID, Policy);
     return true;
   }
   return false;
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index dd29f99..cef748f 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -13,9 +13,12 @@
 
 #include "clang/Parse/Parser.h"
 #include "RAIIObjectsForParser.h"
-#include "clang/Basic/CharInfo.h"
-#include "clang/Basic/OperatorKinds.h"
+#include "clang/AST/ASTContext.h"
 #include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/Attributes.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/OperatorKinds.h"
 #include "clang/Parse/ParseDiagnostic.h"
 #include "clang/Sema/DeclSpec.h"
 #include "clang/Sema/ParsedTemplate.h"
@@ -91,7 +94,7 @@
 
   if (Tok.is(tok::equal)) {
     if (Ident == 0) {
-      Diag(Tok, diag::err_expected_ident);
+      Diag(Tok, diag::err_expected) << tok::identifier;
       // Skip to end of the definition and eat the ';'.
       SkipUntil(tok::semi);
       return 0;
@@ -111,8 +114,12 @@
       Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
           << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
     }
-    Diag(Tok, Ident ? diag::err_expected_lbrace :
-         diag::err_expected_ident_lbrace);
+
+    if (Ident)
+      Diag(Tok, diag::err_expected) << tok::l_brace;
+    else
+      Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
+
     return 0;
   }
 
@@ -195,7 +202,7 @@
                                  ParsedAttributes& attrs,
                                  BalancedDelimiterTracker &Tracker) {
   if (index == Ident.size()) {
-    while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+    while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
       ParsedAttributesWithRange attrs(AttrFactory);
       MaybeParseCXX11Attributes(attrs);
       MaybeParseMicrosoftAttributes(attrs);
@@ -259,8 +266,8 @@
 
   // Eat the ';'.
   DeclEnd = Tok.getLocation();
-  ExpectAndConsume(tok::semi, diag::err_expected_semi_after_namespace_name,
-                   "", tok::semi);
+  if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after_namespace_name))
+    SkipUntil(tok::semi);
 
   return Actions.ActOnNamespaceAliasDef(getCurScope(), NamespaceLoc, AliasLoc, Alias,
                                         SS, IdentLoc, Ident);
@@ -274,27 +281,16 @@
 ///         'extern' string-literal declaration
 ///
 Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
-  assert(Tok.is(tok::string_literal) && "Not a string literal!");
-  SmallString<8> LangBuffer;
-  bool Invalid = false;
-  StringRef Lang = PP.getSpelling(Tok, LangBuffer, &Invalid);
-  if (Invalid)
-    return 0;
-
-  // FIXME: This is incorrect: linkage-specifiers are parsed in translation
-  // phase 7, so string-literal concatenation is supposed to occur.
-  //   extern "" "C" "" "+" "+" { } is legal.
-  if (Tok.hasUDSuffix())
-    Diag(Tok, diag::err_invalid_string_udl);
-  SourceLocation Loc = ConsumeStringToken();
+  assert(isTokenStringLiteral() && "Not a string literal!");
+  ExprResult Lang = ParseStringLiteralExpression(false);
 
   ParseScope LinkageScope(this, Scope::DeclScope);
-  Decl *LinkageSpec
-    = Actions.ActOnStartLinkageSpecification(getCurScope(),
-                                             DS.getSourceRange().getBegin(),
-                                             Loc, Lang,
-                                      Tok.is(tok::l_brace) ? Tok.getLocation()
-                                                           : SourceLocation());
+  Decl *LinkageSpec =
+      Lang.isInvalid()
+          ? 0
+          : Actions.ActOnStartLinkageSpecification(
+                getCurScope(), DS.getSourceRange().getBegin(), Lang.take(),
+                Tok.is(tok::l_brace) ? Tok.getLocation() : SourceLocation());
 
   ParsedAttributesWithRange attrs(AttrFactory);
   MaybeParseCXX11Attributes(attrs);
@@ -308,8 +304,9 @@
     // ... but anyway remember that such an "extern" was seen.
     DS.setExternInLinkageSpec(true);
     ParseExternalDeclaration(attrs, &DS);
-    return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec,
-                                                   SourceLocation());
+    return LinkageSpec ? Actions.ActOnFinishLinkageSpecification(
+                             getCurScope(), LinkageSpec, SourceLocation())
+                       : 0;
   }
 
   DS.abort();
@@ -318,16 +315,48 @@
 
   BalancedDelimiterTracker T(*this, tok::l_brace);
   T.consumeOpen();
-  while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
-    ParsedAttributesWithRange attrs(AttrFactory);
-    MaybeParseCXX11Attributes(attrs);
-    MaybeParseMicrosoftAttributes(attrs);
-    ParseExternalDeclaration(attrs);
+
+  unsigned NestedModules = 0;
+  while (true) {
+    switch (Tok.getKind()) {
+    case tok::annot_module_begin:
+      ++NestedModules;
+      ParseTopLevelDecl();
+      continue;
+
+    case tok::annot_module_end:
+      if (!NestedModules)
+        break;
+      --NestedModules;
+      ParseTopLevelDecl();
+      continue;
+
+    case tok::annot_module_include:
+      ParseTopLevelDecl();
+      continue;
+
+    case tok::eof:
+      break;
+
+    case tok::r_brace:
+      if (!NestedModules)
+        break;
+      // Fall through.
+    default:
+      ParsedAttributesWithRange attrs(AttrFactory);
+      MaybeParseCXX11Attributes(attrs);
+      MaybeParseMicrosoftAttributes(attrs);
+      ParseExternalDeclaration(attrs);
+      continue;
+    }
+
+    break;
   }
 
   T.consumeClose();
-  return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec,
-                                                 T.getCloseLocation());
+  return LinkageSpec ? Actions.ActOnFinishLinkageSpecification(
+                           getCurScope(), LinkageSpec, T.getCloseLocation())
+                     : 0;
 }
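
Illustration (not part of the diff): because the language string is now parsed with ParseStringLiteralExpression, adjacent literals are concatenated before the linkage is determined, which is the behaviour the removed FIXME asked for; both declarations below are assumed to end up with "C" linkage:

  extern "C" { void f(); }      // ordinary linkage-specification
  extern "C" "" { void g(); }   // literals concatenate to "C" before ActOnStartLinkageSpecification
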
 
 /// ParseUsingDirectiveOrDeclaration - Parse C++ using using-declaration or
@@ -424,10 +453,10 @@
 
   // Eat ';'.
   DeclEnd = Tok.getLocation();
-  ExpectAndConsume(tok::semi,
-                   GNUAttr ? diag::err_expected_semi_after_attribute_list
-                           : diag::err_expected_semi_after_namespace_name, 
-                   "", tok::semi);
+  if (ExpectAndConsume(tok::semi,
+                       GNUAttr ? diag::err_expected_semi_after_attribute_list
+                               : diag::err_expected_semi_after_namespace_name))
+    SkipUntil(tok::semi);
 
   return Actions.ActOnUsingDirective(getCurScope(), UsingLoc, NamespcLoc, SS,
                                      IdentLoc, NamespcName, attrs.getList());
@@ -461,10 +490,8 @@
 
   // Ignore optional 'typename'.
   // FIXME: This is wrong; we should parse this as a typename-specifier.
-  if (Tok.is(tok::kw_typename)) {
-    TypenameLoc = ConsumeToken();
+  if (TryConsumeToken(tok::kw_typename, TypenameLoc))
     HasTypenameKeyword = true;
-  }
 
   // Parse nested-name-specifier.
   IdentifierInfo *LastII = 0;
@@ -584,10 +611,11 @@
 
   // Eat ';'.
   DeclEnd = Tok.getLocation();
-  ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
-                   !Attrs.empty() ? "attributes list" :
-                   IsAliasDecl ? "alias declaration" : "using declaration",
-                   tok::semi);
+  if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+                       !Attrs.empty() ? "attributes list"
+                                      : IsAliasDecl ? "alias declaration"
+                                                    : "using declaration"))
+    SkipUntil(tok::semi);
 
   // Diagnose an attempt to declare a templated using-declaration.
   // In C++11, alias-declarations can be templates:
@@ -649,7 +677,7 @@
 
   BalancedDelimiterTracker T(*this, tok::l_paren);
   if (T.consumeOpen()) {
-    Diag(Tok, diag::err_expected_lparen);
+    Diag(Tok, diag::err_expected) << tok::l_paren;
     SkipMalformedDecl();
     return 0;
   }
@@ -660,8 +688,10 @@
     return 0;
   }
 
-  if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "", tok::semi))
+  if (ExpectAndConsume(tok::comma)) {
+    SkipUntil(tok::semi);
     return 0;
+  }
 
   if (!isTokenStringLiteral()) {
     Diag(Tok, diag::err_expected_string_literal)
@@ -781,12 +811,13 @@
 
   const char *PrevSpec = 0;
   unsigned DiagID;
+  const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
   // Check for duplicate type specifiers (e.g. "int decltype(a)").
   if (Result.get()
         ? DS.SetTypeSpecType(DeclSpec::TST_decltype, StartLoc, PrevSpec,
-                             DiagID, Result.release())
+                             DiagID, Result.release(), Policy)
         : DS.SetTypeSpecType(DeclSpec::TST_decltype_auto, StartLoc, PrevSpec,
-                             DiagID)) {
+                             DiagID, Policy)) {
     Diag(StartLoc, DiagID) << PrevSpec;
     DS.SetTypeSpecError();
   }
@@ -837,7 +868,8 @@
   const char *PrevSpec = 0;
   unsigned DiagID;
   if (DS.SetTypeSpecType(DeclSpec::TST_underlyingType, StartLoc, PrevSpec,
-                         DiagID, Result.release()))
+                         DiagID, Result.release(),
+                         Actions.getASTContext().getPrintingPolicy()))
     Diag(StartLoc, DiagID) << PrevSpec;
   DS.setTypeofParensRange(T.getRange());
 }
@@ -985,7 +1017,8 @@
 
   const char *PrevSpec = 0;
   unsigned DiagID;
-  DS.SetTypeSpecType(TST_typename, IdLoc, PrevSpec, DiagID, Type);
+  DS.SetTypeSpecType(TST_typename, IdLoc, PrevSpec, DiagID, Type,
+                     Actions.getASTContext().getPrintingPolicy());
 
   Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
   return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
@@ -998,7 +1031,7 @@
     IdentifierInfo *AttrName = Tok.getIdentifierInfo();
     SourceLocation AttrNameLoc = ConsumeToken();
     attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0, 0,
-                 AttributeList::AS_GNU);
+                 AttributeList::AS_Keyword);
   }
 }
 
@@ -1021,6 +1054,7 @@
   case tok::l_paren:            // struct foo {...} (         x);
   case tok::comma:              // __builtin_offsetof(struct foo{...} ,
   case tok::kw_operator:        // struct foo       operator  ++() {...}
+  case tok::kw___declspec:      // struct foo {...} __declspec(...)
     return true;
   case tok::colon:
     return CouldBeBitfield;     // enum E { ... }   :         2;
@@ -1179,31 +1213,17 @@
   // C++11 attributes
   SourceLocation AttrFixitLoc = Tok.getLocation();
 
-  if (TagType == DeclSpec::TST_struct &&
-      !Tok.is(tok::identifier) &&
-      Tok.getIdentifierInfo() &&
-      (Tok.is(tok::kw___is_arithmetic) ||
-       Tok.is(tok::kw___is_convertible) ||
-       Tok.is(tok::kw___is_empty) ||
-       Tok.is(tok::kw___is_floating_point) ||
-       Tok.is(tok::kw___is_function) ||
-       Tok.is(tok::kw___is_fundamental) ||
-       Tok.is(tok::kw___is_integral) ||
-       Tok.is(tok::kw___is_member_function_pointer) ||
-       Tok.is(tok::kw___is_member_pointer) ||
-       Tok.is(tok::kw___is_pod) ||
-       Tok.is(tok::kw___is_pointer) ||
-       Tok.is(tok::kw___is_same) ||
-       Tok.is(tok::kw___is_scalar) ||
-       Tok.is(tok::kw___is_signed) ||
-       Tok.is(tok::kw___is_unsigned) ||
-       Tok.is(tok::kw___is_void)))
-    // GNU libstdc++ 4.2 and libc++ use certain intrinsic names as the
-    // name of struct templates, but some are keywords in GCC >= 4.3
-    // and Clang. Therefore, when we see the token sequence "struct
-    // X", make X into a normal identifier rather than a keyword, to
-    // allow libstdc++ 4.2 and libc++ to work properly.
-    TryKeywordIdentFallback(true);
+  // GNU libstdc++ and libc++ use certain intrinsic names as the names of
+  // struct templates, but some of them are keywords in GCC >= 4.3, MSVC and
+  // Clang. For compatibility, convert the token to an identifier and issue a
+  // warning diagnostic.
+  if (TagType == DeclSpec::TST_struct && !Tok.is(tok::identifier) &&
+      !Tok.isAnnotation()) {
+    const IdentifierInfo *II = Tok.getIdentifierInfo();
+    // We rarely end up here, so the following check is cheap enough.
+    if (II && II->getName().startswith("__is_"))
+      TryKeywordIdentFallback(true);
+  }
 
   // Parse the (optional) nested-name-specifier.
   CXXScopeSpec &SS = DS.getTypeSpecScope();
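
Illustration (not part of the diff): the kind of legacy libstdc++/libc++ declaration the relaxed prefix check above keeps accepting. '__is_pod' is a Clang keyword, but after 'struct' it is assumed to fall back to an ordinary identifier (with a compatibility warning) so the template still parses:

  template <typename T> struct __is_pod { static const bool value = false; };
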
@@ -1215,7 +1235,7 @@
       DS.SetTypeSpecError();
     if (SS.isSet())
       if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id))
-        Diag(Tok, diag::err_expected_ident);
+        Diag(Tok, diag::err_expected) << tok::identifier;
   }
 
   TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
@@ -1243,13 +1263,8 @@
       }
 
       Diag(NameLoc, diag::err_explicit_spec_non_template)
-        << (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
-        << (TagType == DeclSpec::TST_class? 0
-            : TagType == DeclSpec::TST_struct? 1
-            : TagType == DeclSpec::TST_union? 2
-            : 3)
-        << Name
-        << SourceRange(LAngleLoc, RAngleLoc);
+          << (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+          << TagTokKind << Name << SourceRange(LAngleLoc, RAngleLoc);
 
       // Strip off the last template parameter list if it was empty, since
       // we've removed its template argument list.
@@ -1286,6 +1301,7 @@
       if (SS.isNotEmpty())
         Range.setBegin(SS.getBeginLoc());
 
+      // FIXME: Name may be null here.
       Diag(TemplateId->LAngleLoc, diag::err_template_spec_syntax_non_template)
         << TemplateId->Name << static_cast<int>(TemplateId->Kind) << Range;
 
@@ -1318,11 +1334,12 @@
   //   new struct s;
   // or
   //   &T::operator struct s;
-  // For these, DSC is DSC_type_specifier.
+  // For these, DSC is DSC_type_specifier or DSC_alias_declaration.
 
   // If there are attributes after class name, parse them.
   MaybeParseCXX11Attributes(Attributes);
 
+  const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
   Sema::TagUseKind TUK;
   if (DSC == DSC_trailing)
     TUK = Sema::TUK_Reference;
@@ -1375,14 +1392,15 @@
       TUK = Sema::TUK_Reference;
 
     PA.Revert();
-  } else if (DSC != DSC_type_specifier &&
+  } else if (!isTypeSpecifier(DSC) &&
              (Tok.is(tok::semi) ||
               (Tok.isAtStartOfLine() && !isValidAfterTypeSpecifier(false)))) {
     TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
     if (Tok.isNot(tok::semi)) {
+      const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
       // A semicolon was missing after this declaration. Diagnose and recover.
-      ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
-        DeclSpec::getSpecifierName(TagType));
+      ExpectAndConsume(tok::semi, diag::err_expected_after,
+                       DeclSpec::getSpecifierName(TagType, PPol));
       PP.EnterToken(Tok);
       Tok.setKind(tok::semi);
     }
@@ -1424,7 +1442,7 @@
     if (DS.getTypeSpecType() != DeclSpec::TST_error) {
       // We have a declaration or reference to an anonymous class.
       Diag(StartLoc, diag::err_anon_type_definition)
-        << DeclSpec::getSpecifierName(TagType);
+        << DeclSpec::getSpecifierName(TagType, Policy);
     }
 
     // If we are parsing a definition and stop at a base-clause, continue on
@@ -1591,7 +1609,8 @@
                                        DS.getModulePrivateSpecLoc(),
                                        TParams, Owned, IsDependent,
                                        SourceLocation(), false,
-                                       clang::TypeResult());
+                                       clang::TypeResult(),
+                                       DSC == DSC_type_specifier);
 
     // If ActOnTag said the type was dependent, try again with the
     // less common call.
@@ -1620,11 +1639,12 @@
   if (!TypeResult.isInvalid()) {
     Result = DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc,
                                 NameLoc.isValid() ? NameLoc : StartLoc,
-                                PrevSpec, DiagID, TypeResult.get());
+                                PrevSpec, DiagID, TypeResult.get(), Policy);
   } else if (!TagOrTempResult.isInvalid()) {
     Result = DS.SetTypeSpecType(TagType, StartLoc,
                                 NameLoc.isValid() ? NameLoc : StartLoc,
-                                PrevSpec, DiagID, TagOrTempResult.get(), Owned);
+                                PrevSpec, DiagID, TagOrTempResult.get(), Owned,
+                                Policy);
   } else {
     DS.SetTypeSpecError();
     return;
@@ -1645,8 +1665,9 @@
   if (TUK == Sema::TUK_Definition &&
       (TemplateInfo.Kind || !isValidAfterTypeSpecifier(false))) {
     if (Tok.isNot(tok::semi)) {
-      ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
-        DeclSpec::getSpecifierName(TagType));
+      const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
+      ExpectAndConsume(tok::semi, diag::err_expected_after,
+                       DeclSpec::getSpecifierName(TagType, PPol));
       // Push this token back into the preprocessor and change our current token
       // to ';' so that the rest of the code recovers as though there were an
       // ';' after the definition.
@@ -1684,10 +1705,8 @@
 
     // If the next token is a comma, consume it and keep reading
     // base-specifiers.
-    if (Tok.isNot(tok::comma)) break;
-
-    // Consume the comma.
-    ConsumeToken();
+    if (!TryConsumeToken(tok::comma))
+      break;
   }
 
   // Attach the base specifiers
@@ -1713,10 +1732,8 @@
   MaybeParseCXX11Attributes(Attributes);
 
   // Parse the 'virtual' keyword.
-  if (Tok.is(tok::kw_virtual))  {
-    ConsumeToken();
+  if (TryConsumeToken(tok::kw_virtual))
     IsVirtual = true;
-  }
 
   CheckMisplacedCXX11Attribute(Attributes, StartLoc);
 
@@ -1753,9 +1770,8 @@
   // actually part of the base-specifier-list grammar productions, but we
   // parse it here for convenience.
   SourceLocation EllipsisLoc;
-  if (Tok.is(tok::ellipsis))
-    EllipsisLoc = ConsumeToken();
-  
+  TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
   // Find the complete source range for the base-specifier.
   SourceRange Range(StartLoc, EndLocation);
 
@@ -1793,8 +1809,8 @@
   DeclaratorChunk::FunctionTypeInfo &FTI
     = DeclaratorInfo.getFunctionTypeInfo();
 
-  for (unsigned ParamIdx = 0; ParamIdx < FTI.NumArgs; ++ParamIdx) {
-    if (LateMethod || FTI.ArgInfo[ParamIdx].DefaultArgTokens) {
+  for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx) {
+    if (LateMethod || FTI.Params[ParamIdx].DefaultArgTokens) {
       if (!LateMethod) {
         // Push this method onto the stack of late-parsed method
         // declarations.
@@ -1804,17 +1820,16 @@
 
         // Add all of the parameters prior to this one (they don't
         // have default arguments).
-        LateMethod->DefaultArgs.reserve(FTI.NumArgs);
+        LateMethod->DefaultArgs.reserve(FTI.NumParams);
         for (unsigned I = 0; I < ParamIdx; ++I)
           LateMethod->DefaultArgs.push_back(
-                             LateParsedDefaultArgument(FTI.ArgInfo[I].Param));
+              LateParsedDefaultArgument(FTI.Params[I].Param));
       }
 
       // Add this parameter to the list of parameters (it may or may
       // not have a default argument).
-      LateMethod->DefaultArgs.push_back(
-        LateParsedDefaultArgument(FTI.ArgInfo[ParamIdx].Param,
-                                  FTI.ArgInfo[ParamIdx].DefaultArgTokens));
+      LateMethod->DefaultArgs.push_back(LateParsedDefaultArgument(
+          FTI.Params[ParamIdx].Param, FTI.Params[ParamIdx].DefaultArgTokens));
     }
   }
 }
@@ -1826,30 +1841,28 @@
 ///         override
 ///         final
 VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
-  if (!getLangOpts().CPlusPlus)
+  if (!getLangOpts().CPlusPlus || Tok.isNot(tok::identifier))
     return VirtSpecifiers::VS_None;
 
-  if (Tok.is(tok::identifier)) {
-    IdentifierInfo *II = Tok.getIdentifierInfo();
+  IdentifierInfo *II = Tok.getIdentifierInfo();
 
-    // Initialize the contextual keywords.
-    if (!Ident_final) {
-      Ident_final = &PP.getIdentifierTable().get("final");
-      if (getLangOpts().MicrosoftExt)
-        Ident_sealed = &PP.getIdentifierTable().get("sealed");
-      Ident_override = &PP.getIdentifierTable().get("override");
-    }
-
-    if (II == Ident_override)
-      return VirtSpecifiers::VS_Override;
-
-    if (II == Ident_sealed)
-      return VirtSpecifiers::VS_Sealed;
-
-    if (II == Ident_final)
-      return VirtSpecifiers::VS_Final;
+  // Initialize the contextual keywords.
+  if (!Ident_final) {
+    Ident_final = &PP.getIdentifierTable().get("final");
+    if (getLangOpts().MicrosoftExt)
+      Ident_sealed = &PP.getIdentifierTable().get("sealed");
+    Ident_override = &PP.getIdentifierTable().get("override");
   }
 
+  if (II == Ident_override)
+    return VirtSpecifiers::VS_Override;
+
+  if (II == Ident_sealed)
+    return VirtSpecifiers::VS_Sealed;
+
+  if (II == Ident_final)
+    return VirtSpecifiers::VS_Final;
+
   return VirtSpecifiers::VS_None;
 }
 
@@ -1891,24 +1904,58 @@
 }
 
 /// isCXX11FinalKeyword - Determine whether the next token is a C++11
-/// contextual 'final' keyword.
+/// 'final' or Microsoft 'sealed' contextual keyword.
 bool Parser::isCXX11FinalKeyword() const {
-  if (!getLangOpts().CPlusPlus)
-    return false;
+  VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier();
+  return Specifier == VirtSpecifiers::VS_Final ||
+         Specifier == VirtSpecifiers::VS_Sealed;
+}
 
-  if (!Tok.is(tok::identifier))
-    return false;
-
-  // Initialize the contextual keywords.
-  if (!Ident_final) {
-    Ident_final = &PP.getIdentifierTable().get("final");
-    if (getLangOpts().MicrosoftExt)
-      Ident_sealed = &PP.getIdentifierTable().get("sealed");
-    Ident_override = &PP.getIdentifierTable().get("override");
+/// \brief Parse a C++ member-declarator up to, but not including, the optional
+/// brace-or-equal-initializer or pure-specifier.
+void Parser::ParseCXXMemberDeclaratorBeforeInitializer(
+    Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize,
+    LateParsedAttrList &LateParsedAttrs) {
+  // member-declarator:
+  //   declarator pure-specifier[opt]
+  //   declarator brace-or-equal-initializer[opt]
+  //   identifier[opt] ':' constant-expression
+  if (Tok.isNot(tok::colon)) {
+    // Don't parse FOO:BAR as if it were a typo for FOO::BAR; in this context
+    // it is a bitfield.
+    // FIXME: This should only apply when parsing the id-expression (see
+    // PR18587).
+    ColonProtectionRAIIObject X(*this);
+    ParseDeclarator(DeclaratorInfo);
   }
 
-  return Tok.getIdentifierInfo() == Ident_final ||
-         Tok.getIdentifierInfo() == Ident_sealed;
+  if (!DeclaratorInfo.isFunctionDeclarator() && TryConsumeToken(tok::colon)) {
+    BitfieldSize = ParseConstantExpression();
+    if (BitfieldSize.isInvalid())
+      SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
+  } else
+    ParseOptionalCXX11VirtSpecifierSeq(VS, getCurrentClass().IsInterface);
+
+  // If a simple-asm-expr is present, parse it.
+  if (Tok.is(tok::kw_asm)) {
+    SourceLocation Loc;
+    ExprResult AsmLabel(ParseSimpleAsm(&Loc));
+    if (AsmLabel.isInvalid())
+      SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
+
+    DeclaratorInfo.setAsmLabel(AsmLabel.release());
+    DeclaratorInfo.SetRangeEnd(Loc);
+  }
+
+  // If attributes exist after the declarator, but before an '{', parse them.
+  MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+
+  // For compatibility with code written against older versions of Clang,
+  // also accept a virt-specifier *after* the GNU attributes.
+  // FIXME: If we saw any attributes that are known to GCC followed by a
+  // virt-specifier, issue a GCC-compat warning.
+  if (BitfieldSize.isUnset() && VS.isUnset())
+    ParseOptionalCXX11VirtSpecifierSeq(VS, getCurrentClass().IsInterface);
 }
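
Illustration (not part of the diff; Base and Derived are hypothetical): the member-declarator forms the new helper parses up to, but not including, any initializer:

  struct Base { virtual void f(); };
  struct Derived : Base {
    unsigned bits : 4;   // identifier ':' constant-expression, parsed as a bit-field rather than a scope
    void f() override;   // declarator followed by a virt-specifier-seq
  };
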
 
 /// ParseCXXClassMemberDeclaration - Parse a C++ class member declaration.
@@ -1993,11 +2040,11 @@
       }
 
       // TODO: recover from mistakenly-qualified operator declarations.
-      if (ExpectAndConsume(tok::semi,
-                           diag::err_expected_semi_after,
-                           "access declaration",
-                           tok::semi))
+      if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+                           "access declaration")) {
+        SkipUntil(tok::semi);
         return;
+      }
 
       Actions.ActOnUsingDeclaration(getCurScope(), AS,
                                     /* HasUsingKeyword */ false,
@@ -2036,10 +2083,6 @@
                                           TemplateInfo, TemplateDiags);
   }
 
-  // Don't parse FOO:BAR as if it were a typo for FOO::BAR, in this context it
-  // is a bitfield.
-  ColonProtectionRAIIObject X(*this);
-
   ParsedAttributesWithRange attrs(AttrFactory);
   ParsedAttributesWithRange FnAttrs(AttrFactory);
   // Optional C++11 attribute-specifier
@@ -2078,8 +2121,14 @@
   DS.takeAttributesFrom(attrs);
   if (MalformedTypeSpec)
     DS.SetTypeSpecError();
-  ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class,
-                             &CommonLateParsedAttrs);
+
+  {
+    // Don't parse FOO:BAR as if it were a typo for FOO::BAR; in this context
+    // it is a bitfield.
+    ColonProtectionRAIIObject X(*this);
+    ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class,
+                               &CommonLateParsedAttrs);
+  }
 
   // If we had a free-standing type definition with a missing semicolon, we
   // may get this far before the problem becomes obvious.
@@ -2093,9 +2142,7 @@
       TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->data() : 0,
       TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->size() : 0);
 
-  if (Tok.is(tok::semi)) {
-    ConsumeToken();
-
+  if (TryConsumeToken(tok::semi)) {
     if (DS.isFriendSpecified())
       ProhibitAttributes(FnAttrs);
 
@@ -2114,30 +2161,30 @@
   SourceLocation EqualLoc;
   bool HasInitializer = false;
   ExprResult Init;
-  if (Tok.isNot(tok::colon)) {
-    // Don't parse FOO:BAR as if it were a typo for FOO::BAR.
-    ColonProtectionRAIIObject X(*this);
 
-    // Parse the first declarator.
-    ParseDeclarator(DeclaratorInfo);
-    // Error parsing the declarator?
-    if (!DeclaratorInfo.hasName()) {
-      // If so, skip until the semi-colon or a }.
-      SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
-      if (Tok.is(tok::semi))
-        ConsumeToken();
-      return;
-    }
+  SmallVector<Decl *, 8> DeclsInGroup;
+  ExprResult BitfieldSize;
+  bool ExpectSemi = true;
 
-    ParseOptionalCXX11VirtSpecifierSeq(VS, getCurrentClass().IsInterface);
+  // Parse the first declarator.
+  ParseCXXMemberDeclaratorBeforeInitializer(DeclaratorInfo, VS, BitfieldSize,
+                                            LateParsedAttrs);
 
-    // If attributes exist after the declarator, but before an '{', parse them.
-    MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+  // If this has neither a name nor a bit width, something has gone seriously
+  // wrong. Skip until the semi-colon or }.
+  if (!DeclaratorInfo.hasName() && BitfieldSize.isUnset()) {
+    // If so, skip until the semi-colon or a }.
+    SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+    TryConsumeToken(tok::semi);
+    return;
+  }
 
-    // MSVC permits pure specifier on inline functions declared at class scope.
+  // Check for a member function definition.
+  if (BitfieldSize.isUnset()) {
+    // MSVC permits a pure specifier on inline functions defined at class scope.
     // Hence check for =0 before checking for function definition.
     if (getLangOpts().MicrosoftExt && Tok.is(tok::equal) &&
-        DeclaratorInfo.isFunctionDeclarator() && 
+        DeclaratorInfo.isFunctionDeclarator() &&
         NextToken().is(tok::numeric_constant)) {
       EqualLoc = ConsumeToken();
       Init = ParseInitializer();
@@ -2183,8 +2230,8 @@
         SkipUntil(tok::r_brace);
 
         // Consume the optional ';'
-        if (Tok.is(tok::semi))
-          ConsumeToken();
+        TryConsumeToken(tok::semi);
+
         return;
       }
 
@@ -2222,40 +2269,7 @@
   //   member-declarator
   //   member-declarator-list ',' member-declarator
 
-  SmallVector<Decl *, 8> DeclsInGroup;
-  ExprResult BitfieldSize;
-  bool ExpectSemi = true;
-
   while (1) {
-    // member-declarator:
-    //   declarator pure-specifier[opt]
-    //   declarator brace-or-equal-initializer[opt]
-    //   identifier[opt] ':' constant-expression
-    if (Tok.is(tok::colon)) {
-      ConsumeToken();
-      BitfieldSize = ParseConstantExpression();
-      if (BitfieldSize.isInvalid())
-        SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
-    }
-
-    // If a simple-asm-expr is present, parse it.
-    if (Tok.is(tok::kw_asm)) {
-      SourceLocation Loc;
-      ExprResult AsmLabel(ParseSimpleAsm(&Loc));
-      if (AsmLabel.isInvalid())
-        SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
- 
-      DeclaratorInfo.setAsmLabel(AsmLabel.release());
-      DeclaratorInfo.SetRangeEnd(Loc);
-    }
-
-    // If attributes exist after the declarator, parse them.
-    MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
-
-    // FIXME: When g++ adds support for this, we'll need to check whether it
-    // goes before or after the GNU attributes and __asm__.
-    ParseOptionalCXX11VirtSpecifierSeq(VS, getCurrentClass().IsInterface);
-
     InClassInitStyle HasInClassInit = ICIS_NoInit;
     if ((Tok.is(tok::equal) || Tok.is(tok::l_brace)) && !HasInitializer) {
       if (BitfieldSize.get()) {
@@ -2276,22 +2290,18 @@
 
     NamedDecl *ThisDecl = 0;
     if (DS.isFriendSpecified()) {
-      // C++11 [dcl.attr.grammar] p4: If an attribute-specifier-seq appertains 
+      // C++11 [dcl.attr.grammar] p4: If an attribute-specifier-seq appertains
       // to a friend declaration, that declaration shall be a definition.
       //
-      // Diagnose attributes appear after friend member function declarator:
-      // foo [[]] ();
+      // Diagnose attributes that appear in a friend member function declarator:
+      //   friend int foo [[]] ();
       SmallVector<SourceRange, 4> Ranges;
       DeclaratorInfo.getCXX11AttributeRanges(Ranges);
-      if (!Ranges.empty()) {
-        for (SmallVectorImpl<SourceRange>::iterator I = Ranges.begin(),
-             E = Ranges.end(); I != E; ++I) {
-          Diag((*I).getBegin(), diag::err_attributes_not_allowed) 
-            << *I;
-        }
-      }
+      for (SmallVectorImpl<SourceRange>::iterator I = Ranges.begin(),
+           E = Ranges.end(); I != E; ++I)
+        Diag((*I).getBegin(), diag::err_attributes_not_allowed) << *I;
 
-      // TODO: handle initializers, bitfields, 'delete'
+      // TODO: handle initializers, VS, bitfields, 'delete'
       ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
                                                  TemplateParams);
     } else {
@@ -2372,12 +2382,10 @@
 
     // If we don't have a comma, it is either the end of the list (a ';')
     // or an error, bail out.
-    if (Tok.isNot(tok::comma))
+    SourceLocation CommaLoc;
+    if (!TryConsumeToken(tok::comma, CommaLoc))
       break;
 
-    // Consume the comma.
-    SourceLocation CommaLoc = ConsumeToken();
-
     if (Tok.isAtStartOfLine() &&
         !MightBeDeclarator(Declarator::MemberContext)) {
       // This comma was followed by a line-break and something which can't be
@@ -2397,11 +2405,11 @@
     HasInitializer = false;
     DeclaratorInfo.setCommaLoc(CommaLoc);
 
-    // Attributes are only allowed on the second declarator.
+    // GNU attributes are allowed before each declarator after the first.
     MaybeParseGNUAttributes(DeclaratorInfo);
 
-    if (Tok.isNot(tok::colon))
-      ParseDeclarator(DeclaratorInfo);
+    ParseCXXMemberDeclaratorBeforeInitializer(DeclaratorInfo, VS, BitfieldSize,
+                                              LateParsedAttrs);
   }
 
   if (ExpectSemi &&
@@ -2409,7 +2417,7 @@
     // Skip to end of block or statement.
     SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
     // If we stopped at a ';', eat it.
-    if (Tok.is(tok::semi)) ConsumeToken();
+    TryConsumeToken(tok::semi);
     return;
   }
 
@@ -2446,8 +2454,7 @@
   EnterExpressionEvaluationContext Context(Actions, 
                                            Sema::PotentiallyEvaluated,
                                            D);
-  if (Tok.is(tok::equal)) {
-    EqualLoc = ConsumeToken();
+  if (TryConsumeToken(tok::equal, EqualLoc)) {
     if (Tok.is(tok::kw_delete)) {
       // In principle, an initializer of '= delete p;' is legal, but it will
       // never type-check. It's better to diagnose it as an ill-formed expression
@@ -2456,7 +2463,7 @@
       // a top-level comma always ends the initializer expression.
       const Token &Next = NextToken();
       if (IsFunction || Next.is(tok::semi) || Next.is(tok::comma) ||
-           Next.is(tok::eof)) {
+          Next.is(tok::eof)) {
         if (IsFunction)
           Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
             << 1 /* delete */;
@@ -2510,7 +2517,7 @@
             << /*ErrorType=*/6
             << (isa<NamedDecl>(TagDecl)
                   ? cast<NamedDecl>(TagDecl)->getQualifiedNameAsString()
-                  : "<anonymous>");
+                  : "(anonymous)");
         }
         break;
       }
@@ -2601,7 +2608,7 @@
 
   if (TagDecl) {
     // While we still have something to read, read the member-declarations.
-    while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+    while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
       // Each iteration of this loop reads one member-declaration.
 
       if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
@@ -2636,6 +2643,11 @@
         continue;
       }
 
+      if (Tok.is(tok::annot_pragma_ms_pointers_to_members)) {
+        HandlePragmaMSPointersToMembers();
+        continue;
+      }
+
       // If we see a namespace here, a close brace was missing somewhere.
       if (Tok.is(tok::kw_namespace)) {
         DiagnoseUnexpectedNamespace(cast<NamedDecl>(TagDecl));
@@ -2653,18 +2665,14 @@
         MaybeParseGNUAttributes(AccessAttrs);
 
         SourceLocation EndLoc;
-        if (Tok.is(tok::colon)) {
-          EndLoc = Tok.getLocation();
-          ConsumeToken();
-        } else if (Tok.is(tok::semi)) {
-          EndLoc = Tok.getLocation();
-          ConsumeToken();
-          Diag(EndLoc, diag::err_expected_colon) 
-            << FixItHint::CreateReplacement(EndLoc, ":");
+        if (TryConsumeToken(tok::colon, EndLoc)) {
+        } else if (TryConsumeToken(tok::semi, EndLoc)) {
+          Diag(EndLoc, diag::err_expected)
+              << tok::colon << FixItHint::CreateReplacement(EndLoc, ":");
         } else {
           EndLoc = ASLoc.getLocWithOffset(TokLength);
-          Diag(EndLoc, diag::err_expected_colon) 
-            << FixItHint::CreateInsertion(EndLoc, ":");
+          Diag(EndLoc, diag::err_expected)
+              << tok::colon << FixItHint::CreateInsertion(EndLoc, ":");
         }
 
         // The Microsoft extension __interface does not permit non-public
@@ -2810,7 +2818,8 @@
         << FixItHint::CreateInsertion(Loc, ", ");
     } else {
       // Skip over garbage, until we get to '{'.  Don't eat the '{'.
-      Diag(Tok.getLocation(), diag::err_expected_lbrace_or_comma);
+      Diag(Tok.getLocation(), diag::err_expected_either) << tok::l_brace
+                                                         << tok::comma;
       SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch);
       break;
     }
@@ -2878,8 +2887,7 @@
       return true;
 
     SourceLocation EllipsisLoc;
-    if (Tok.is(tok::ellipsis))
-      EllipsisLoc = ConsumeToken();
+    TryConsumeToken(tok::ellipsis, EllipsisLoc);
 
     return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
                                        TemplateTypeTy, DS, IdLoc, 
@@ -2899,8 +2907,7 @@
     T.consumeClose();
 
     SourceLocation EllipsisLoc;
-    if (Tok.is(tok::ellipsis))
-      EllipsisLoc = ConsumeToken();
+    TryConsumeToken(tok::ellipsis, EllipsisLoc);
 
     return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
                                        TemplateTypeTy, DS, IdLoc,
@@ -2908,9 +2915,10 @@
                                        T.getCloseLocation(), EllipsisLoc);
   }
 
-  Diag(Tok, getLangOpts().CPlusPlus11 ? diag::err_expected_lparen_or_lbrace
-                                  : diag::err_expected_lparen);
-  return true;
+  if (getLangOpts().CPlusPlus11)
+    return Diag(Tok, diag::err_expected_either) << tok::l_paren << tok::l_brace;
+  else
+    return Diag(Tok, diag::err_expected) << tok::l_paren;
 }
 
 /// \brief Parse a C++ exception-specification if present (C++0x [except.spec]).
@@ -3054,10 +3062,8 @@
       Exceptions.push_back(Res.get());
       Ranges.push_back(Range);
     }
-    
-    if (Tok.is(tok::comma))
-      ConsumeToken();
-    else
+
+    if (!TryConsumeToken(tok::comma))
       break;
   }
 
@@ -3193,8 +3199,50 @@
   }
 }
 
-/// ParseCXX11AttributeSpecifier - Parse a C++11 attribute-specifier. Currently
-/// only parses standard attributes.
+/// ParseCXX11AttributeArgs -- Parse a C++11 attribute-argument-clause.
+///
+/// [C++11] attribute-argument-clause:
+///         '(' balanced-token-seq ')'
+///
+/// [C++11] balanced-token-seq:
+///         balanced-token
+///         balanced-token-seq balanced-token
+///
+/// [C++11] balanced-token:
+///         '(' balanced-token-seq ')'
+///         '[' balanced-token-seq ']'
+///         '{' balanced-token-seq '}'
+///         any token but '(', ')', '[', ']', '{', or '}'
+bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
+                                     SourceLocation AttrNameLoc,
+                                     ParsedAttributes &Attrs,
+                                     SourceLocation *EndLoc,
+                                     IdentifierInfo *ScopeName,
+                                     SourceLocation ScopeLoc) {
+  assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
+
+  // If the attribute isn't known, we will not attempt to parse any
+  // arguments.
+  if (!hasAttribute(AttrSyntax::CXX, ScopeName, AttrName,
+                    getTargetInfo().getTriple(), getLangOpts())) {
+    // Eat the left paren, then skip to the ending right paren.
+    ConsumeParen();
+    SkipUntil(tok::r_paren);
+    return false;
+  }
+
+  if (ScopeName && ScopeName->getName() == "gnu")
+    // GNU-scoped attributes have some special cases to handle GNU-specific
+    // behaviors.
+    ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+                          ScopeLoc, AttributeList::AS_CXX11, 0);
+  else
+    ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+                             ScopeLoc, AttributeList::AS_CXX11);
+  return true;
+}
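
Illustration (not part of the diff): how the new argument-clause handling applies. gnu::aligned is a real GNU-scoped attribute routed through ParseGNUAttributeArgs; vendor::fancy is a hypothetical unknown attribute whose parenthesized arguments are simply skipped as balanced tokens:

  [[noreturn]] void fail();                      // standard attribute, no argument-clause
  [[gnu::aligned(16)]] int buffer[64];           // GNU-scoped: arguments parsed via the GNU path
  [[vendor::fancy("ignored", 42)]] int counter;  // unknown attribute: '(' balanced-token-seq ')' skipped
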
+
+/// ParseCXX11AttributeSpecifier - Parse a C++11 attribute-specifier.
 ///
 /// [C++11] attribute-specifier:
 ///         '[' '[' attribute-list ']' ']'
@@ -3218,19 +3266,6 @@
 ///
 /// [C++11] attribute-namespace:
 ///         identifier
-///
-/// [C++11] attribute-argument-clause:
-///         '(' balanced-token-seq ')'
-///
-/// [C++11] balanced-token-seq:
-///         balanced-token
-///         balanced-token-seq balanced-token
-///
-/// [C++11] balanced-token:
-///         '(' balanced-token-seq ')'
-///         '[' balanced-token-seq ']'
-///         '{' balanced-token-seq '}'
-///         any token but '(', ')', '[', ']', '{', or '}'
 void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                           SourceLocation *endLoc) {
   if (Tok.is(tok::kw_alignas)) {
@@ -3251,10 +3286,8 @@
 
   while (Tok.isNot(tok::r_square)) {
     // attribute not present
-    if (Tok.is(tok::comma)) {
-      ConsumeToken();
+    if (TryConsumeToken(tok::comma))
       continue;
-    }
 
     SourceLocation ScopeLoc, AttrLoc;
     IdentifierInfo *ScopeName = 0, *AttrName = 0;
@@ -3265,43 +3298,34 @@
       break;
 
     // scoped attribute
-    if (Tok.is(tok::coloncolon)) {
-      ConsumeToken();
-
+    if (TryConsumeToken(tok::coloncolon)) {
       ScopeName = AttrName;
       ScopeLoc = AttrLoc;
 
       AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
       if (!AttrName) {
-        Diag(Tok.getLocation(), diag::err_expected_ident);
+        Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
         SkipUntil(tok::r_square, tok::comma, StopAtSemi | StopBeforeMatch);
         continue;
       }
     }
 
-    bool StandardAttr = IsBuiltInOrStandardCXX11Attribute(AttrName,ScopeName);
+    bool StandardAttr = IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName);
     bool AttrParsed = false;
 
     if (StandardAttr &&
         !SeenAttrs.insert(std::make_pair(AttrName, AttrLoc)).second)
       Diag(AttrLoc, diag::err_cxx11_attribute_repeated)
-        << AttrName << SourceRange(SeenAttrs[AttrName]);
+          << AttrName << SourceRange(SeenAttrs[AttrName]);
 
     // Parse attribute arguments
     if (Tok.is(tok::l_paren)) {
-      if (ScopeName && ScopeName->getName() == "gnu") {
-        ParseGNUAttributeArgs(AttrName, AttrLoc, attrs, endLoc,
-                              ScopeName, ScopeLoc, AttributeList::AS_CXX11);
-        AttrParsed = true;
-      } else {
-        if (StandardAttr)
-          Diag(Tok.getLocation(), diag::err_cxx11_attribute_forbids_arguments)
+      if (StandardAttr)
+        Diag(Tok.getLocation(), diag::err_cxx11_attribute_forbids_arguments)
             << AttrName->getName();
 
-        // FIXME: handle other formats of c++11 attribute arguments
-        ConsumeParen();
-        SkipUntil(tok::r_paren);
-      }
+      AttrParsed = ParseCXX11AttributeArgs(AttrName, AttrLoc, attrs, endLoc,
+                                           ScopeName, ScopeLoc);
     }
 
     if (!AttrParsed)
@@ -3310,19 +3334,16 @@
                                AttrLoc),
                    ScopeName, ScopeLoc, 0, 0, AttributeList::AS_CXX11);
 
-    if (Tok.is(tok::ellipsis)) {
-      ConsumeToken();
-
+    if (TryConsumeToken(tok::ellipsis))
       Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
         << AttrName->getName();
-    }
   }
 
-  if (ExpectAndConsume(tok::r_square, diag::err_expected_rsquare))
+  if (ExpectAndConsume(tok::r_square))
     SkipUntil(tok::r_square);
   if (endLoc)
     *endLoc = Tok.getLocation();
-  if (ExpectAndConsume(tok::r_square, diag::err_expected_rsquare))
+  if (ExpectAndConsume(tok::r_square))
     SkipUntil(tok::r_square);
 }
 
@@ -3393,7 +3414,7 @@
     ConsumeBracket();
     SkipUntil(tok::r_square, StopAtSemi | StopBeforeMatch);
     if (endLoc) *endLoc = Tok.getLocation();
-    ExpectAndConsume(tok::r_square, diag::err_expected_rsquare);
+    ExpectAndConsume(tok::r_square);
   }
 }
 
@@ -3405,7 +3426,7 @@
   
   BalancedDelimiterTracker Braces(*this, tok::l_brace);
   if (Braces.consumeOpen()) {
-    Diag(Tok, diag::err_expected_lbrace);
+    Diag(Tok, diag::err_expected) << tok::l_brace;
     return;
   }
 
@@ -3424,7 +3445,7 @@
     return;
   }
 
-  while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+  while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
     // __if_exists, __if_not_exists can nest.
     if ((Tok.is(tok::kw___if_exists) || Tok.is(tok::kw___if_not_exists))) {
       ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType, CurAS);
@@ -3446,7 +3467,7 @@
       if (Tok.is(tok::colon))
         Actions.ActOnAccessSpecifier(AS, ASLoc, Tok.getLocation());
       else
-        Diag(Tok, diag::err_expected_colon);
+        Diag(Tok, diag::err_expected) << tok::colon;
       ConsumeToken();
       continue;
     }
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index 45f1b1d..ed9d72d 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -23,6 +23,7 @@
 
 #include "clang/Parse/Parser.h"
 #include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
 #include "clang/Basic/PrettyStackTrace.h"
 #include "clang/Sema/DeclSpec.h"
 #include "clang/Sema/ParsedTemplate.h"
@@ -269,10 +270,7 @@
         Diag(Tok, diag::ext_gnu_conditional_expr);
       }
 
-      if (Tok.is(tok::colon)) {
-        // Eat the colon.
-        ColonLoc = ConsumeToken();
-      } else {
+      if (!TryConsumeToken(tok::colon, ColonLoc)) {
         // Otherwise, we're missing a ':'.  Assume that this was a typo that
         // the user forgot. If we're not in a macro expansion, we can suggest
         // a fixit hint. If there were two spaces before the current token,
@@ -294,10 +292,10 @@
             }
           }
         }
-        
-        Diag(Tok, diag::err_expected_colon)
-          << FixItHint::CreateInsertion(FILoc, FIText);
-        Diag(OpToken, diag::note_matching) << "?";
+
+        Diag(Tok, diag::err_expected)
+            << tok::colon << FixItHint::CreateInsertion(FILoc, FIText);
+        Diag(OpToken, diag::note_matching) << tok::question;
         ColonLoc = Tok.getLocation();
       }
     }
@@ -430,7 +428,7 @@
     WantTypeSpecifiers = AllowTypes;
   }
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     NamedDecl *ND = candidate.getCorrectionDecl();
     if (!ND)
       return candidate.isKeyword();
@@ -715,48 +713,11 @@
       // If this identifier was reverted from a token ID, and the next token
       // is a parenthesis, this is likely to be a use of a type trait. Check
       // those tokens.
-      if (Next.is(tok::l_paren) &&
-          Tok.is(tok::identifier) &&
-          Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier()) {
-        IdentifierInfo *II = Tok.getIdentifierInfo();
-        // Build up the mapping of revertable type traits, for future use.
-        if (RevertableTypeTraits.empty()) {
-#define RTT_JOIN(X,Y) X##Y
-#define REVERTABLE_TYPE_TRAIT(Name)                         \
-          RevertableTypeTraits[PP.getIdentifierInfo(#Name)] \
-            = RTT_JOIN(tok::kw_,Name)
-
-          REVERTABLE_TYPE_TRAIT(__is_arithmetic);
-          REVERTABLE_TYPE_TRAIT(__is_convertible);
-          REVERTABLE_TYPE_TRAIT(__is_empty);
-          REVERTABLE_TYPE_TRAIT(__is_floating_point);
-          REVERTABLE_TYPE_TRAIT(__is_function);
-          REVERTABLE_TYPE_TRAIT(__is_fundamental);
-          REVERTABLE_TYPE_TRAIT(__is_integral);
-          REVERTABLE_TYPE_TRAIT(__is_member_function_pointer);
-          REVERTABLE_TYPE_TRAIT(__is_member_pointer);
-          REVERTABLE_TYPE_TRAIT(__is_pod);
-          REVERTABLE_TYPE_TRAIT(__is_pointer);
-          REVERTABLE_TYPE_TRAIT(__is_same);
-          REVERTABLE_TYPE_TRAIT(__is_scalar);
-          REVERTABLE_TYPE_TRAIT(__is_signed);
-          REVERTABLE_TYPE_TRAIT(__is_unsigned);
-          REVERTABLE_TYPE_TRAIT(__is_void);
-#undef REVERTABLE_TYPE_TRAIT
-#undef RTT_JOIN
-        }
-
-        // If we find that this is in fact the name of a type trait,
-        // update the token kind in place and parse again to treat it as
-        // the appropriate kind of type trait.
-        llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind>::iterator Known
-          = RevertableTypeTraits.find(II);
-        if (Known != RevertableTypeTraits.end()) {
-          Tok.setKind(Known->second);
-          return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
-                                     NotCastExpr, isTypeCast);
-        }
-      }
+      if (Next.is(tok::l_paren) && Tok.is(tok::identifier) &&
+          Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier() &&
+          TryIdentKeywordUpgrade())
+        return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+                                   NotCastExpr, isTypeCast);
 
       if (Next.is(tok::coloncolon) ||
           (!ColonIsSacred && Next.is(tok::colon)) ||
@@ -830,7 +791,8 @@
             DS.SetRangeEnd(ILoc);
             const char *PrevSpec = 0;
             unsigned DiagID;
-            DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ);
+            DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ,
+                               Actions.getASTContext().getPrintingPolicy());
             
             Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
             TypeResult Ty = Actions.ActOnTypeName(getCurScope(), 
@@ -955,7 +917,7 @@
   case tok::ampamp: {      // unary-expression: '&&' identifier
     SourceLocation AmpAmpLoc = ConsumeToken();
     if (Tok.isNot(tok::identifier))
-      return ExprError(Diag(Tok, diag::err_expected_ident));
+      return ExprError(Diag(Tok, diag::err_expected) << tok::identifier);
 
     if (getCurScope()->getFnParent() == 0)
       return ExprError(Diag(Tok, diag::err_address_of_label_outside_fn));
@@ -995,7 +957,8 @@
       const char *PrevSpec = 0;
       unsigned DiagID;
       DS.SetTypeSpecType(TST_typename, Tok.getAnnotationEndLoc(),
-                         PrevSpec, DiagID, Type);
+                         PrevSpec, DiagID, Type,
+                         Actions.getASTContext().getPrintingPolicy());
 
       Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
       TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
@@ -1028,15 +991,7 @@
   case tok::kw_void:
   case tok::kw_typename:
   case tok::kw_typeof:
-  case tok::kw___vector:
-  case tok::kw_image1d_t:
-  case tok::kw_image1d_array_t:
-  case tok::kw_image1d_buffer_t:
-  case tok::kw_image2d_t:
-  case tok::kw_image2d_array_t:
-  case tok::kw_image3d_t:
-  case tok::kw_sampler_t:
-  case tok::kw_event_t: {
+  case tok::kw___vector: {
     if (!getLangOpts().CPlusPlus) {
       Diag(Tok, diag::err_expected_expression);
       return ExprError();
@@ -1168,65 +1123,9 @@
     return Result;
   }
 
-  case tok::kw___is_abstract: // [GNU] unary-type-trait
-  case tok::kw___is_class:
-  case tok::kw___is_empty:
-  case tok::kw___is_enum:
-  case tok::kw___is_interface_class:
-  case tok::kw___is_literal:
-  case tok::kw___is_arithmetic:
-  case tok::kw___is_integral:
-  case tok::kw___is_floating_point:
-  case tok::kw___is_complete_type:
-  case tok::kw___is_void:
-  case tok::kw___is_array:
-  case tok::kw___is_function:
-  case tok::kw___is_reference:
-  case tok::kw___is_lvalue_reference:
-  case tok::kw___is_rvalue_reference:
-  case tok::kw___is_fundamental:
-  case tok::kw___is_object:
-  case tok::kw___is_scalar:
-  case tok::kw___is_compound:
-  case tok::kw___is_pointer:
-  case tok::kw___is_member_object_pointer:
-  case tok::kw___is_member_function_pointer:
-  case tok::kw___is_member_pointer:
-  case tok::kw___is_const:
-  case tok::kw___is_volatile:
-  case tok::kw___is_standard_layout:
-  case tok::kw___is_signed:
-  case tok::kw___is_unsigned:
-  case tok::kw___is_literal_type:
-  case tok::kw___is_pod:
-  case tok::kw___is_polymorphic:
-  case tok::kw___is_trivial:
-  case tok::kw___is_trivially_copyable:
-  case tok::kw___is_union:
-  case tok::kw___is_final:
-  case tok::kw___is_sealed:
-  case tok::kw___has_trivial_constructor:
-  case tok::kw___has_trivial_move_constructor:
-  case tok::kw___has_trivial_copy:
-  case tok::kw___has_trivial_assign:
-  case tok::kw___has_trivial_move_assign:
-  case tok::kw___has_trivial_destructor:
-  case tok::kw___has_nothrow_assign:
-  case tok::kw___has_nothrow_move_assign:
-  case tok::kw___has_nothrow_copy:
-  case tok::kw___has_nothrow_constructor:
-  case tok::kw___has_virtual_destructor:
-    return ParseUnaryTypeTrait();
-
-  case tok::kw___builtin_types_compatible_p:
-  case tok::kw___is_base_of:
-  case tok::kw___is_same:
-  case tok::kw___is_convertible:
-  case tok::kw___is_convertible_to:
-  case tok::kw___is_trivially_assignable:
-    return ParseBinaryTypeTrait();
-
-  case tok::kw___is_trivially_constructible:
+#define TYPE_TRAIT(N,Spelling,K) \
+  case tok::kw_##Spelling:
+#include "clang/Basic/TokenKinds.def"
     return ParseTypeTrait();
       
   case tok::kw___array_rank:
@@ -1387,21 +1286,20 @@
           LHS = ExprError();
         }
 
-        SourceLocation CloseLoc = Tok.getLocation();
-        if (Tok.is(tok::greatergreatergreater)) {
-          ConsumeToken();
+        SourceLocation CloseLoc;
+        if (TryConsumeToken(tok::greatergreatergreater, CloseLoc)) {
         } else if (LHS.isInvalid()) {
           SkipUntil(tok::greatergreatergreater, StopAtSemi);
         } else {
           // There was an error closing the brackets
-          Diag(Tok, diag::err_expected_ggg);
-          Diag(OpenLoc, diag::note_matching) << "<<<";
+          Diag(Tok, diag::err_expected) << tok::greatergreatergreater;
+          Diag(OpenLoc, diag::note_matching) << tok::lesslessless;
           SkipUntil(tok::greatergreatergreater, StopAtSemi);
           LHS = ExprError();
         }
 
         if (!LHS.isInvalid()) {
-          if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen, ""))
+          if (ExpectAndConsume(tok::l_paren))
             LHS = ExprError();
           else
             Loc = PrevTokLocation;
@@ -1475,8 +1373,8 @@
             (BaseType->isFunctionType() ||
              BaseType->isSpecificPlaceholderType(BuiltinType::BoundMember))) {
           Diag(OpLoc, diag::err_function_is_not_record)
-            << (OpKind == tok::arrow) << Base->getSourceRange()
-            << FixItHint::CreateRemoval(OpLoc);
+              << OpKind << Base->getSourceRange()
+              << FixItHint::CreateRemoval(OpLoc);
           return ParsePostfixExpressionSuffix(Base);
         }
 
@@ -1597,8 +1495,7 @@
      // parentheses around type name.
     if (OpTok.is(tok::kw_sizeof)  || OpTok.is(tok::kw___alignof) ||
         OpTok.is(tok::kw_alignof) || OpTok.is(tok::kw__Alignof)) {
-      bool isAmbiguousTypeId;
-      if (isTypeIdInParens(isAmbiguousTypeId)) {
+      if (isTypeIdUnambiguously()) {
         DeclSpec DS(AttrFactory);
         ParseSpecifierQualifierList(DS);
         Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
@@ -1617,7 +1514,8 @@
 
     isCastExpr = false;
     if (OpTok.is(tok::kw_typeof) && !getLangOpts().CPlusPlus) {
-      Diag(Tok,diag::err_expected_lparen_after_id) << OpTok.getIdentifierInfo();
+      Diag(Tok, diag::err_expected_after) << OpTok.getIdentifierInfo()
+                                          << tok::l_paren;
       return ExprError();
     }
 
@@ -1789,8 +1687,8 @@
 
   // All of these start with an open paren.
   if (Tok.isNot(tok::l_paren))
-    return ExprError(Diag(Tok, diag::err_expected_lparen_after_id)
-                       << BuiltinII);
+    return ExprError(Diag(Tok, diag::err_expected_after) << BuiltinII
+                                                         << tok::l_paren);
 
   BalancedDelimiterTracker PT(*this, tok::l_paren);
   PT.consumeOpen();
@@ -1802,13 +1700,15 @@
   case tok::kw___builtin_va_arg: {
     ExprResult Expr(ParseAssignmentExpression());
 
-    if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+    if (ExpectAndConsume(tok::comma)) {
+      SkipUntil(tok::r_paren, StopAtSemi);
       Expr = ExprError();
+    }
 
     TypeResult Ty = ParseTypeName();
 
     if (Tok.isNot(tok::r_paren)) {
-      Diag(Tok, diag::err_expected_rparen);
+      Diag(Tok, diag::err_expected) << tok::r_paren;
       Expr = ExprError();
     }
 
@@ -1826,12 +1726,14 @@
       return ExprError();
     }
 
-    if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+    if (ExpectAndConsume(tok::comma)) {
+      SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
+    }
 
     // We must have at least one identifier here.
     if (Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident);
+      Diag(Tok, diag::err_expected) << tok::identifier;
       SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
     }
@@ -1853,7 +1755,7 @@
         Comps.back().LocStart = ConsumeToken();
 
         if (Tok.isNot(tok::identifier)) {
-          Diag(Tok, diag::err_expected_ident);
+          Diag(Tok, diag::err_expected) << tok::identifier;
           SkipUntil(tok::r_paren, StopAtSemi);
           return ExprError();
         }
@@ -1902,16 +1804,20 @@
       SkipUntil(tok::r_paren, StopAtSemi);
       return Cond;
     }
-    if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+    if (ExpectAndConsume(tok::comma)) {
+      SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
+    }
 
     ExprResult Expr1(ParseAssignmentExpression());
     if (Expr1.isInvalid()) {
       SkipUntil(tok::r_paren, StopAtSemi);
       return Expr1;
     }
-    if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+    if (ExpectAndConsume(tok::comma)) {
+      SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
+    }
 
     ExprResult Expr2(ParseAssignmentExpression());
     if (Expr2.isInvalid()) {
@@ -1919,7 +1825,7 @@
       return Expr2;
     }
     if (Tok.isNot(tok::r_paren)) {
-      Diag(Tok, diag::err_expected_rparen);
+      Diag(Tok, diag::err_expected) << tok::r_paren;
       return ExprError();
     }
     Res = Actions.ActOnChooseExpr(StartLoc, Cond.take(), Expr1.take(),
@@ -1933,11 +1839,12 @@
       SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
     }
-    
-    if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "", 
-                         tok::r_paren))
+
+    if (ExpectAndConsume(tok::comma)) {
+      SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
-    
+    }
+
     // Second argument is the type to bitcast to.
     TypeResult DestTy = ParseTypeName();
     if (DestTy.isInvalid())
@@ -1945,7 +1852,7 @@
     
     // Attempt to consume the r-paren.
     if (Tok.isNot(tok::r_paren)) {
-      Diag(Tok, diag::err_expected_rparen);
+      Diag(Tok, diag::err_expected) << tok::r_paren;
       SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
     }
@@ -1961,11 +1868,12 @@
       SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
     }
-    
-    if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "", 
-                         tok::r_paren))
+
+    if (ExpectAndConsume(tok::comma)) {
+      SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
-    
+    }
+
     // Second argument is the type to bitcast to.
     TypeResult DestTy = ParseTypeName();
     if (DestTy.isInvalid())
@@ -1973,7 +1881,7 @@
     
     // Attempt to consume the r-paren.
     if (Tok.isNot(tok::r_paren)) {
-      Diag(Tok, diag::err_expected_rparen);
+      Diag(Tok, diag::err_expected) << tok::r_paren;
       SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
     }
@@ -2042,7 +1950,7 @@
                       Tok.is(tok::kw___bridge_retained) ||
                       Tok.is(tok::kw___bridge_retain)));
   if (BridgeCast && !getLangOpts().ObjCAutoRefCount) {
-    if (Tok.isNot(tok::kw___bridge)) {
+    if (!TryConsumeToken(tok::kw___bridge)) {
       StringRef BridgeCastName = Tok.getName();
       SourceLocation BridgeKeywordLoc = ConsumeToken();
       if (!PP.getSourceManager().isInSystemHeader(BridgeKeywordLoc))
@@ -2050,8 +1958,6 @@
           << BridgeCastName
           << FixItHint::CreateReplacement(BridgeKeywordLoc, "");
     }
-    else
-      ConsumeToken(); // consume __bridge
     BridgeCast = false;
   }
   
@@ -2299,7 +2205,7 @@
     Diag(KeyLoc, diag::ext_c11_generic_selection);
 
   BalancedDelimiterTracker T(*this, tok::l_paren);
-  if (T.expectAndConsume(diag::err_expected_lparen))
+  if (T.expectAndConsume())
     return ExprError();
 
   ExprResult ControllingExpr;
@@ -2314,7 +2220,7 @@
     }
   }
 
-  if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "")) {
+  if (ExpectAndConsume(tok::comma)) {
     SkipUntil(tok::r_paren, StopAtSemi);
     return ExprError();
   }
@@ -2322,7 +2228,7 @@
   SourceLocation DefaultLoc;
   TypeVector Types;
   ExprVector Exprs;
-  while (1) {
+  do {
     ParsedType Ty;
     if (Tok.is(tok::kw_default)) {
       // C11 6.5.1.1p2 "A generic selection shall have no more than one default
@@ -2346,7 +2252,7 @@
     }
     Types.push_back(Ty);
 
-    if (ExpectAndConsume(tok::colon, diag::err_expected_colon, "")) {
+    if (ExpectAndConsume(tok::colon)) {
       SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
     }
@@ -2359,11 +2265,7 @@
       return ExprError();
     }
     Exprs.push_back(ER.release());
-
-    if (Tok.isNot(tok::comma))
-      break;
-    ConsumeToken();
-  }
+  } while (TryConsumeToken(tok::comma));
 
   T.consumeClose();
   if (T.getCloseLocation().isInvalid())
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index 5fe47fc..f10ca6a 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -10,12 +10,13 @@
 // This file implements the Expression parsing implementation for C++.
 //
 //===----------------------------------------------------------------------===//
-#include "clang/AST/DeclTemplate.h"
-#include "clang/Parse/Parser.h"
+#include "clang/AST/ASTContext.h"
 #include "RAIIObjectsForParser.h"
+#include "clang/AST/DeclTemplate.h"
 #include "clang/Basic/PrettyStackTrace.h"
 #include "clang/Lex/LiteralSupport.h"
 #include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
 #include "clang/Sema/DeclSpec.h"
 #include "clang/Sema/ParsedTemplate.h"
 #include "clang/Sema/Scope.h"
@@ -26,7 +27,9 @@
 
 static int SelectDigraphErrorMessage(tok::TokenKind Kind) {
   switch (Kind) {
-    case tok::kw_template:         return 0;
+    // template name
+    case tok::unknown:             return 0;
+    // casts
     case tok::kw_const_cast:       return 1;
     case tok::kw_dynamic_cast:     return 2;
     case tok::kw_reinterpret_cast: return 3;
@@ -93,7 +96,7 @@
                               Template, MemberOfUnknownSpecialization))
     return;
 
-  FixDigraph(*this, PP, Next, SecondToken, tok::kw_template,
+  FixDigraph(*this, PP, Next, SecondToken, tok::unknown,
              /*AtDigraph*/false);
 }
 
@@ -233,12 +236,13 @@
     DeclSpec DS(AttrFactory);
     SourceLocation DeclLoc = Tok.getLocation();
     SourceLocation EndLoc  = ParseDecltypeSpecifier(DS);
-    if (Tok.isNot(tok::coloncolon)) {
+
+    SourceLocation CCLoc;
+    if (!TryConsumeToken(tok::coloncolon, CCLoc)) {
       AnnotateExistingDecltypeSpecifier(DS, DeclLoc, EndLoc);
       return false;
     }
-    
-    SourceLocation CCLoc = ConsumeToken();
+
     if (Actions.ActOnCXXNestedNameSpecifierDecltype(SS, DS, CCLoc))
       SS.SetInvalid(SourceRange(DeclLoc, CCLoc));
 
@@ -287,19 +291,23 @@
 
       TentativeParsingAction TPA(*this);
       SourceLocation TemplateKWLoc = ConsumeToken();
-      
+
       UnqualifiedId TemplateName;
       if (Tok.is(tok::identifier)) {
         // Consume the identifier.
         TemplateName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
         ConsumeToken();
       } else if (Tok.is(tok::kw_operator)) {
-        if (ParseUnqualifiedIdOperator(SS, EnteringContext, ObjectType, 
+        // We don't need to actually parse the unqualified-id in this case,
+        // because a simple-template-id cannot start with 'operator', but
+        // go ahead and parse it anyway for consistency with the case where
+        // we already annotated the template-id.
+        if (ParseUnqualifiedIdOperator(SS, EnteringContext, ObjectType,
                                        TemplateName)) {
           TPA.Commit();
           break;
         }
-        
+
         if (TemplateName.getKind() != UnqualifiedId::IK_OperatorFunctionId &&
             TemplateName.getKind() != UnqualifiedId::IK_LiteralOperatorId) {
           Diag(TemplateName.getSourceRange().getBegin(),
@@ -341,10 +349,10 @@
     if (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) {
       // We have
       //
-      //   simple-template-id '::'
+      //   template-id '::'
       //
-      // So we need to check whether the simple-template-id is of the
-      // right kind (it should name a type or be dependent), and then
+      // So we need to check whether the template-id is a simple-template-id of
+      // the right kind (it should name a type or be dependent), and then
       // convert it into a type within the nested-name-specifier.
       TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
       if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde)) {
@@ -814,8 +822,7 @@
         // to save the necessary state, and restore it later.
         EnterExpressionEvaluationContext EC(Actions,
                                             Sema::PotentiallyEvaluated);
-        if (Tok.is(tok::equal))
-          ConsumeToken();
+        TryConsumeToken(tok::equal);
 
         if (!SkippedInits)
           Init = ParseInitializer();
@@ -864,8 +871,8 @@
             ConsumeToken();
           }
         }
-      } else if (Tok.is(tok::ellipsis))
-        EllipsisLoc = ConsumeToken();
+      } else
+        TryConsumeToken(tok::ellipsis, EllipsisLoc);
     }
     // If this is an init capture, process the initialization expression
     // right away.  For lambda init-captures such as the following:
@@ -988,7 +995,6 @@
     ParsedAttributes Attr(AttrFactory);
     SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
     SourceLocation EllipsisLoc;
-
     
     if (Tok.isNot(tok::r_paren)) {
       Actions.RecordParsingTemplateParameterDepth(TemplateParameterDepth);
@@ -1002,12 +1008,14 @@
     SourceLocation RParenLoc = T.getCloseLocation();
     DeclEndLoc = RParenLoc;
 
+    // GNU-style attributes must be parsed before the mutable specifier to be
+    // compatible with GCC.
+    MaybeParseGNUAttributes(Attr, &DeclEndLoc);
+
     // Parse 'mutable'[opt].
     SourceLocation MutableLoc;
-    if (Tok.is(tok::kw_mutable)) {
-      MutableLoc = ConsumeToken();
+    if (TryConsumeToken(tok::kw_mutable, MutableLoc))
       DeclEndLoc = MutableLoc;
-    }
 
     // Parse exception-specification[opt].
     ExceptionSpecificationType ESpecType = EST_None;
@@ -1061,22 +1069,41 @@
                                            LParenLoc, FunLocalRangeEnd, D,
                                            TrailingReturnType),
                   Attr, DeclEndLoc);
-  } else if (Tok.is(tok::kw_mutable) || Tok.is(tok::arrow)) {
-    // It's common to forget that one needs '()' before 'mutable' or the 
-    // result type. Deal with this.
+  } else if (Tok.is(tok::kw_mutable) || Tok.is(tok::arrow) ||
+             Tok.is(tok::kw___attribute) ||
+             (Tok.is(tok::l_square) && NextToken().is(tok::l_square))) {
+    // It's common to forget that one needs '()' before 'mutable', an attribute
+    // specifier, or the result type. Deal with this.
+    unsigned TokKind = 0;
+    switch (Tok.getKind()) {
+    case tok::kw_mutable: TokKind = 0; break;
+    case tok::arrow: TokKind = 1; break;
+    case tok::kw___attribute:
+    case tok::l_square: TokKind = 2; break;
+    default: llvm_unreachable("Unknown token kind");
+    }
+
     Diag(Tok, diag::err_lambda_missing_parens)
-      << Tok.is(tok::arrow)
+      << TokKind
       << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
     SourceLocation DeclLoc = Tok.getLocation();
     SourceLocation DeclEndLoc = DeclLoc;
-    
+
+    // GNU-style attributes must be parsed before the mutable specifier to be
+    // compatible with GCC.
+    ParsedAttributes Attr(AttrFactory);
+    MaybeParseGNUAttributes(Attr, &DeclEndLoc);
+
     // Parse 'mutable', if it's there.
     SourceLocation MutableLoc;
     if (Tok.is(tok::kw_mutable)) {
       MutableLoc = ConsumeToken();
       DeclEndLoc = MutableLoc;
     }
-    
+
+    // Parse attribute-specifier[opt].
+    MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
+
     // Parse the return type, if there is one.
     TypeResult TrailingReturnType;
     if (Tok.is(tok::arrow)) {
@@ -1086,7 +1113,6 @@
         DeclEndLoc = Range.getEnd();      
     }
 
-    ParsedAttributes Attr(AttrFactory);
     SourceLocation NoLoc;
     D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
                                                /*isAmbiguous=*/false,
@@ -1182,8 +1208,8 @@
 
   SourceLocation RAngleBracketLoc = Tok.getLocation();
 
-  if (ExpectAndConsume(tok::greater, diag::err_expected_greater))
-    return ExprError(Diag(LAngleBracketLoc, diag::note_matching) << "<");
+  if (ExpectAndConsume(tok::greater))
+    return ExprError(Diag(LAngleBracketLoc, diag::note_matching) << tok::less);
 
   SourceLocation LParenLoc, RParenLoc;
   BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -1667,6 +1693,8 @@
   const char *PrevSpec;
   unsigned DiagID;
   SourceLocation Loc = Tok.getLocation();
+  const clang::PrintingPolicy &Policy =
+      Actions.getASTContext().getPrintingPolicy();
 
   switch (Tok.getKind()) {
   case tok::identifier:   // foo::bar
@@ -1679,7 +1707,7 @@
   case tok::annot_typename: {
     if (getTypeAnnotation(Tok))
       DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
-                         getTypeAnnotation(Tok));
+                         getTypeAnnotation(Tok), Policy);
     else
       DS.SetTypeSpecError();
     
@@ -1693,19 +1721,19 @@
     if (Tok.is(tok::less) && getLangOpts().ObjC1)
       ParseObjCProtocolQualifiers(DS);
     
-    DS.Finish(Diags, PP);
+    DS.Finish(Diags, PP, Policy);
     return;
   }
 
   // builtin types
   case tok::kw_short:
-    DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_long:
-    DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw___int64:
-    DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_signed:
     DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec, DiagID);
@@ -1714,47 +1742,47 @@
     DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec, DiagID);
     break;
   case tok::kw_void:
-    DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_char:
-    DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_int:
-    DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw___int128:
-    DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_half:
-    DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_float:
-    DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_double:
-    DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_wchar_t:
-    DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_char16_t:
-    DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_char32_t:
-    DS.SetTypeSpecType(DeclSpec::TST_char32, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_char32, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::kw_bool:
-    DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec, DiagID);
+    DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec, DiagID, Policy);
     break;
   case tok::annot_decltype:
   case tok::kw_decltype:
     DS.SetRangeEnd(ParseDecltypeSpecifier(DS));
-    return DS.Finish(Diags, PP);
+    return DS.Finish(Diags, PP, Policy);
 
   // GNU typeof support.
   case tok::kw_typeof:
     ParseTypeofSpecifier(DS);
-    DS.Finish(Diags, PP);
+    DS.Finish(Diags, PP, Policy);
     return;
   }
   if (Tok.is(tok::annot_typename))
@@ -1762,7 +1790,7 @@
   else
     DS.SetRangeEnd(Tok.getLocation());
   ConsumeToken();
-  DS.Finish(Diags, PP);
+  DS.Finish(Diags, PP, Policy);
 }
 
 /// ParseCXXTypeSpecifierSeq - Parse a C++ type-specifier-seq (C++
@@ -1778,7 +1806,7 @@
 ///
 bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
   ParseSpecifierQualifierList(DS, AS_none, DSC_type_specifier);
-  DS.Finish(Diags, PP);
+  DS.Finish(Diags, PP, Actions.getASTContext().getPrintingPolicy());
   return false;
 }
 
@@ -1935,6 +1963,7 @@
     TemplateIdAnnotation *TemplateId
       = TemplateIdAnnotation::Allocate(TemplateArgs.size(), TemplateIds);
 
+    // FIXME: Store name for literal operator too.
     if (Id.getKind() == UnqualifiedId::IK_Identifier) {
       TemplateId->Name = Id.Identifier;
       TemplateId->Operator = OO_None;
@@ -2158,7 +2187,7 @@
       SuffixLoc = ConsumeToken();
       TokLocs.push_back(SuffixLoc);
     } else {
-      Diag(Tok.getLocation(), diag::err_expected_ident);
+      Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
       return true;
     }
 
@@ -2183,9 +2212,10 @@
     }
 
     Result.setLiteralOperatorId(II, KeywordLoc, SuffixLoc);
-    return false;
+
+    return Actions.checkLiteralOperatorId(SS, Result);
   }
-  
+
   // Parse a conversion-function-id.
   //
   //   conversion-function-id: [C++ 12.3.2]
@@ -2678,79 +2708,17 @@
   return Actions.ActOnCXXDelete(Start, UseGlobal, ArrayDelete, Operand.take());
 }
 
-static UnaryTypeTrait UnaryTypeTraitFromTokKind(tok::TokenKind kind) {
-  switch(kind) {
-  default: llvm_unreachable("Not a known unary type trait.");
-  case tok::kw___has_nothrow_assign:      return UTT_HasNothrowAssign;
-  case tok::kw___has_nothrow_move_assign: return UTT_HasNothrowMoveAssign;
-  case tok::kw___has_nothrow_constructor: return UTT_HasNothrowConstructor;
-  case tok::kw___has_nothrow_copy:           return UTT_HasNothrowCopy;
-  case tok::kw___has_trivial_assign:      return UTT_HasTrivialAssign;
-  case tok::kw___has_trivial_move_assign: return UTT_HasTrivialMoveAssign;
-  case tok::kw___has_trivial_constructor:
-                                    return UTT_HasTrivialDefaultConstructor;
-  case tok::kw___has_trivial_move_constructor:
-                                    return UTT_HasTrivialMoveConstructor;
-  case tok::kw___has_trivial_copy:           return UTT_HasTrivialCopy;
-  case tok::kw___has_trivial_destructor:  return UTT_HasTrivialDestructor;
-  case tok::kw___has_virtual_destructor:  return UTT_HasVirtualDestructor;
-  case tok::kw___is_abstract:             return UTT_IsAbstract;
-  case tok::kw___is_arithmetic:              return UTT_IsArithmetic;
-  case tok::kw___is_array:                   return UTT_IsArray;
-  case tok::kw___is_class:                return UTT_IsClass;
-  case tok::kw___is_complete_type:           return UTT_IsCompleteType;
-  case tok::kw___is_compound:                return UTT_IsCompound;
-  case tok::kw___is_const:                   return UTT_IsConst;
-  case tok::kw___is_empty:                return UTT_IsEmpty;
-  case tok::kw___is_enum:                 return UTT_IsEnum;
-  case tok::kw___is_final:                 return UTT_IsFinal;
-  case tok::kw___is_floating_point:          return UTT_IsFloatingPoint;
-  case tok::kw___is_function:                return UTT_IsFunction;
-  case tok::kw___is_fundamental:             return UTT_IsFundamental;
-  case tok::kw___is_integral:                return UTT_IsIntegral;
-  case tok::kw___is_interface_class:         return UTT_IsInterfaceClass;
-  case tok::kw___is_lvalue_reference:        return UTT_IsLvalueReference;
-  case tok::kw___is_member_function_pointer: return UTT_IsMemberFunctionPointer;
-  case tok::kw___is_member_object_pointer:   return UTT_IsMemberObjectPointer;
-  case tok::kw___is_member_pointer:          return UTT_IsMemberPointer;
-  case tok::kw___is_object:                  return UTT_IsObject;
-  case tok::kw___is_literal:              return UTT_IsLiteral;
-  case tok::kw___is_literal_type:         return UTT_IsLiteral;
-  case tok::kw___is_pod:                  return UTT_IsPOD;
-  case tok::kw___is_pointer:                 return UTT_IsPointer;
-  case tok::kw___is_polymorphic:          return UTT_IsPolymorphic;
-  case tok::kw___is_reference:               return UTT_IsReference;
-  case tok::kw___is_rvalue_reference:        return UTT_IsRvalueReference;
-  case tok::kw___is_scalar:                  return UTT_IsScalar;
-  case tok::kw___is_sealed:                  return UTT_IsSealed;
-  case tok::kw___is_signed:                  return UTT_IsSigned;
-  case tok::kw___is_standard_layout:         return UTT_IsStandardLayout;
-  case tok::kw___is_trivial:                 return UTT_IsTrivial;
-  case tok::kw___is_trivially_copyable:      return UTT_IsTriviallyCopyable;
-  case tok::kw___is_union:                return UTT_IsUnion;
-  case tok::kw___is_unsigned:                return UTT_IsUnsigned;
-  case tok::kw___is_void:                    return UTT_IsVoid;
-  case tok::kw___is_volatile:                return UTT_IsVolatile;
-  }
-}
-
-static BinaryTypeTrait BinaryTypeTraitFromTokKind(tok::TokenKind kind) {
-  switch(kind) {
-  default: llvm_unreachable("Not a known binary type trait");
-  case tok::kw___is_base_of:                 return BTT_IsBaseOf;
-  case tok::kw___is_convertible:             return BTT_IsConvertible;
-  case tok::kw___is_same:                    return BTT_IsSame;
-  case tok::kw___builtin_types_compatible_p: return BTT_TypeCompatible;
-  case tok::kw___is_convertible_to:          return BTT_IsConvertibleTo;
-  case tok::kw___is_trivially_assignable:    return BTT_IsTriviallyAssignable;
-  }
-}
-
 static TypeTrait TypeTraitFromTokKind(tok::TokenKind kind) {
   switch (kind) {
   default: llvm_unreachable("Not a known type trait");
-  case tok::kw___is_trivially_constructible: 
-    return TT_IsTriviallyConstructible;
+#define TYPE_TRAIT_1(Spelling, Name, Key) \
+case tok::kw_ ## Spelling: return UTT_ ## Name;
+#define TYPE_TRAIT_2(Spelling, Name, Key) \
+case tok::kw_ ## Spelling: return BTT_ ## Name;
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_N(Spelling, Name, Key) \
+  case tok::kw_ ## Spelling: return TT_ ## Name;
+#include "clang/Basic/TokenKinds.def"
   }
 }
 
@@ -2770,87 +2738,33 @@
   }
 }
 
-/// ParseUnaryTypeTrait - Parse the built-in unary type-trait
-/// pseudo-functions that allow implementation of the TR1/C++0x type traits
-/// templates.
-///
-///       primary-expression:
-/// [GNU]             unary-type-trait '(' type-id ')'
-///
-ExprResult Parser::ParseUnaryTypeTrait() {
-  UnaryTypeTrait UTT = UnaryTypeTraitFromTokKind(Tok.getKind());
-  SourceLocation Loc = ConsumeToken();
-
-  BalancedDelimiterTracker T(*this, tok::l_paren);
-  if (T.expectAndConsume(diag::err_expected_lparen))
-    return ExprError();
-
-  // FIXME: Error reporting absolutely sucks! If the this fails to parse a type
-  // there will be cryptic errors about mismatched parentheses and missing
-  // specifiers.
-  TypeResult Ty = ParseTypeName();
-
-  T.consumeClose();
-
-  if (Ty.isInvalid())
-    return ExprError();
-
-  return Actions.ActOnUnaryTypeTrait(UTT, Loc, Ty.get(), T.getCloseLocation());
-}
-
-/// ParseBinaryTypeTrait - Parse the built-in binary type-trait
-/// pseudo-functions that allow implementation of the TR1/C++0x type traits
-/// templates.
-///
-///       primary-expression:
-/// [GNU]             binary-type-trait '(' type-id ',' type-id ')'
-///
-ExprResult Parser::ParseBinaryTypeTrait() {
-  BinaryTypeTrait BTT = BinaryTypeTraitFromTokKind(Tok.getKind());
-  SourceLocation Loc = ConsumeToken();
-
-  BalancedDelimiterTracker T(*this, tok::l_paren);
-  if (T.expectAndConsume(diag::err_expected_lparen))
-    return ExprError();
-
-  TypeResult LhsTy = ParseTypeName();
-  if (LhsTy.isInvalid()) {
-    SkipUntil(tok::r_paren, StopAtSemi);
-    return ExprError();
+static unsigned TypeTraitArity(tok::TokenKind kind) {
+  switch (kind) {
+    default: llvm_unreachable("Not a known type trait");
+#define TYPE_TRAIT(N,Spelling,K) case tok::kw_##Spelling: return N;
+#include "clang/Basic/TokenKinds.def"
   }
-
-  if (ExpectAndConsume(tok::comma, diag::err_expected_comma)) {
-    SkipUntil(tok::r_paren, StopAtSemi);
-    return ExprError();
-  }
-
-  TypeResult RhsTy = ParseTypeName();
-  if (RhsTy.isInvalid()) {
-    SkipUntil(tok::r_paren, StopAtSemi);
-    return ExprError();
-  }
-
-  T.consumeClose();
-
-  return Actions.ActOnBinaryTypeTrait(BTT, Loc, LhsTy.get(), RhsTy.get(),
-                                      T.getCloseLocation());
 }
 
 /// \brief Parse the built-in type-trait pseudo-functions that allow 
 /// implementation of the TR1/C++11 type traits templates.
 ///
 ///       primary-expression:
+///          unary-type-trait '(' type-id ')'
+///          binary-type-trait '(' type-id ',' type-id ')'
 ///          type-trait '(' type-id-seq ')'
 ///
 ///       type-id-seq:
 ///          type-id ...[opt] type-id-seq[opt]
 ///
 ExprResult Parser::ParseTypeTrait() {
-  TypeTrait Kind = TypeTraitFromTokKind(Tok.getKind());
+  tok::TokenKind Kind = Tok.getKind();
+  unsigned Arity = TypeTraitArity(Kind);
+
   SourceLocation Loc = ConsumeToken();
   
   BalancedDelimiterTracker Parens(*this, tok::l_paren);
-  if (Parens.expectAndConsume(diag::err_expected_lparen))
+  if (Parens.expectAndConsume())
     return ExprError();
 
   SmallVector<ParsedType, 2> Args;
@@ -2873,19 +2787,26 @@
     
     // Add this type to the list of arguments.
     Args.push_back(Ty.get());
-    
-    if (Tok.is(tok::comma)) {
-      ConsumeToken();
-      continue;
-    }
-    
-    break;
-  } while (true);
-  
+  } while (TryConsumeToken(tok::comma));
+
   if (Parens.consumeClose())
     return ExprError();
-  
-  return Actions.ActOnTypeTrait(Kind, Loc, Args, Parens.getCloseLocation());
+
+  SourceLocation EndLoc = Parens.getCloseLocation();
+
+  if (Arity && Args.size() != Arity) {
+    Diag(EndLoc, diag::err_type_trait_arity)
+      << Arity << 0 << (Arity > 1) << (int)Args.size() << SourceRange(Loc);
+    return ExprError();
+  }
+
+  if (!Arity && Args.empty()) {
+    Diag(EndLoc, diag::err_type_trait_arity)
+      << 1 << 1 << 1 << (int)Args.size() << SourceRange(Loc);
+    return ExprError();
+  }
+
+  return Actions.ActOnTypeTrait(TypeTraitFromTokKind(Kind), Loc, Args, EndLoc);
 }
 
 /// ParseArrayTypeTrait - Parse the built-in array type-trait
@@ -2900,7 +2821,7 @@
   SourceLocation Loc = ConsumeToken();
 
   BalancedDelimiterTracker T(*this, tok::l_paren);
-  if (T.expectAndConsume(diag::err_expected_lparen))
+  if (T.expectAndConsume())
     return ExprError();
 
   TypeResult Ty = ParseTypeName();
@@ -2917,7 +2838,7 @@
                                        T.getCloseLocation());
   }
   case ATT_ArrayExtent: {
-    if (ExpectAndConsume(tok::comma, diag::err_expected_comma)) {
+    if (ExpectAndConsume(tok::comma)) {
       SkipUntil(tok::r_paren, StopAtSemi);
       return ExprError();
     }
@@ -2943,7 +2864,7 @@
   SourceLocation Loc = ConsumeToken();
 
   BalancedDelimiterTracker T(*this, tok::l_paren);
-  if (T.expectAndConsume(diag::err_expected_lparen))
+  if (T.expectAndConsume())
     return ExprError();
 
   ExprResult Expr = ParseExpression();
diff --git a/lib/Parse/ParseInit.cpp b/lib/Parse/ParseInit.cpp
index 37f74bb..44053f1 100644
--- a/lib/Parse/ParseInit.cpp
+++ b/lib/Parse/ParseInit.cpp
@@ -105,8 +105,6 @@
       return true;
     }
   }
-  
-  return true;
 }
 
 static void CheckArrayDesignatorSyntax(Parser &P, SourceLocation Loc,
@@ -493,7 +491,7 @@
   
   BalancedDelimiterTracker Braces(*this, tok::l_brace);
   if (Braces.consumeOpen()) {
-    Diag(Tok, diag::err_expected_lbrace);
+    Diag(Tok, diag::err_expected) << tok::l_brace;
     return false;
   }
 
@@ -512,7 +510,7 @@
     return false;
   }
 
-  while (Tok.isNot(tok::eof)) {
+  while (!isEofOrEom()) {
     trailingComma = false;
     // If we know that this cannot be a designation, just parse the nested
     // initializer directly.
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index 86f38cf..776dbd6 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -81,9 +81,9 @@
   case tok::objc_import:
     if (getLangOpts().Modules)
       return ParseModuleImport(AtLoc);
-      
-    // Fall through
-      
+    Diag(AtLoc, diag::err_atimport);
+    SkipUntil(tok::semi);
+    return Actions.ConvertDeclToDeclGroup(0);
   default:
     Diag(AtLoc, diag::err_unexpected_at);
     SkipUntil(tok::semi);
@@ -107,7 +107,7 @@
   while (1) {
     MaybeSkipAttributes(tok::objc_class);
     if (Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident);
+      Diag(Tok, diag::err_expected) << tok::identifier;
       SkipUntil(tok::semi);
       return Actions.ConvertDeclToDeclGroup(0);
     }
@@ -115,14 +115,12 @@
     ClassLocs.push_back(Tok.getLocation());
     ConsumeToken();
 
-    if (Tok.isNot(tok::comma))
+    if (!TryConsumeToken(tok::comma))
       break;
-
-    ConsumeToken();
   }
 
   // Consume the ';'.
-  if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@class"))
+  if (ExpectAndConsume(tok::semi, diag::err_expected_after, "@class"))
     return Actions.ConvertDeclToDeclGroup(0);
 
   return Actions.ActOnForwardClassDeclaration(atLoc, ClassNames.data(),
@@ -195,7 +193,8 @@
   MaybeSkipAttributes(tok::objc_interface);
 
   if (Tok.isNot(tok::identifier)) {
-    Diag(Tok, diag::err_expected_ident); // missing class or category name.
+    Diag(Tok, diag::err_expected)
+        << tok::identifier; // missing class or category name.
     return 0;
   }
 
@@ -222,7 +221,8 @@
       categoryLoc = ConsumeToken();
     }
     else if (!getLangOpts().ObjC2) {
-      Diag(Tok, diag::err_expected_ident); // missing category name.
+      Diag(Tok, diag::err_expected)
+          << tok::identifier; // missing category name.
       return 0;
     }
    
@@ -274,7 +274,8 @@
     }
 
     if (Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident); // missing super class name.
+      Diag(Tok, diag::err_expected)
+          << tok::identifier; // missing super class name.
       return 0;
     }
     superClassId = Tok.getIdentifierInfo();
@@ -328,7 +329,7 @@
     MethodImplKind(MethodImplKind) {
   }
 
-  void invoke(ParsingFieldDeclarator &FD) {
+  void invoke(ParsingFieldDeclarator &FD) override {
     if (FD.D.getIdentifier() == 0) {
       P.Diag(AtLoc, diag::err_objc_property_requires_field_name)
         << FD.D.getSourceRange();
@@ -423,7 +424,7 @@
     }
 
     // If we got to the end of the file, exit the loop.
-    if (Tok.is(tok::eof))
+    if (isEofOrEom())
       break;
 
     // Code completion within an Objective-C interface.
@@ -610,8 +611,10 @@
       unsigned DiagID = IsSetter ? diag::err_objc_expected_equal_for_setter :
         diag::err_objc_expected_equal_for_getter;
 
-      if (ExpectAndConsume(tok::equal, DiagID, "", tok::r_paren))
+      if (ExpectAndConsume(tok::equal, DiagID)) {
+        SkipUntil(tok::r_paren, StopAtSemi);
         return;
+      }
 
       if (Tok.is(tok::code_completion)) {
         if (IsSetter)
@@ -636,10 +639,11 @@
         DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_setter);
         DS.setSetterName(SelIdent);
 
-        if (ExpectAndConsume(tok::colon, 
-                             diag::err_expected_colon_after_setter_name, "",
-                             tok::r_paren))
+        if (ExpectAndConsume(tok::colon,
+                             diag::err_expected_colon_after_setter_name)) {
+          SkipUntil(tok::r_paren, StopAtSemi);
           return;
+        }
       } else {
         DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_getter);
         DS.setGetterName(SelIdent);
@@ -1054,11 +1058,8 @@
     Sema::ObjCArgInfo ArgInfo;
 
     // Each iteration parses a single keyword argument.
-    if (Tok.isNot(tok::colon)) {
-      Diag(Tok, diag::err_expected_colon);
+    if (ExpectAndConsume(tok::colon))
       break;
-    }
-    ConsumeToken(); // Eat the ':'.
 
     ArgInfo.Type = ParsedType();
     if (Tok.is(tok::l_paren)) // Parse the argument type if present.
@@ -1086,7 +1087,8 @@
     }
     
     if (Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident); // missing argument name.
+      Diag(Tok, diag::err_expected)
+          << tok::identifier; // missing argument name.
       break;
     }
 
@@ -1198,7 +1200,7 @@
     }
 
     if (Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident);
+      Diag(Tok, diag::err_expected) << tok::identifier;
       SkipUntil(tok::greater, StopAtSemi);
       return true;
     }
@@ -1207,9 +1209,8 @@
     ProtocolLocs.push_back(Tok.getLocation());
     ConsumeToken();
 
-    if (Tok.isNot(tok::comma))
+    if (!TryConsumeToken(tok::comma))
       break;
-    ConsumeToken();
   }
 
   // Consume the '>'.
@@ -1289,7 +1290,7 @@
   BalancedDelimiterTracker T(*this, tok::l_brace);
   T.consumeOpen();
   // While we still have something to read, read the instance variables.
-  while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+  while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
     // Each iteration of this loop reads one objc-instance-variable-decl.
 
     // Check for extraneous top-level semicolon.
@@ -1299,9 +1300,7 @@
     }
 
     // Set the default visibility to private.
-    if (Tok.is(tok::at)) { // parse objc-visibility-spec
-      ConsumeToken(); // eat the @ sign
-      
+    if (TryConsumeToken(tok::at)) { // parse objc-visibility-spec
       if (Tok.is(tok::code_completion)) {
         Actions.CodeCompleteObjCAtVisibility(getCurScope());
         return cutOffParsing();
@@ -1349,7 +1348,7 @@
         P(P), IDecl(IDecl), visibility(V), AllIvarDecls(AllIvarDecls) {
       }
 
-      void invoke(ParsingFieldDeclarator &FD) {
+      void invoke(ParsingFieldDeclarator &FD) override {
         P.Actions.ActOnObjCContainerStartDefinition(IDecl);
         // Install the declarator into the interface decl.
         Decl *Field
@@ -1412,16 +1411,15 @@
   MaybeSkipAttributes(tok::objc_protocol);
 
   if (Tok.isNot(tok::identifier)) {
-    Diag(Tok, diag::err_expected_ident); // missing protocol name.
+    Diag(Tok, diag::err_expected) << tok::identifier; // missing protocol name.
     return DeclGroupPtrTy();
   }
   // Save the protocol name, then consume it.
   IdentifierInfo *protocolName = Tok.getIdentifierInfo();
   SourceLocation nameLoc = ConsumeToken();
 
-  if (Tok.is(tok::semi)) { // forward declaration of one protocol.
+  if (TryConsumeToken(tok::semi)) { // forward declaration of one protocol.
     IdentifierLocPair ProtoInfo(protocolName, nameLoc);
-    ConsumeToken();
     return Actions.ActOnForwardProtocolDeclaration(AtLoc, &ProtoInfo, 1,
                                                    attrs.getList());
   }
@@ -1436,7 +1434,7 @@
     while (1) {
       ConsumeToken(); // the ','
       if (Tok.isNot(tok::identifier)) {
-        Diag(Tok, diag::err_expected_ident);
+        Diag(Tok, diag::err_expected) << tok::identifier;
         SkipUntil(tok::semi);
         return DeclGroupPtrTy();
       }
@@ -1448,7 +1446,7 @@
         break;
     }
     // Consume the ';'.
-    if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@protocol"))
+    if (ExpectAndConsume(tok::semi, diag::err_expected_after, "@protocol"))
       return DeclGroupPtrTy();
 
     return Actions.ActOnForwardProtocolDeclaration(AtLoc,
@@ -1505,7 +1503,8 @@
   MaybeSkipAttributes(tok::objc_implementation);
 
   if (Tok.isNot(tok::identifier)) {
-    Diag(Tok, diag::err_expected_ident); // missing class or category name.
+    Diag(Tok, diag::err_expected)
+        << tok::identifier; // missing class or category name.
     return DeclGroupPtrTy();
   }
   // We have a class or category name - consume it.
@@ -1529,11 +1528,12 @@
       categoryId = Tok.getIdentifierInfo();
       categoryLoc = ConsumeToken();
     } else {
-      Diag(Tok, diag::err_expected_ident); // missing category name.
+      Diag(Tok, diag::err_expected)
+          << tok::identifier; // missing category name.
       return DeclGroupPtrTy();
     }
     if (Tok.isNot(tok::r_paren)) {
-      Diag(Tok, diag::err_expected_rparen);
+      Diag(Tok, diag::err_expected) << tok::r_paren;
       SkipUntil(tok::r_paren); // don't stop at ';'
       return DeclGroupPtrTy();
     }
@@ -1552,11 +1552,11 @@
     // We have a class implementation
     SourceLocation superClassLoc;
     IdentifierInfo *superClassId = 0;
-    if (Tok.is(tok::colon)) {
+    if (TryConsumeToken(tok::colon)) {
       // We have a super class
-      ConsumeToken();
       if (Tok.isNot(tok::identifier)) {
-        Diag(Tok, diag::err_expected_ident); // missing super class name.
+        Diag(Tok, diag::err_expected)
+            << tok::identifier; // missing super class name.
         return DeclGroupPtrTy();
       }
       superClassId = Tok.getIdentifierInfo();
@@ -1582,7 +1582,7 @@
 
   {
     ObjCImplParsingDataRAII ObjCImplParsing(*this, ObjCImpDecl);
-    while (!ObjCImplParsing.isFinished() && Tok.isNot(tok::eof)) {
+    while (!ObjCImplParsing.isFinished() && !isEofOrEom()) {
       ParsedAttributesWithRange attrs(AttrFactory);
       MaybeParseCXX11Attributes(attrs);
       MaybeParseMicrosoftAttributes(attrs);
@@ -1612,7 +1612,7 @@
 Parser::ObjCImplParsingDataRAII::~ObjCImplParsingDataRAII() {
   if (!Finished) {
     finish(P.Tok.getLocation());
-    if (P.Tok.is(tok::eof)) {
+    if (P.isEofOrEom()) {
       P.Diag(P.Tok, diag::err_objc_missing_end)
           << FixItHint::CreateInsertion(P.Tok.getLocation(), "\n@end\n");
       P.Diag(Dcl->getLocStart(), diag::note_objc_container_start)
@@ -1655,19 +1655,18 @@
          "ParseObjCAtAliasDeclaration(): Expected @compatibility_alias");
   ConsumeToken(); // consume compatibility_alias
   if (Tok.isNot(tok::identifier)) {
-    Diag(Tok, diag::err_expected_ident);
+    Diag(Tok, diag::err_expected) << tok::identifier;
     return 0;
   }
   IdentifierInfo *aliasId = Tok.getIdentifierInfo();
   SourceLocation aliasLoc = ConsumeToken(); // consume alias-name
   if (Tok.isNot(tok::identifier)) {
-    Diag(Tok, diag::err_expected_ident);
+    Diag(Tok, diag::err_expected) << tok::identifier;
     return 0;
   }
   IdentifierInfo *classId = Tok.getIdentifierInfo();
   SourceLocation classLoc = ConsumeToken(); // consume class-name;
-  ExpectAndConsume(tok::semi, diag::err_expected_semi_after, 
-                   "@compatibility_alias");
+  ExpectAndConsume(tok::semi, diag::err_expected_after, "@compatibility_alias");
   return Actions.ActOnCompatibilityAlias(atLoc, aliasId, aliasLoc,
                                          classId, classLoc);
 }
@@ -1705,10 +1704,8 @@
     IdentifierInfo *propertyId = Tok.getIdentifierInfo();
     SourceLocation propertyLoc = ConsumeToken(); // consume property name
     SourceLocation propertyIvarLoc;
-    if (Tok.is(tok::equal)) {
+    if (TryConsumeToken(tok::equal)) {
       // property '=' ivar-name
-      ConsumeToken(); // consume '='
-      
       if (Tok.is(tok::code_completion)) {
         Actions.CodeCompleteObjCPropertySynthesizeIvar(getCurScope(), propertyId);
         cutOffParsing();
@@ -1716,7 +1713,7 @@
       }
       
       if (Tok.isNot(tok::identifier)) {
-        Diag(Tok, diag::err_expected_ident);
+        Diag(Tok, diag::err_expected) << tok::identifier;
         break;
       }
       propertyIvar = Tok.getIdentifierInfo();
@@ -1728,7 +1725,7 @@
       break;
     ConsumeToken(); // consume ','
   }
-  ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@synthesize");
+  ExpectAndConsume(tok::semi, diag::err_expected_after, "@synthesize");
   return 0;
 }
 
@@ -1751,7 +1748,7 @@
     }
     
     if (Tok.isNot(tok::identifier)) {
-      Diag(Tok, diag::err_expected_ident);
+      Diag(Tok, diag::err_expected) << tok::identifier;
       SkipUntil(tok::semi);
       return 0;
     }
@@ -1765,7 +1762,7 @@
       break;
     ConsumeToken(); // consume ','
   }
-  ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@dynamic");
+  ExpectAndConsume(tok::semi, diag::err_expected_after, "@dynamic");
   return 0;
 }
 
@@ -1783,7 +1780,7 @@
     }
   }
   // consume ';'
-  ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@throw");
+  ExpectAndConsume(tok::semi, diag::err_expected_after, "@throw");
   return Actions.ActOnObjCAtThrowStmt(atLoc, Res.take(), getCurScope());
 }
 
@@ -1806,7 +1803,7 @@
     ConsumeParen();  // ')'
   } else {
     if (!operand.isInvalid())
-      Diag(Tok, diag::err_expected_rparen);
+      Diag(Tok, diag::err_expected) << tok::r_paren;
 
     // Skip forward until we see a left brace, but don't consume it.
     SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch);
@@ -1815,7 +1812,7 @@
   // Require a compound statement.
   if (Tok.isNot(tok::l_brace)) {
     if (!operand.isInvalid())
-      Diag(Tok, diag::err_expected_lbrace);
+      Diag(Tok, diag::err_expected) << tok::l_brace;
     return StmtError();
   }
 
@@ -1855,7 +1852,7 @@
 
   ConsumeToken(); // consume try
   if (Tok.isNot(tok::l_brace)) {
-    Diag(Tok, diag::err_expected_lbrace);
+    Diag(Tok, diag::err_expected) << tok::l_brace;
     return StmtError();
   }
   StmtVector CatchStmts;
@@ -1905,7 +1902,7 @@
         if (Tok.is(tok::l_brace))
           CatchBody = ParseCompoundStatementBody();
         else
-          Diag(Tok, diag::err_expected_lbrace);
+          Diag(Tok, diag::err_expected) << tok::l_brace;
         if (CatchBody.isInvalid())
           CatchBody = Actions.ActOnNullStmt(Tok.getLocation());
         
@@ -1931,7 +1928,7 @@
       if (Tok.is(tok::l_brace))
         FinallyBody = ParseCompoundStatementBody();
       else
-        Diag(Tok, diag::err_expected_lbrace);
+        Diag(Tok, diag::err_expected) << tok::l_brace;
       if (FinallyBody.isInvalid())
         FinallyBody = Actions.ActOnNullStmt(Tok.getLocation());
       FinallyStmt = Actions.ActOnObjCAtFinallyStmt(AtCatchFinallyLoc,
@@ -1957,7 +1954,7 @@
 Parser::ParseObjCAutoreleasePoolStmt(SourceLocation atLoc) {
   ConsumeToken(); // consume autoreleasepool
   if (Tok.isNot(tok::l_brace)) {
-    Diag(Tok, diag::err_expected_lbrace);
+    Diag(Tok, diag::err_expected) << tok::l_brace;
     return StmtError();
   }
   // Enter a scope to hold everything within the compound stmt.  Compound
@@ -2478,8 +2475,7 @@
       KeyIdents.push_back(selIdent);
       KeyLocs.push_back(Loc);
 
-      if (Tok.isNot(tok::colon)) {
-        Diag(Tok, diag::err_expected_colon);
+      if (ExpectAndConsume(tok::colon)) {
         // We must manually skip to a ']', otherwise the expression skipper will
         // stop at the ']' when it skips to the ';'.  We want it to skip beyond
         // the enclosing expression.
@@ -2487,7 +2483,6 @@
         return ExprError();
       }
 
-      ConsumeToken(); // Eat the ':'.
       ///  Parse the expression after ':'
       
       if (Tok.is(tok::code_completion)) {
@@ -2572,7 +2567,7 @@
       KeyExprs.push_back(Res.release());
     }
   } else if (!selIdent) {
-    Diag(Tok, diag::err_expected_ident); // missing selector name.
+    Diag(Tok, diag::err_expected) << tok::identifier; // missing selector name.
 
     // We must manually skip to a ']', otherwise the expression skipper will
     // stop at the ']' when it skips to the ';'.  We want it to skip beyond
@@ -2582,10 +2577,8 @@
   }
     
   if (Tok.isNot(tok::r_square)) {
-    if (Tok.is(tok::identifier))
-      Diag(Tok, diag::err_expected_colon);
-    else
-      Diag(Tok, diag::err_expected_rsquare);
+    Diag(Tok, diag::err_expected)
+        << (Tok.is(tok::identifier) ? tok::colon : tok::r_square);
     // We must manually skip to a ']', otherwise the expression skipper will
     // stop at the ']' when it skips to the ';'.  We want it to skip beyond
     // the enclosing expression.
@@ -2730,7 +2723,8 @@
     if (Tok.is(tok::comma))
       ConsumeToken(); // Eat the ','.
     else if (Tok.isNot(tok::r_square))
-     return ExprError(Diag(Tok, diag::err_expected_rsquare_or_comma));
+      return ExprError(Diag(Tok, diag::err_expected_either) << tok::r_square
+                                                            << tok::comma);
   }
   SourceLocation EndLoc = ConsumeBracket(); // location of ']'
   MultiExprArg Args(ElementExprs);
@@ -2755,10 +2749,7 @@
       }
     }
 
-    if (Tok.is(tok::colon)) {
-      ConsumeToken();
-    } else {
-      Diag(Tok, diag::err_expected_colon);
+    if (ExpectAndConsume(tok::colon)) {
       SkipUntil(tok::r_brace, StopAtSemi);
       return ExprError();
     }
@@ -2774,20 +2765,19 @@
     
     // Parse the ellipsis that designates this as a pack expansion.
     SourceLocation EllipsisLoc;
-    if (Tok.is(tok::ellipsis) && getLangOpts().CPlusPlus)
-      EllipsisLoc = ConsumeToken();
-    
+    if (getLangOpts().CPlusPlus)
+      TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
     // We have a valid expression. Collect it in a vector so we can
     // build the argument list.
     ObjCDictionaryElement Element = { 
       KeyExpr.get(), ValueExpr.get(), EllipsisLoc, None 
     };
     Elements.push_back(Element);
-    
-    if (Tok.is(tok::comma))
-      ConsumeToken(); // Eat the ','.
-    else if (Tok.isNot(tok::r_brace))
-      return ExprError(Diag(Tok, diag::err_expected_rbrace_or_comma));
+
+    if (!TryConsumeToken(tok::comma) && Tok.isNot(tok::r_brace))
+      return ExprError(Diag(Tok, diag::err_expected_either) << tok::r_brace
+                                                            << tok::comma);
   }
   SourceLocation EndLoc = ConsumeBrace();
   
@@ -2834,7 +2824,7 @@
   T.consumeOpen();
 
   if (Tok.isNot(tok::identifier))
-    return ExprError(Diag(Tok, diag::err_expected_ident));
+    return ExprError(Diag(Tok, diag::err_expected) << tok::identifier);
 
   IdentifierInfo *protocolId = Tok.getIdentifierInfo();
   SourceLocation ProtoIdLoc = ConsumeToken();
@@ -2869,20 +2859,19 @@
   IdentifierInfo *SelIdent = ParseObjCSelectorPiece(sLoc);
   if (!SelIdent &&  // missing selector name.
       Tok.isNot(tok::colon) && Tok.isNot(tok::coloncolon))
-    return ExprError(Diag(Tok, diag::err_expected_ident));
+    return ExprError(Diag(Tok, diag::err_expected) << tok::identifier);
 
   KeyIdents.push_back(SelIdent);
   unsigned nColons = 0;
   if (Tok.isNot(tok::r_paren)) {
     while (1) {
-      if (Tok.is(tok::coloncolon)) { // Handle :: in C++.
+      if (TryConsumeToken(tok::coloncolon)) { // Handle :: in C++.
         ++nColons;
         KeyIdents.push_back(0);
-      } else if (Tok.isNot(tok::colon))
-        return ExprError(Diag(Tok, diag::err_expected_colon));
-
+      } else if (ExpectAndConsume(tok::colon)) // Otherwise expect ':'.
+        return ExprError();
       ++nColons;
-      ConsumeToken(); // Eat the ':' or '::'.
+
       if (Tok.is(tok::r_paren))
         break;
       
@@ -2931,7 +2920,7 @@
   assert((Tok.is(tok::l_brace) || Tok.is(tok::kw_try) ||
           Tok.is(tok::colon)) && 
           "Inline objective-c method not starting with '{' or 'try' or ':'");
-  // Enter a scope for the method or c-fucntion body.
+  // Enter a scope for the method or c-function body.
   ParseScope BodyScope(this,
                        parseMethod
                        ? Scope::ObjCMethodScope|Scope::FnScope|Scope::DeclScope
@@ -2944,11 +2933,11 @@
   else
     Actions.ActOnStartOfFunctionDef(getCurScope(), MCDecl);
   if (Tok.is(tok::kw_try))
-    MCDecl = ParseFunctionTryBlock(MCDecl, BodyScope);
+    ParseFunctionTryBlock(MCDecl, BodyScope);
   else {
     if (Tok.is(tok::colon))
       ParseConstructorInitializer(MCDecl);
-    MCDecl = ParseFunctionStatementBody(MCDecl, BodyScope);
+    ParseFunctionStatementBody(MCDecl, BodyScope);
   }
   
   if (Tok.getLocation() != OrigLoc) {
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index 89e4147..f1b6107 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -12,12 +12,12 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/AST/ASTConsumer.h"
+#include "RAIIObjectsForParser.h"
 #include "clang/AST/StmtOpenMP.h"
 #include "clang/Parse/ParseDiagnostic.h"
 #include "clang/Parse/Parser.h"
 #include "clang/Sema/Scope.h"
 #include "llvm/ADT/PointerIntPair.h"
-#include "RAIIObjectsForParser.h"
 using namespace clang;
 
 //===----------------------------------------------------------------------===//
@@ -60,6 +60,7 @@
     Diag(Tok, diag::err_omp_unknown_directive);
     break;
   case OMPD_parallel:
+  case OMPD_simd:
   case OMPD_task:
   case NUM_OPENMP_DIRECTIVES:
     Diag(Tok, diag::err_omp_unexpected_directive)
@@ -114,7 +115,8 @@
     }
     SkipUntil(tok::annot_pragma_openmp_end);
     break;
-  case OMPD_parallel: {
+  case OMPD_parallel:
+  case OMPD_simd: {
     ConsumeToken();
 
     Actions.StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope());
@@ -225,8 +227,9 @@
       IsCorrect = false;
       SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
                 StopBeforeMatch);
-      Diag(PrevTok.getLocation(), diag::err_expected_ident)
-        << SourceRange(PrevTok.getLocation(), PrevTokLocation);
+      Diag(PrevTok.getLocation(), diag::err_expected)
+          << tok::identifier
+          << SourceRange(PrevTok.getLocation(), PrevTokLocation);
     } else {
       DeclarationNameInfo NameInfo = Actions.GetNameFromUnqualifiedId(Name);
       ExprResult Res = Actions.ActOnOpenMPIdExpression(getCurScope(), SS,
@@ -241,7 +244,7 @@
   }
 
   if (NoIdentIsFound) {
-    Diag(Tok, diag::err_expected_ident);
+    Diag(Tok, diag::err_expected) << tok::identifier;
     IsCorrect = false;
   }
 
@@ -254,7 +257,8 @@
 /// \brief Parsing of OpenMP clauses.
 ///
 ///    clause:
-///       default-clause|private-clause|firstprivate-clause|shared-clause
+///       if-clause | num_threads-clause | safelen-clause | default-clause |
+///       private-clause | firstprivate-clause | shared-clause
 ///
 OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
                                      OpenMPClauseKind CKind, bool FirstClause) {
@@ -268,10 +272,25 @@
   }
 
   switch (CKind) {
+  case OMPC_if:
+  case OMPC_num_threads:
+  case OMPC_safelen:
+    // OpenMP [2.5, Restrictions]
+    //  At most one if clause can appear on the directive.
+    //  At most one num_threads clause can appear on the directive.
+    // OpenMP [2.8.1, simd construct, Restrictions]
+    //  Only one safelen clause can appear on a simd directive.
+    if (!FirstClause) {
+      Diag(Tok, diag::err_omp_more_one_clause)
+           << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind);
+    }
+
+    Clause = ParseOpenMPSingleExprClause(CKind);
+    break;
   case OMPC_default:
-    // OpenMP [2.9.3.1, Restrictions]
-    //  Only a single default clause may be specified on a parallel or task
-    //  directive.
+    // OpenMP [2.14.3.1, Restrictions]
+    //  Only a single default clause may be specified on a parallel, task or
+    //  teams directive.
     if (!FirstClause) {
       Diag(Tok, diag::err_omp_more_one_clause)
            << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind);
@@ -282,6 +301,7 @@
   case OMPC_private:
   case OMPC_firstprivate:
   case OMPC_shared:
+  case OMPC_copyin:
     Clause = ParseOpenMPVarListClause(CKind);
     break;
   case OMPC_unknown:
@@ -299,6 +319,41 @@
   return ErrorFound ? 0 : Clause;
 }
 
+/// \brief Parsing of OpenMP clauses with single expressions like 'if',
+/// 'collapse', 'safelen', 'num_threads', 'simdlen', 'num_teams' or
+/// 'thread_limit'.
+///
+///    if-clause:
+///      'if' '(' expression ')'
+///
+///    num_threads-clause:
+///      'num_threads' '(' expression ')'
+///
+///    safelen-clause:
+///      'safelen' '(' expression ')'
+///
+OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind) {
+  SourceLocation Loc = ConsumeToken();
+
+  BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+  if (T.expectAndConsume(diag::err_expected_lparen_after,
+                         getOpenMPClauseName(Kind)))
+    return 0;
+
+  ExprResult LHS(ParseCastExpression(false, false, NotTypeCast));
+  ExprResult Val(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+
+  // Parse ')'.
+  T.consumeClose();
+
+  if (Val.isInvalid())
+    return 0;
+
+  return Actions.ActOnOpenMPSingleExprClause(Kind, Val.take(), Loc,
+                                             T.getOpenLocation(),
+                                             T.getCloseLocation());
+}
+
 /// \brief Parsing of simple OpenMP clauses like 'default'.
 ///
 ///    default-clause:
@@ -366,7 +421,7 @@
     } else if (Tok.isNot(tok::r_paren) &&
                Tok.isNot(tok::annot_pragma_openmp_end)) {
       Diag(Tok, diag::err_omp_expected_punc)
-        << 1 << getOpenMPClauseName(Kind);
+        << getOpenMPClauseName(Kind);
     }
   }
 
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index 8a374e0..29e1771 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -11,7 +11,6 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "ParsePragma.h"
 #include "clang/Lex/Preprocessor.h"
 #include "clang/Parse/ParseDiagnostic.h"
 #include "clang/Parse/Parser.h"
@@ -19,6 +18,208 @@
 #include "llvm/ADT/StringSwitch.h"
 using namespace clang;
 
+namespace {
+
+struct PragmaAlignHandler : public PragmaHandler {
+  explicit PragmaAlignHandler() : PragmaHandler("align") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaGCCVisibilityHandler : public PragmaHandler {
+  explicit PragmaGCCVisibilityHandler() : PragmaHandler("visibility") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaOptionsHandler : public PragmaHandler {
+  explicit PragmaOptionsHandler() : PragmaHandler("options") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaPackHandler : public PragmaHandler {
+  explicit PragmaPackHandler() : PragmaHandler("pack") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaMSStructHandler : public PragmaHandler {
+  explicit PragmaMSStructHandler() : PragmaHandler("ms_struct") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaUnusedHandler : public PragmaHandler {
+  PragmaUnusedHandler() : PragmaHandler("unused") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaWeakHandler : public PragmaHandler {
+  explicit PragmaWeakHandler() : PragmaHandler("weak") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaRedefineExtnameHandler : public PragmaHandler {
+  explicit PragmaRedefineExtnameHandler() : PragmaHandler("redefine_extname") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaOpenCLExtensionHandler : public PragmaHandler {
+  PragmaOpenCLExtensionHandler() : PragmaHandler("EXTENSION") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+
+struct PragmaFPContractHandler : public PragmaHandler {
+  PragmaFPContractHandler() : PragmaHandler("FP_CONTRACT") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaNoOpenMPHandler : public PragmaHandler {
+  PragmaNoOpenMPHandler() : PragmaHandler("omp") { }
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaOpenMPHandler : public PragmaHandler {
+  PragmaOpenMPHandler() : PragmaHandler("omp") { }
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+/// PragmaCommentHandler - "\#pragma comment ...".
+struct PragmaCommentHandler : public PragmaHandler {
+  PragmaCommentHandler(Sema &Actions)
+    : PragmaHandler("comment"), Actions(Actions) {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+private:
+  Sema &Actions;
+};
+
+struct PragmaDetectMismatchHandler : public PragmaHandler {
+  PragmaDetectMismatchHandler(Sema &Actions)
+    : PragmaHandler("detect_mismatch"), Actions(Actions) {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+private:
+  Sema &Actions;
+};
+
+struct PragmaMSPointersToMembers : public PragmaHandler {
+  explicit PragmaMSPointersToMembers() : PragmaHandler("pointers_to_members") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+struct PragmaMSVtorDisp : public PragmaHandler {
+  explicit PragmaMSVtorDisp() : PragmaHandler("vtordisp") {}
+  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+                    Token &FirstToken) override;
+};
+
+}  // end namespace
+
+void Parser::initializePragmaHandlers() {
+  AlignHandler.reset(new PragmaAlignHandler());
+  PP.AddPragmaHandler(AlignHandler.get());
+
+  GCCVisibilityHandler.reset(new PragmaGCCVisibilityHandler());
+  PP.AddPragmaHandler("GCC", GCCVisibilityHandler.get());
+
+  OptionsHandler.reset(new PragmaOptionsHandler());
+  PP.AddPragmaHandler(OptionsHandler.get());
+
+  PackHandler.reset(new PragmaPackHandler());
+  PP.AddPragmaHandler(PackHandler.get());
+
+  MSStructHandler.reset(new PragmaMSStructHandler());
+  PP.AddPragmaHandler(MSStructHandler.get());
+
+  UnusedHandler.reset(new PragmaUnusedHandler());
+  PP.AddPragmaHandler(UnusedHandler.get());
+
+  WeakHandler.reset(new PragmaWeakHandler());
+  PP.AddPragmaHandler(WeakHandler.get());
+
+  RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler());
+  PP.AddPragmaHandler(RedefineExtnameHandler.get());
+
+  FPContractHandler.reset(new PragmaFPContractHandler());
+  PP.AddPragmaHandler("STDC", FPContractHandler.get());
+
+  if (getLangOpts().OpenCL) {
+    OpenCLExtensionHandler.reset(new PragmaOpenCLExtensionHandler());
+    PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
+
+    PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
+  }
+  if (getLangOpts().OpenMP)
+    OpenMPHandler.reset(new PragmaOpenMPHandler());
+  else
+    OpenMPHandler.reset(new PragmaNoOpenMPHandler());
+  PP.AddPragmaHandler(OpenMPHandler.get());
+
+  if (getLangOpts().MicrosoftExt) {
+    MSCommentHandler.reset(new PragmaCommentHandler(Actions));
+    PP.AddPragmaHandler(MSCommentHandler.get());
+    MSDetectMismatchHandler.reset(new PragmaDetectMismatchHandler(Actions));
+    PP.AddPragmaHandler(MSDetectMismatchHandler.get());
+    MSPointersToMembers.reset(new PragmaMSPointersToMembers());
+    PP.AddPragmaHandler(MSPointersToMembers.get());
+    MSVtorDisp.reset(new PragmaMSVtorDisp());
+    PP.AddPragmaHandler(MSVtorDisp.get());
+  }
+}
+
+void Parser::resetPragmaHandlers() {
+  // Remove the pragma handlers we installed.
+  PP.RemovePragmaHandler(AlignHandler.get());
+  AlignHandler.reset();
+  PP.RemovePragmaHandler("GCC", GCCVisibilityHandler.get());
+  GCCVisibilityHandler.reset();
+  PP.RemovePragmaHandler(OptionsHandler.get());
+  OptionsHandler.reset();
+  PP.RemovePragmaHandler(PackHandler.get());
+  PackHandler.reset();
+  PP.RemovePragmaHandler(MSStructHandler.get());
+  MSStructHandler.reset();
+  PP.RemovePragmaHandler(UnusedHandler.get());
+  UnusedHandler.reset();
+  PP.RemovePragmaHandler(WeakHandler.get());
+  WeakHandler.reset();
+  PP.RemovePragmaHandler(RedefineExtnameHandler.get());
+  RedefineExtnameHandler.reset();
+
+  if (getLangOpts().OpenCL) {
+    PP.RemovePragmaHandler("OPENCL", OpenCLExtensionHandler.get());
+    OpenCLExtensionHandler.reset();
+    PP.RemovePragmaHandler("OPENCL", FPContractHandler.get());
+  }
+  PP.RemovePragmaHandler(OpenMPHandler.get());
+  OpenMPHandler.reset();
+
+  if (getLangOpts().MicrosoftExt) {
+    PP.RemovePragmaHandler(MSCommentHandler.get());
+    MSCommentHandler.reset();
+    PP.RemovePragmaHandler(MSDetectMismatchHandler.get());
+    MSDetectMismatchHandler.reset();
+    PP.RemovePragmaHandler(MSPointersToMembers.get());
+    MSPointersToMembers.reset();
+    PP.RemovePragmaHandler(MSVtorDisp.get());
+    MSVtorDisp.reset();
+  }
+
+  PP.RemovePragmaHandler("STDC", FPContractHandler.get());
+  FPContractHandler.reset();
+}
+
 /// \brief Handle the annotation token produced for #pragma unused(...)
 ///
 /// Each annot_pragma_unused is followed by the argument token so e.g.
@@ -130,7 +331,7 @@
   ConsumeToken();
 
   if (Tok.isNot(tok::l_brace)) {
-    PP.Diag(Tok, diag::err_expected_lbrace);
+    PP.Diag(Tok, diag::err_expected) << tok::l_brace;
     return StmtError();
   }
 
@@ -180,7 +381,24 @@
   }
 }
 
+void Parser::HandlePragmaMSPointersToMembers() {
+  assert(Tok.is(tok::annot_pragma_ms_pointers_to_members));
+  LangOptions::PragmaMSPointersToMembersKind RepresentationMethod =
+      static_cast<LangOptions::PragmaMSPointersToMembersKind>(
+          reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+  SourceLocation PragmaLoc = ConsumeToken(); // The annotation token.
+  Actions.ActOnPragmaMSPointersToMembers(RepresentationMethod, PragmaLoc);
+}
 
+void Parser::HandlePragmaMSVtorDisp() {
+  assert(Tok.is(tok::annot_pragma_ms_vtordisp));
+  uintptr_t Value = reinterpret_cast<uintptr_t>(Tok.getAnnotationValue());
+  Sema::PragmaVtorDispKind Kind =
+      static_cast<Sema::PragmaVtorDispKind>((Value >> 16) & 0xFFFF);
+  MSVtorDispAttr::Mode Mode = MSVtorDispAttr::Mode(Value & 0xFFFF);
+  SourceLocation PragmaLoc = ConsumeToken(); // The annotation token.
+  Actions.ActOnPragmaMSVtorDisp(Kind, PragmaLoc, Mode);
+}
 
 // #pragma GCC visibility comes in two variants:
 //   'push' '(' [visibility] ')'
@@ -283,7 +501,7 @@
       } else if (II->isStr("pop")) {
         Kind = Sema::PPK_Pop;
       } else {
-        PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_invalid_action);
+        PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_action) << "pack";
         return;
       }
       PP.Lex(Tok);
@@ -530,7 +748,7 @@
     }
 
     // Illegal token!
-    PP.Diag(Tok.getLocation(), diag::warn_pragma_unused_expected_punc);
+    PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_punc) << "unused";
     return;
   }
 
@@ -799,6 +1017,193 @@
                       /*DisableMacroExpansion=*/true, /*OwnsTokens=*/true);
 }
 
+/// \brief Handle '#pragma pointers_to_members'
+// The grammar for this pragma is as follows:
+//
+// <inheritance model> ::= ('single' | 'multiple' | 'virtual') '_inheritance'
+//
+// #pragma pointers_to_members '(' 'best_case' ')'
+// #pragma pointers_to_members '(' 'full_generality' [',' inheritance-model] ')'
+// #pragma pointers_to_members '(' inheritance-model ')'
+void PragmaMSPointersToMembers::HandlePragma(Preprocessor &PP,
+                                             PragmaIntroducerKind Introducer,
+                                             Token &Tok) {
+  SourceLocation PointersToMembersLoc = Tok.getLocation();
+  PP.Lex(Tok);
+  if (Tok.isNot(tok::l_paren)) {
+    PP.Diag(PointersToMembersLoc, diag::warn_pragma_expected_lparen)
+      << "pointers_to_members";
+    return;
+  }
+  PP.Lex(Tok);
+  const IdentifierInfo *Arg = Tok.getIdentifierInfo();
+  if (!Arg) {
+    PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+      << "pointers_to_members";
+    return;
+  }
+  PP.Lex(Tok);
+
+  LangOptions::PragmaMSPointersToMembersKind RepresentationMethod;
+  if (Arg->isStr("best_case")) {
+    RepresentationMethod = LangOptions::PPTMK_BestCase;
+  } else {
+    if (Arg->isStr("full_generality")) {
+      if (Tok.is(tok::comma)) {
+        PP.Lex(Tok);
+
+        Arg = Tok.getIdentifierInfo();
+        if (!Arg) {
+          PP.Diag(Tok.getLocation(),
+                  diag::err_pragma_pointers_to_members_unknown_kind)
+              << Tok.getKind() << /*OnlyInheritanceModels*/ 0;
+          return;
+        }
+        PP.Lex(Tok);
+      } else if (Tok.is(tok::r_paren)) {
+        // #pragma pointers_to_members(full_generality) implicitly specifies
+        // virtual_inheritance.
+        Arg = 0;
+        RepresentationMethod = LangOptions::PPTMK_FullGeneralityVirtualInheritance;
+      } else {
+        PP.Diag(Tok.getLocation(), diag::err_expected_punc)
+            << "full_generality";
+        return;
+      }
+    }
+
+    if (Arg) {
+      if (Arg->isStr("single_inheritance")) {
+        RepresentationMethod =
+            LangOptions::PPTMK_FullGeneralitySingleInheritance;
+      } else if (Arg->isStr("multiple_inheritance")) {
+        RepresentationMethod =
+            LangOptions::PPTMK_FullGeneralityMultipleInheritance;
+      } else if (Arg->isStr("virtual_inheritance")) {
+        RepresentationMethod =
+            LangOptions::PPTMK_FullGeneralityVirtualInheritance;
+      } else {
+        PP.Diag(Tok.getLocation(),
+                diag::err_pragma_pointers_to_members_unknown_kind)
+            << Arg << /*HasPointerDeclaration*/ 1;
+        return;
+      }
+    }
+  }
+
+  if (Tok.isNot(tok::r_paren)) {
+    PP.Diag(Tok.getLocation(), diag::err_expected_rparen_after)
+        << (Arg ? Arg->getName() : "full_generality");
+    return;
+  }
+
+  PP.Lex(Tok);
+  if (Tok.isNot(tok::eod)) {
+    PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+      << "pointers_to_members";
+    return;
+  }
+
+  Token AnnotTok;
+  AnnotTok.startToken();
+  AnnotTok.setKind(tok::annot_pragma_ms_pointers_to_members);
+  AnnotTok.setLocation(PointersToMembersLoc);
+  AnnotTok.setAnnotationValue(
+      reinterpret_cast<void *>(static_cast<uintptr_t>(RepresentationMethod)));
+  PP.EnterToken(AnnotTok);
+}
+
+/// \brief Handle '#pragma vtordisp'
+// The grammar for this pragma is as follows:
+//
+// <vtordisp-mode> ::= ('off' | 'on' | '0' | '1' | '2' )
+//
+// #pragma vtordisp '(' ['push' ','] vtordisp-mode ')'
+// #pragma vtordisp '(' 'pop' ')'
+// #pragma vtordisp '(' ')'
+void PragmaMSVtorDisp::HandlePragma(Preprocessor &PP,
+                                    PragmaIntroducerKind Introducer,
+                                    Token &Tok) {
+  SourceLocation VtorDispLoc = Tok.getLocation();
+  PP.Lex(Tok);
+  if (Tok.isNot(tok::l_paren)) {
+    PP.Diag(VtorDispLoc, diag::warn_pragma_expected_lparen) << "vtordisp";
+    return;
+  }
+  PP.Lex(Tok);
+
+  Sema::PragmaVtorDispKind Kind = Sema::PVDK_Set;
+  const IdentifierInfo *II = Tok.getIdentifierInfo();
+  if (II) {
+    if (II->isStr("push")) {
+      // #pragma vtordisp(push, mode)
+      PP.Lex(Tok);
+      if (Tok.isNot(tok::comma)) {
+        PP.Diag(VtorDispLoc, diag::warn_pragma_expected_punc) << "vtordisp";
+        return;
+      }
+      PP.Lex(Tok);
+      Kind = Sema::PVDK_Push;
+      // not push, could be on/off
+    } else if (II->isStr("pop")) {
+      // #pragma vtordisp(pop)
+      PP.Lex(Tok);
+      Kind = Sema::PVDK_Pop;
+    }
+    // not push or pop, could be on/off
+  } else {
+    if (Tok.is(tok::r_paren)) {
+      // #pragma vtordisp()
+      Kind = Sema::PVDK_Reset;
+    }
+  }
+
+
+  uint64_t Value = 0;
+  if (Kind == Sema::PVDK_Push || Kind == Sema::PVDK_Set) {
+    const IdentifierInfo *II = Tok.getIdentifierInfo();
+    if (II && II->isStr("off")) {
+      PP.Lex(Tok);
+      Value = 0;
+    } else if (II && II->isStr("on")) {
+      PP.Lex(Tok);
+      Value = 1;
+    } else if (Tok.is(tok::numeric_constant) &&
+               PP.parseSimpleIntegerLiteral(Tok, Value)) {
+      if (Value > 2) {
+        PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_integer)
+            << 0 << 2 << "vtordisp";
+        return;
+      }
+    } else {
+      PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_action)
+          << "vtordisp";
+      return;
+    }
+  }
+
+  // Finish the pragma: ')' $
+  if (Tok.isNot(tok::r_paren)) {
+    PP.Diag(VtorDispLoc, diag::warn_pragma_expected_rparen) << "vtordisp";
+    return;
+  }
+  PP.Lex(Tok);
+  if (Tok.isNot(tok::eod)) {
+    PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+        << "vtordisp";
+    return;
+  }
+
+  // Enter the annotation.
+  Token AnnotTok;
+  AnnotTok.startToken();
+  AnnotTok.setKind(tok::annot_pragma_ms_vtordisp);
+  AnnotTok.setLocation(VtorDispLoc);
+  AnnotTok.setAnnotationValue(reinterpret_cast<void *>(
+      static_cast<uintptr_t>((Kind << 16) | (Value & 0xFFFF))));
+  PP.EnterToken(AnnotTok);
+}
+
 /// \brief Handle the Microsoft \#pragma detect_mismatch extension.
 ///
 /// The syntax is:
@@ -815,7 +1220,7 @@
   SourceLocation CommentLoc = Tok.getLocation();
   PP.Lex(Tok);
   if (Tok.isNot(tok::l_paren)) {
-    PP.Diag(CommentLoc, diag::err_expected_lparen);
+    PP.Diag(CommentLoc, diag::err_expected) << tok::l_paren;
     return;
   }
 
@@ -838,7 +1243,7 @@
     return;
 
   if (Tok.isNot(tok::r_paren)) {
-    PP.Diag(Tok.getLocation(), diag::err_expected_rparen);
+    PP.Diag(Tok.getLocation(), diag::err_expected) << tok::r_paren;
     return;
   }
   PP.Lex(Tok);  // Eat the r_paren.
diff --git a/lib/Parse/ParsePragma.h b/lib/Parse/ParsePragma.h
deleted file mode 100644
index b41450f..0000000
--- a/lib/Parse/ParsePragma.h
+++ /dev/null
@@ -1,139 +0,0 @@
-//===---- ParserPragmas.h - Language specific pragmas -----------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines #pragma handlers for language specific pragmas.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_PARSE_PARSEPRAGMA_H
-#define LLVM_CLANG_PARSE_PARSEPRAGMA_H
-
-#include "clang/Lex/Pragma.h"
-
-namespace clang {
-  class Sema;
-  class Parser;
-
-class PragmaAlignHandler : public PragmaHandler {
-public:
-  explicit PragmaAlignHandler() : PragmaHandler("align") {}
-
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaGCCVisibilityHandler : public PragmaHandler {
-public:
-  explicit PragmaGCCVisibilityHandler() : PragmaHandler("visibility") {}
-
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaOptionsHandler : public PragmaHandler {
-public:
-  explicit PragmaOptionsHandler() : PragmaHandler("options") {}
-
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaPackHandler : public PragmaHandler {
-public:
-  explicit PragmaPackHandler() : PragmaHandler("pack") {}
-
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-  
-class PragmaMSStructHandler : public PragmaHandler {
-public:
-  explicit PragmaMSStructHandler() : PragmaHandler("ms_struct") {}
-    
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaUnusedHandler : public PragmaHandler {
-public:
-  PragmaUnusedHandler() : PragmaHandler("unused") {}
-
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaWeakHandler : public PragmaHandler {
-public:
-  explicit PragmaWeakHandler() : PragmaHandler("weak") {}
-
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaRedefineExtnameHandler : public PragmaHandler {
-public:
-  explicit PragmaRedefineExtnameHandler() : PragmaHandler("redefine_extname") {}
-
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaOpenCLExtensionHandler : public PragmaHandler {
-public:
-  PragmaOpenCLExtensionHandler() : PragmaHandler("EXTENSION") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-  
-
-class PragmaFPContractHandler : public PragmaHandler {
-public:
-  PragmaFPContractHandler() : PragmaHandler("FP_CONTRACT") {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaNoOpenMPHandler : public PragmaHandler {
-public:
-  PragmaNoOpenMPHandler() : PragmaHandler("omp") { }
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-class PragmaOpenMPHandler : public PragmaHandler {
-public:
-  PragmaOpenMPHandler() : PragmaHandler("omp") { }
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-};
-
-/// PragmaCommentHandler - "\#pragma comment ...".
-class PragmaCommentHandler : public PragmaHandler {
-public:
-  PragmaCommentHandler(Sema &Actions)
-    : PragmaHandler("comment"), Actions(Actions) {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-private:
-  Sema &Actions;
-};
-
-class PragmaDetectMismatchHandler : public PragmaHandler {
-public:
-  PragmaDetectMismatchHandler(Sema &Actions)
-    : PragmaHandler("detect_mismatch"), Actions(Actions) {}
-  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
-                            Token &FirstToken);
-private:
-  Sema &Actions;
-};
-
-}  // end namespace clang
-
-#endif
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index d1f2138..7254eb3 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -23,6 +23,7 @@
 #include "clang/Sema/PrettyDeclStackTrace.h"
 #include "clang/Sema/Scope.h"
 #include "clang/Sema/TypoCorrection.h"
+#include "llvm/ADT/SmallString.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCObjectFileInfo.h"
@@ -34,7 +35,6 @@
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/TargetSelect.h"
-#include "llvm/ADT/SmallString.h"
 using namespace clang;
 
 //===----------------------------------------------------------------------===//
@@ -142,7 +142,7 @@
     WantCXXNamedCasts = false;
   }
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     if (FieldDecl *FD = candidate.getCorrectionDeclAs<FieldDecl>())
       return !candidate.getCorrectionSpecifier() || isa<ObjCIvarDecl>(FD);
     if (NextToken.is(tok::equal))
@@ -345,12 +345,15 @@
     ProhibitAttributes(Attrs);
     return ParseOpenMPDeclarativeOrExecutableDirective();
 
+  case tok::annot_pragma_ms_pointers_to_members:
+    ProhibitAttributes(Attrs);
+    HandlePragmaMSPointersToMembers();
+    return StmtEmpty();
+
   }
 
   // If we reached this code, the statement must end in a semicolon.
-  if (Tok.is(tok::semi)) {
-    ConsumeToken();
-  } else if (!Res.isInvalid()) {
+  if (!TryConsumeToken(tok::semi) && !Res.isInvalid()) {
     // If the result was valid, then we do want to diagnose this.  Use
     // ExpectAndConsume to emit the diagnostic, even though we know it won't
     // succeed.
@@ -412,7 +415,7 @@
 ///
 StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) {
   if(Tok.isNot(tok::l_brace))
-    return StmtError(Diag(Tok,diag::err_expected_lbrace));
+    return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
 
   StmtResult TryBlock(ParseCompoundStatement());
   if(TryBlock.isInvalid())
@@ -449,7 +452,7 @@
     raii2(Ident___exception_code, false),
     raii3(Ident_GetExceptionCode, false);
 
-  if(ExpectAndConsume(tok::l_paren,diag::err_expected_lparen))
+  if (ExpectAndConsume(tok::l_paren))
     return StmtError();
 
   ParseScope ExpectScope(this, Scope::DeclScope | Scope::ControlScope);
@@ -470,7 +473,7 @@
   if(FilterExpr.isInvalid())
     return StmtError();
 
-  if(ExpectAndConsume(tok::r_paren,diag::err_expected_rparen))
+  if (ExpectAndConsume(tok::r_paren))
     return StmtError();
 
   StmtResult Block(ParseCompoundStatement());
@@ -543,7 +546,7 @@
         SubStmt = Actions.ProcessStmtAttributes(
             SubStmt.get(), TempAttrs.getList(), TempAttrs.Range);
     } else {
-      Diag(Tok, diag::err_expected_semi_after) << "__attribute__";
+      Diag(Tok, diag::err_expected_after) << "__attribute__" << tok::semi;
     }
   }
 
@@ -624,10 +627,8 @@
     // GNU case range extension.
     SourceLocation DotDotDotLoc;
     ExprResult RHS;
-    if (Tok.is(tok::ellipsis)) {
-      Diag(Tok, diag::ext_gnu_case_range);
-      DotDotDotLoc = ConsumeToken();
-
+    if (TryConsumeToken(tok::ellipsis, DotDotDotLoc)) {
+      Diag(DotDotDotLoc, diag::ext_gnu_case_range);
       RHS = ParseConstantExpression();
       if (RHS.isInvalid()) {
         SkipUntil(tok::colon, StopAtSemi);
@@ -637,18 +638,17 @@
 
     ColonProtection.restore();
 
-    if (Tok.is(tok::colon)) {
-      ColonLoc = ConsumeToken();
-
-    // Treat "case blah;" as a typo for "case blah:".
-    } else if (Tok.is(tok::semi)) {
-      ColonLoc = ConsumeToken();
-      Diag(ColonLoc, diag::err_expected_colon_after) << "'case'"
-        << FixItHint::CreateReplacement(ColonLoc, ":");
+    if (TryConsumeToken(tok::colon, ColonLoc)) {
+    } else if (TryConsumeToken(tok::semi, ColonLoc)) {
+      // Treat "case blah;" as a typo for "case blah:".
+      Diag(ColonLoc, diag::err_expected_after)
+          << "'case'" << tok::colon
+          << FixItHint::CreateReplacement(ColonLoc, ":");
     } else {
       SourceLocation ExpectedLoc = PP.getLocForEndOfToken(PrevTokLocation);
-      Diag(ExpectedLoc, diag::err_expected_colon_after) << "'case'"
-        << FixItHint::CreateInsertion(ExpectedLoc, ":");
+      Diag(ExpectedLoc, diag::err_expected_after)
+          << "'case'" << tok::colon
+          << FixItHint::CreateInsertion(ExpectedLoc, ":");
       ColonLoc = ExpectedLoc;
     }
 
@@ -713,18 +713,17 @@
   SourceLocation DefaultLoc = ConsumeToken();  // eat the 'default'.
 
   SourceLocation ColonLoc;
-  if (Tok.is(tok::colon)) {
-    ColonLoc = ConsumeToken();
-
-  // Treat "default;" as a typo for "default:".
-  } else if (Tok.is(tok::semi)) {
-    ColonLoc = ConsumeToken();
-    Diag(ColonLoc, diag::err_expected_colon_after) << "'default'"
-      << FixItHint::CreateReplacement(ColonLoc, ":");
+  if (TryConsumeToken(tok::colon, ColonLoc)) {
+  } else if (TryConsumeToken(tok::semi, ColonLoc)) {
+    // Treat "default;" as a typo for "default:".
+    Diag(ColonLoc, diag::err_expected_after)
+        << "'default'" << tok::colon
+        << FixItHint::CreateReplacement(ColonLoc, ":");
   } else {
     SourceLocation ExpectedLoc = PP.getLocForEndOfToken(PrevTokLocation);
-    Diag(ExpectedLoc, diag::err_expected_colon_after) << "'default'"
-      << FixItHint::CreateInsertion(ExpectedLoc, ":");
+    Diag(ExpectedLoc, diag::err_expected_after)
+        << "'default'" << tok::colon
+        << FixItHint::CreateInsertion(ExpectedLoc, ":");
     ColonLoc = ExpectedLoc;
   }
 
@@ -826,6 +825,9 @@
     case tok::annot_pragma_fp_contract:
       HandlePragmaFPContract();
       break;
+    case tok::annot_pragma_ms_pointers_to_members:
+      HandlePragmaMSPointersToMembers();
+      break;
     default:
       checkForPragmas = false;
       break;
@@ -867,7 +869,7 @@
     SmallVector<Decl *, 8> DeclsInGroup;
     while (1) {
       if (Tok.isNot(tok::identifier)) {
-        Diag(Tok, diag::err_expected_ident);
+        Diag(Tok, diag::err_expected) << tok::identifier;
         break;
       }
 
@@ -875,9 +877,8 @@
       SourceLocation IdLoc = ConsumeToken();
       DeclsInGroup.push_back(Actions.LookupOrCreateLabel(II, IdLoc, LabelLoc));
 
-      if (!Tok.is(tok::comma))
+      if (!TryConsumeToken(tok::comma))
         break;
-      ConsumeToken();
     }
 
     DeclSpec DS(AttrFactory);
@@ -890,7 +891,7 @@
       Stmts.push_back(R.release());
   }
 
-  while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+  while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
     if (Tok.is(tok::annot_pragma_unused)) {
       HandlePragmaUnused();
       continue;
@@ -1077,8 +1078,7 @@
   //    would have to notify ParseStatement not to create a new scope. It's
   //    simpler to let it create a new scope.
   //
-  ParseScope InnerScope(this, Scope::DeclScope,
-                        C99orCXX && Tok.isNot(tok::l_brace));
+  ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
 
   // Read the 'then' stmt.
   SourceLocation ThenStmtLoc = Tok.getLocation();
@@ -1110,8 +1110,7 @@
     // The substatement in a selection-statement (each substatement, in the else
     // form of the if statement) implicitly defines a local scope.
     //
-    ParseScope InnerScope(this, Scope::DeclScope,
-                          C99orCXX && Tok.isNot(tok::l_brace));
+    ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
 
     ElseStmt = ParseStatement();
 
@@ -1175,7 +1174,7 @@
   // while, for, and switch statements are local to the if, while, for, or
   // switch statement (including the controlled statement).
   //
-  unsigned ScopeFlags = Scope::BreakScope | Scope::SwitchScope;
+  unsigned ScopeFlags = Scope::SwitchScope;
   if (C99orCXX)
     ScopeFlags |= Scope::DeclScope | Scope::ControlScope;
   ParseScope SwitchScope(this, ScopeFlags);
@@ -1213,8 +1212,8 @@
   // See comments in ParseIfStatement for why we create a scope for the
   // condition and a new scope for substatement in C++.
   //
-  ParseScope InnerScope(this, Scope::DeclScope,
-                        C99orCXX && Tok.isNot(tok::l_brace));
+  getCurScope()->AddFlags(Scope::BreakScope);
+  ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
 
   // Read the body statement.
   StmtResult Body(ParseStatement(TrailingElseLoc));
@@ -1280,7 +1279,7 @@
 
   FullExprArg FullCond(Actions.MakeFullExpr(Cond.get(), WhileLoc));
 
-  // C99 6.8.5p5 - In C99, the body of the if statement is a scope, even if
+  // C99 6.8.5p5 - In C99, the body of the while statement is a scope, even if
   // there is no compound stmt.  C90 does not have this clause.  We only do this
   // if the body isn't a compound statement to avoid push/pop in common cases.
   //
@@ -1291,8 +1290,7 @@
   // See comments in ParseIfStatement for why we create a scope for the
   // condition and a new scope for substatement in C++.
   //
-  ParseScope InnerScope(this, Scope::DeclScope,
-                        C99orCXX && Tok.isNot(tok::l_brace));
+  ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
 
   // Read the body statement.
   StmtResult Body(ParseStatement(TrailingElseLoc));
@@ -1325,7 +1323,7 @@
 
   ParseScope DoScope(this, ScopeFlags);
 
-  // C99 6.8.5p5 - In C99, the body of the if statement is a scope, even if
+  // C99 6.8.5p5 - In C99, the body of the do statement is a scope, even if
   // there is no compound stmt.  C90 does not have this clause. We only do this
   // if the body isn't a compound statement to avoid push/pop in common cases.
   //
@@ -1333,9 +1331,8 @@
   // The substatement in an iteration-statement implicitly defines a local scope
   // which is entered and exited each time through the loop.
   //
-  ParseScope InnerScope(this, Scope::DeclScope,
-                        (getLangOpts().C99 || getLangOpts().CPlusPlus) &&
-                        Tok.isNot(tok::l_brace));
+  bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
+  ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
 
   // Read the body statement.
   StmtResult Body(ParseStatement());
@@ -1346,7 +1343,7 @@
   if (Tok.isNot(tok::kw_while)) {
     if (!Body.isInvalid()) {
       Diag(Tok, diag::err_expected_while);
-      Diag(DoLoc, diag::note_matching) << "do";
+      Diag(DoLoc, diag::note_matching) << "'do'";
       SkipUntil(tok::semi, StopBeforeMatch);
     }
     return StmtError();
@@ -1424,12 +1421,9 @@
   // Names declared in the for-init-statement are in the same declarative-region
   // as those declared in the condition.
   //
-  unsigned ScopeFlags;
+  unsigned ScopeFlags = 0;
   if (C99orCXXorObjC)
-    ScopeFlags = Scope::BreakScope | Scope::ContinueScope |
-                 Scope::DeclScope  | Scope::ControlScope;
-  else
-    ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+    ScopeFlags = Scope::DeclScope | Scope::ControlScope;
 
   ParseScope ForScope(this, ScopeFlags);
 
@@ -1544,6 +1538,9 @@
       }
     }
   }
+
+  // Parse the second part of the for specifier.
+  getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
   if (!ForEach && !ForRange) {
     assert(!SecondPart.get() && "Shouldn't have a second expression yet.");
     // Parse the second part of the for specifier.
@@ -1611,7 +1608,7 @@
                                                      T.getCloseLocation());
   }
 
-  // C99 6.8.5p5 - In C99, the body of the if statement is a scope, even if
+  // C99 6.8.5p5 - In C99, the body of the for statement is a scope, even if
   // there is no compound stmt.  C90 does not have this clause.  We only do this
   // if the body isn't a compound statement to avoid push/pop in common cases.
   //
@@ -1622,8 +1619,15 @@
   // See comments in ParseIfStatement for why we create a scope for
   // for-init-statement/condition and a new scope for substatement in C++.
   //
-  ParseScope InnerScope(this, Scope::DeclScope,
-                        C99orCXXorObjC && Tok.isNot(tok::l_brace));
+  ParseScope InnerScope(this, Scope::DeclScope, C99orCXXorObjC,
+                        Tok.is(tok::l_brace));
+
+  // The body of the for loop has the same local mangling number as the
+  // for-init-statement.
+  // It will only be incremented if the body contains other things that would
+  // normally increment the mangling number (like a compound statement).
+  if (C99orCXXorObjC)
+    getCurScope()->decrementMSLocalManglingNumber();
 
   // Read the body statement.
   StmtResult Body(ParseStatement(TrailingElseLoc));
@@ -1677,7 +1681,7 @@
     }
     Res = Actions.ActOnIndirectGotoStmt(GotoLoc, StarLoc, R.take());
   } else {
-    Diag(Tok, diag::err_expected_ident);
+    Diag(Tok, diag::err_expected) << tok::identifier;
     return StmtError();
   }
 
@@ -1730,8 +1734,8 @@
           << R.get()->getSourceRange();
     } else
         R = ParseExpression();
-    if (R.isInvalid()) {  // Skip to the semicolon, but don't consume it.
-      SkipUntil(tok::semi, StopBeforeMatch);
+    if (R.isInvalid()) {
+      SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
       return StmtError();
     }
   }
@@ -1762,7 +1766,7 @@
 
     void *LookupInlineAsmIdentifier(StringRef &LineBuf,
                                     InlineAsmIdentifierInfo &Info,
-                                    bool IsUnevaluatedContext) {
+                                    bool IsUnevaluatedContext) override {
       // Collect the desired tokens.
       SmallVector<Token, 16> LineToks;
       const Token *FirstOrigToken = 0;
@@ -1802,7 +1806,7 @@
     }
 
     bool LookupInlineAsmField(StringRef Base, StringRef Member,
-                              unsigned &Offset) {
+                              unsigned &Offset) override {
       return TheParser.getActions().LookupInlineAsmField(Base, Member,
                                                          Offset, AsmLoc);
     }
@@ -1920,30 +1924,33 @@
                                     TemplateKWLoc,
                                     Id);
 
-  // If we've run into the poison token we inserted before, or there
-  // was a parsing error, then claim the entire line.
-  if (Invalid || Tok.is(EndOfStream)) {
-    NumLineToksConsumed = LineToks.size() - 2;
-
-    // Otherwise, claim up to the start of the next token.
+  // Figure out how many tokens we are into LineToks.
+  unsigned LineIndex = 0;
+  if (Tok.is(EndOfStream)) {
+    LineIndex = LineToks.size() - 2;
   } else {
-    // Figure out how many tokens we are into LineToks.
-    unsigned LineIndex = 0;
     while (LineToks[LineIndex].getLocation() != Tok.getLocation()) {
       LineIndex++;
       assert(LineIndex < LineToks.size() - 2); // we added two extra tokens
     }
+  }
 
+  // If we've run into the poison token we inserted before, or there
+  // was a parsing error, then claim the entire line.
+  if (Invalid || Tok.is(EndOfStream)) {
+    NumLineToksConsumed = LineToks.size() - 2;
+  } else {
+    // Otherwise, claim up to the start of the next token.
     NumLineToksConsumed = LineIndex;
   }
-      
-  // Finally, restore the old parsing state by consuming all the
-  // tokens we staged before, implicitly killing off the
-  // token-lexer we pushed.
-  for (unsigned n = LineToks.size() - 2 - NumLineToksConsumed; n != 0; --n) {
+
+  // Finally, restore the old parsing state by consuming all the tokens we
+  // staged before, implicitly killing off the token-lexer we pushed.
+  for (unsigned i = 0, e = LineToks.size() - LineIndex - 2; i != e; ++i) {
     ConsumeAnyToken();
   }
-  ConsumeToken(EndOfStream);
+  assert(Tok.is(EndOfStream));
+  ConsumeToken();
 
   // Leave LineToks in its original state.
   LineToks.pop_back();
@@ -2058,7 +2065,7 @@
   SourceLocation TokLoc = Tok.getLocation();
   do {
     // If we hit EOF, we're done, period.
-    if (Tok.is(tok::eof))
+    if (isEofOrEom())
       break;
 
     if (!InAsmComment && Tok.is(tok::semi)) {
@@ -2112,12 +2119,12 @@
 
   if (InBraces && BraceCount != savedBraceCount) {
     // __asm without closing brace (this can happen at EOF).
-    Diag(Tok, diag::err_expected_rbrace);
-    Diag(LBraceLoc, diag::note_matching) << "{";
+    Diag(Tok, diag::err_expected) << tok::r_brace;
+    Diag(LBraceLoc, diag::note_matching) << tok::l_brace;
     return StmtError();
   } else if (NumTokensRead == 0) {
     // Empty __asm.
-    Diag(Tok, diag::err_expected_lbrace);
+    Diag(Tok, diag::err_expected) << tok::l_brace;
     return StmtError();
   }
 
@@ -2127,7 +2134,7 @@
   SmallVector<StringRef, 4> ClobberRefs;
 
   // We need an actual supported target.
-  llvm::Triple TheTriple = Actions.Context.getTargetInfo().getTriple();
+  const llvm::Triple &TheTriple = Actions.Context.getTargetInfo().getTriple();
   llvm::Triple::ArchType ArchTy = TheTriple.getArch();
   const std::string &TT = TheTriple.getTriple();
   const llvm::Target *TheTarget = 0;
@@ -2156,13 +2163,13 @@
   if (buildMSAsmString(PP, AsmLoc, AsmToks, TokOffsets, AsmString))
     return StmtError();
 
-  OwningPtr<llvm::MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TT));
-  OwningPtr<llvm::MCAsmInfo> MAI(TheTarget->createMCAsmInfo(*MRI, TT));
+  std::unique_ptr<llvm::MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TT));
+  std::unique_ptr<llvm::MCAsmInfo> MAI(TheTarget->createMCAsmInfo(*MRI, TT));
   // Get the instruction descriptor.
-  const llvm::MCInstrInfo *MII = TheTarget->createMCInstrInfo(); 
-  OwningPtr<llvm::MCObjectFileInfo> MOFI(new llvm::MCObjectFileInfo());
-  OwningPtr<llvm::MCSubtargetInfo>
-    STI(TheTarget->createMCSubtargetInfo(TT, "", ""));
+  const llvm::MCInstrInfo *MII = TheTarget->createMCInstrInfo();
+  std::unique_ptr<llvm::MCObjectFileInfo> MOFI(new llvm::MCObjectFileInfo());
+  std::unique_ptr<llvm::MCSubtargetInfo> STI(
+      TheTarget->createMCSubtargetInfo(TT, "", ""));
 
   llvm::SourceMgr TempSrcMgr;
   llvm::MCContext Ctx(MAI.get(), MRI.get(), MOFI.get(), &TempSrcMgr);
@@ -2172,11 +2179,11 @@
   // Tell SrcMgr about this buffer, which is what the parser will pick up.
   TempSrcMgr.AddNewSourceBuffer(Buffer, llvm::SMLoc());
 
-  OwningPtr<llvm::MCStreamer> Str(createNullStreamer(Ctx));
-  OwningPtr<llvm::MCAsmParser>
-    Parser(createMCAsmParser(TempSrcMgr, Ctx, *Str.get(), *MAI));
-  OwningPtr<llvm::MCTargetAsmParser>
-    TargetParser(TheTarget->createMCAsmParser(*STI, *Parser, *MII));
+  std::unique_ptr<llvm::MCStreamer> Str(createNullStreamer(Ctx));
+  std::unique_ptr<llvm::MCAsmParser> Parser(
+      createMCAsmParser(TempSrcMgr, Ctx, *Str.get(), *MAI));
+  std::unique_ptr<llvm::MCTargetAsmParser> TargetParser(
+      TheTarget->createMCAsmParser(*STI, *Parser, *MII));
 
   llvm::MCInstPrinter *IP =
     TheTarget->createMCInstPrinter(1, *MAI, *MII, *MRI, *STI);
@@ -2204,6 +2211,11 @@
                                Clobbers, MII, IP, Callback))
     return StmtError();
 
+  // Filter out "fpsw".  Clang doesn't accept it, and it always lists flags and
+  // fpsr as clobbers.
+  auto End = std::remove(Clobbers.begin(), Clobbers.end(), "fpsw");
+  Clobbers.erase(End, Clobbers.end());
+
   // Build the vector of clobber StringRefs.
   unsigned NumClobbers = Clobbers.size();
   ClobberRefs.resize(NumClobbers);
@@ -2357,8 +2369,8 @@
 
         Clobbers.push_back(Clobber.release());
 
-        if (Tok.isNot(tok::comma)) break;
-        ConsumeToken();
+        if (!TryConsumeToken(tok::comma))
+          break;
       }
     }
   }
@@ -2397,7 +2409,7 @@
       T.consumeOpen();
 
       if (Tok.isNot(tok::identifier)) {
-        Diag(Tok, diag::err_expected_ident);
+        Diag(Tok, diag::err_expected) << tok::identifier;
         SkipUntil(tok::r_paren, StopAtSemi);
         return true;
       }
@@ -2434,8 +2446,8 @@
     }
     Exprs.push_back(Res.release());
     // Eat the comma and continue parsing if it exists.
-    if (Tok.isNot(tok::comma)) return false;
-    ConsumeToken();
+    if (!TryConsumeToken(tok::comma))
+      return false;
   }
 }
 
@@ -2554,11 +2566,11 @@
 ///
 ///       [Borland] try-block:
 ///         'try' compound-statement seh-except-block
-///         'try' compound-statment  seh-finally-block
+///         'try' compound-statement seh-finally-block
 ///
 StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry) {
   if (Tok.isNot(tok::l_brace))
-    return StmtError(Diag(Tok, diag::err_expected_lbrace));
+    return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
   // FIXME: Possible draft standard bug: attribute-specifier should be allowed?
 
   StmtResult TryBlock(ParseCompoundStatement(/*isStmtExpr=*/false,
@@ -2629,7 +2641,7 @@
   SourceLocation CatchLoc = ConsumeToken();
 
   BalancedDelimiterTracker T(*this, tok::l_paren);
-  if (T.expectAndConsume(diag::err_expected_lparen))
+  if (T.expectAndConsume())
     return StmtError();
 
   // C++ 3.3.2p3:
@@ -2662,7 +2674,7 @@
     return StmtError();
 
   if (Tok.isNot(tok::l_brace))
-    return StmtError(Diag(Tok, diag::err_expected_lbrace));
+    return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
 
   // FIXME: Possible draft standard bug: attribute-specifier should be allowed?
   StmtResult Block(ParseCompoundStatement());
@@ -2683,7 +2695,7 @@
   // inside these braces escaping to the surrounding code.
   if (Result.Behavior == IEB_Dependent) {
     if (!Tok.is(tok::l_brace)) {
-      Diag(Tok, diag::err_expected_lbrace);
+      Diag(Tok, diag::err_expected) << tok::l_brace;
       return;
     }
 
@@ -2703,7 +2715,7 @@
 
   BalancedDelimiterTracker Braces(*this, tok::l_brace);
   if (Braces.consumeOpen()) {
-    Diag(Tok, diag::err_expected_lbrace);
+    Diag(Tok, diag::err_expected) << tok::l_brace;
     return;
   }
 
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index 076edb9..777fd01 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -101,15 +101,11 @@
   do {
     // Consume the 'export', if any.
     SourceLocation ExportLoc;
-    if (Tok.is(tok::kw_export)) {
-      ExportLoc = ConsumeToken();
-    }
+    TryConsumeToken(tok::kw_export, ExportLoc);
 
     // Consume the 'template', which should be here.
     SourceLocation TemplateLoc;
-    if (Tok.is(tok::kw_template)) {
-      TemplateLoc = ConsumeToken();
-    } else {
+    if (!TryConsumeToken(tok::kw_template, TemplateLoc)) {
       Diag(Tok.getLocation(), diag::err_expected_template);
       return 0;
     }
@@ -121,8 +117,7 @@
                                 TemplateParams, LAngleLoc, RAngleLoc)) {
       // Skip until the semi-colon or a }.
       SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
-      if (Tok.is(tok::semi))
-        ConsumeToken();
+      TryConsumeToken(tok::semi);
       return 0;
     }
 
@@ -302,11 +297,10 @@
                                      SourceLocation &LAngleLoc,
                                      SourceLocation &RAngleLoc) {
   // Get the template parameter list.
-  if (!Tok.is(tok::less)) {
+  if (!TryConsumeToken(tok::less, LAngleLoc)) {
     Diag(Tok.getLocation(), diag::err_expected_less_after) << "template";
     return true;
   }
-  LAngleLoc = ConsumeToken();
 
   // Try to parse the template parameter list.
   bool Failed = false;
@@ -322,10 +316,8 @@
     Tok.setKind(tok::greater);
     RAngleLoc = Tok.getLocation();
     Tok.setLocation(Tok.getLocation().getLocWithOffset(1));
-  } else if (Tok.is(tok::greater))
-    RAngleLoc = ConsumeToken();
-  else if (Failed) {
-    Diag(Tok.getLocation(), diag::err_expected_greater);
+  } else if (!TryConsumeToken(tok::greater, RAngleLoc) && Failed) {
+    Diag(Tok.getLocation(), diag::err_expected) << tok::greater;
     return true;
   }
   return false;
@@ -483,10 +475,8 @@
   // Grab the ellipsis (if given).
   bool Ellipsis = false;
   SourceLocation EllipsisLoc;
-  if (Tok.is(tok::ellipsis)) {
+  if (TryConsumeToken(tok::ellipsis, EllipsisLoc)) {
     Ellipsis = true;
-    EllipsisLoc = ConsumeToken();
-
     Diag(EllipsisLoc,
          getLangOpts().CPlusPlus11
            ? diag::warn_cxx98_compat_variadic_templates
@@ -504,7 +494,7 @@
     // Unnamed template parameter. Don't have to do anything here, just
     // don't consume this token.
   } else {
-    Diag(Tok.getLocation(), diag::err_expected_ident);
+    Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
     return 0;
   }
 
@@ -513,11 +503,9 @@
   // we introduce the type parameter into the local scope.
   SourceLocation EqualLoc;
   ParsedType DefaultArg;
-  if (Tok.is(tok::equal)) {
-    EqualLoc = ConsumeToken();
+  if (TryConsumeToken(tok::equal, EqualLoc))
     DefaultArg = ParseTypeName(/*Range=*/0,
                                Declarator::TemplateTypeArgContext).get();
-  }
 
   return Actions.ActOnTypeParameter(getCurScope(), TypenameKeyword, Ellipsis, 
                                     EllipsisLoc, KeyLoc, ParamName, NameLoc,
@@ -552,7 +540,7 @@
   // identifier, comma, or greater. Provide a fixit if the identifier, comma,
   // or greater appear immediately or after 'typename' or 'struct'. In the
   // latter case, replace the keyword with 'class'.
-  if (!Tok.is(tok::kw_class)) {
+  if (!TryConsumeToken(tok::kw_class)) {
     bool Replace = Tok.is(tok::kw_typename) || Tok.is(tok::kw_struct);
     const Token& Next = Replace ? NextToken() : Tok;
     if (Next.is(tok::identifier) || Next.is(tok::comma) ||
@@ -566,19 +554,15 @@
 
     if (Replace)
       ConsumeToken();
-  } else
-    ConsumeToken();
+  }
 
   // Parse the ellipsis, if given.
   SourceLocation EllipsisLoc;
-  if (Tok.is(tok::ellipsis)) {
-    EllipsisLoc = ConsumeToken();
-    
+  if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
     Diag(EllipsisLoc,
          getLangOpts().CPlusPlus11
            ? diag::warn_cxx98_compat_variadic_templates
            : diag::ext_variadic_templates);
-  }
       
   // Get the identifier, if given.
   SourceLocation NameLoc;
@@ -591,7 +575,7 @@
     // Unnamed template parameter. Don't have to do anything here, just
     // don't consume this token.
   } else {
-    Diag(Tok.getLocation(), diag::err_expected_ident);
+    Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
     return 0;
   }
 
@@ -607,8 +591,7 @@
   // we introduce the template parameter into the local scope.
   SourceLocation EqualLoc;
   ParsedTemplateArgument DefaultArg;
-  if (Tok.is(tok::equal)) {
-    EqualLoc = ConsumeToken();
+  if (TryConsumeToken(tok::equal, EqualLoc)) {
     DefaultArg = ParseTemplateTemplateArgument();
     if (DefaultArg.isInvalid()) {
       Diag(Tok.getLocation(), 
@@ -651,9 +634,7 @@
   // we introduce the template parameter into the local scope.
   SourceLocation EqualLoc;
   ExprResult DefaultArg;
-  if (Tok.is(tok::equal)) {
-    EqualLoc = ConsumeToken();
-
+  if (TryConsumeToken(tok::equal, EqualLoc)) {
     // C++ [temp.param]p15:
     //   When parsing a default template-argument for a non-type
     //   template-parameter, the first non-nested > is taken as the
@@ -692,7 +673,7 @@
 
   switch (Tok.getKind()) {
   default:
-    Diag(Tok.getLocation(), diag::err_expected_greater);
+    Diag(Tok.getLocation(), diag::err_expected) << tok::greater;
     return true;
 
   case tok::greater:
@@ -900,8 +881,7 @@
   if (Invalid) {
     // If we failed to parse the template ID but skipped ahead to a >, we're not
     // going to be able to form a token annotation.  Eat the '>' if present.
-    if (Tok.is(tok::greater))
-      ConsumeToken();
+    TryConsumeToken(tok::greater);
     return true;
   }
 
@@ -916,8 +896,7 @@
     if (Type.isInvalid()) {
       // If we failed to parse the template ID but skipped ahead to a >, we're not
       // going to be able to form a token annotation.  Eat the '>' if present.
-      if (Tok.is(tok::greater))
-        ConsumeToken();
+      TryConsumeToken(tok::greater);
       return true;
     }
 
@@ -1044,11 +1023,9 @@
       UnqualifiedId Name;
       Name.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
       ConsumeToken(); // the identifier
-      
-      // Parse the ellipsis.
-      if (Tok.is(tok::ellipsis))
-        EllipsisLoc = ConsumeToken();
-      
+
+      TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
       // If the next token signals the end of a template argument,
       // then we have a dependent template name that could be a template
       // template argument.
@@ -1067,10 +1044,8 @@
     UnqualifiedId Name;
     Name.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
     ConsumeToken(); // the identifier
-    
-    // Parse the ellipsis.
-    if (Tok.is(tok::ellipsis))
-      EllipsisLoc = ConsumeToken();
+
+    TryConsumeToken(tok::ellipsis, EllipsisLoc);
 
     if (isEndOfTemplateArgument(Tok)) {
       bool MemberOfUnknownSpecialization;
@@ -1161,9 +1136,8 @@
   }
   
   // '<'
-  if (!Tok.is(tok::less))
+  if (!TryConsumeToken(tok::less))
     return false;
-  ConsumeToken();
 
   // An empty template argument list.
   if (Tok.is(tok::greater))
@@ -1188,12 +1162,11 @@
   // Template argument lists are constant-evaluation contexts.
   EnterExpressionEvaluationContext EvalContext(Actions,Sema::ConstantEvaluated);
 
-  while (true) {
+  do {
     ParsedTemplateArgument Arg = ParseTemplateArgument();
-    if (Tok.is(tok::ellipsis)) {
-      SourceLocation EllipsisLoc  = ConsumeToken();
+    SourceLocation EllipsisLoc;
+    if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
       Arg = Actions.ActOnPackExpansion(Arg, EllipsisLoc);
-    }
 
     if (Arg.isInvalid()) {
       SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
@@ -1205,11 +1178,7 @@
       
     // If the next token is a comma, consume it and keep reading
     // arguments.
-    if (Tok.isNot(tok::comma)) break;
-
-    // Consume the comma.
-    ConsumeToken();
-  }
+  } while (TryConsumeToken(tok::comma));
 
   return false;
 }
@@ -1258,9 +1227,7 @@
      return;
 
   // Get the FunctionDecl.
-  FunctionTemplateDecl *FunTmplD = dyn_cast<FunctionTemplateDecl>(LPT.D);
-  FunctionDecl *FunD =
-      FunTmplD ? FunTmplD->getTemplatedDecl() : cast<FunctionDecl>(LPT.D);
+  FunctionDecl *FunD = LPT.D->getAsFunction();
   // Track template parameter depth.
   TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
 
@@ -1304,9 +1271,11 @@
       new ParseScope(this, Scope::TemplateParamScope));
 
   DeclaratorDecl *Declarator = dyn_cast<DeclaratorDecl>(FunD);
-  if (Declarator && Declarator->getNumTemplateParameterLists() != 0) {
+  const unsigned DeclaratorNumTemplateParameterLists =
+      (Declarator ? Declarator->getNumTemplateParameterLists() : 0);
+  if (Declarator && DeclaratorNumTemplateParameterLists != 0) {
     Actions.ActOnReenterDeclaratorTemplateScope(getCurScope(), Declarator);
-    ++CurTemplateDepthTracker;
+    CurTemplateDepthTracker.addDepth(DeclaratorNumTemplateParameterLists);
   }
   Actions.ActOnReenterTemplateScope(getCurScope(), LPT.D);
   ++CurTemplateDepthTracker;
@@ -1341,8 +1310,10 @@
       Actions.ActOnDefaultCtorInitializers(LPT.D);
 
     if (Tok.is(tok::l_brace)) {
-      assert((!FunTmplD || FunTmplD->getTemplateParameters()->getDepth() <
-                               TemplateParameterDepth) &&
+      assert((!isa<FunctionTemplateDecl>(LPT.D) ||
+              cast<FunctionTemplateDecl>(LPT.D)
+                      ->getTemplateParameters()
+                      ->getDepth() < TemplateParameterDepth) &&
              "TemplateParameterDepth should be greater than the depth of "
              "current template being instantiated!");
       ParseFunctionStatementBody(LPT.D, FnScope);
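
The ParseTemplate.cpp hunks above repeatedly replace the two-step "if (Tok.is(K)) ... ConsumeToken()" pattern with a single check-and-consume call, in two flavors: TryConsumeToken(K), which only reports whether the token was eaten, and TryConsumeToken(K, Loc), which also records where it was. A minimal standalone sketch of that pattern follows; MiniTokenStream, TokKind, and SourceLoc are illustrative stand-ins invented for this example, not Clang types.

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Illustrative stand-ins for the parser's token machinery.
    enum class TokKind { Less, Greater, Identifier, Eof };
    struct SourceLoc { std::size_t Offset; };

    class MiniTokenStream {
      std::vector<TokKind> Toks;
      std::size_t Pos = 0;

    public:
      explicit MiniTokenStream(std::vector<TokKind> T) : Toks(std::move(T)) {}

      TokKind current() const {
        return Pos < Toks.size() ? Toks[Pos] : TokKind::Eof;
      }

      // Check-and-consume: advance only if the current token matches.
      bool tryConsume(TokKind K) {
        if (current() != K)
          return false;
        ++Pos;
        return true;
      }

      // Overload that also records where the consumed token was.
      bool tryConsume(TokKind K, SourceLoc &Loc) {
        if (current() != K)
          return false;
        Loc = SourceLoc{Pos++};
        return true;
      }
    };

    int main() {
      MiniTokenStream S({TokKind::Less, TokKind::Identifier, TokKind::Greater});
      SourceLoc LAngle{0}, RAngle{0};
      if (!S.tryConsume(TokKind::Less, LAngle))
        return 1; // the real parser would emit err_expected_less_after here
      S.tryConsume(TokKind::Identifier);
      if (!S.tryConsume(TokKind::Greater, RAngle))
        return 1; // the real parser would emit err_expected << tok::greater
      return 0;
    }
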
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index a1d6b13..330e541 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -320,9 +320,8 @@
       return TPResult::True();
     }
 
-    if (Tok.isNot(tok::comma))
+    if (!TryConsumeToken(tok::comma))
       break;
-    ConsumeToken(); // the comma.
   }
 
   return TPResult::Ambiguous();
@@ -595,13 +594,10 @@
       }
     }
 
-    if (Tok.is(tok::ellipsis))
-      ConsumeToken();
+    TryConsumeToken(tok::ellipsis);
 
-    if (Tok.isNot(tok::comma))
+    if (!TryConsumeToken(tok::comma))
       break;
-
-    ConsumeToken();
   }
 
   // An attribute must end ']]'.
@@ -935,7 +931,6 @@
   case tok::kw___alignof:
   case tok::kw___builtin_choose_expr:
   case tok::kw___builtin_offsetof:
-  case tok::kw___builtin_types_compatible_p:
   case tok::kw___builtin_va_arg:
   case tok::kw___imag:
   case tok::kw___real:
@@ -943,33 +938,10 @@
   case tok::kw___FUNCDNAME__:
   case tok::kw_L__FUNCTION__:
   case tok::kw___PRETTY_FUNCTION__:
-  case tok::kw___has_nothrow_assign:
-  case tok::kw___has_nothrow_copy:
-  case tok::kw___has_nothrow_constructor:
-  case tok::kw___has_trivial_assign:
-  case tok::kw___has_trivial_copy:
-  case tok::kw___has_trivial_constructor:
-  case tok::kw___has_trivial_destructor:
-  case tok::kw___has_virtual_destructor:
-  case tok::kw___is_abstract:
-  case tok::kw___is_base_of:
-  case tok::kw___is_class:
-  case tok::kw___is_convertible_to:
-  case tok::kw___is_empty:
-  case tok::kw___is_enum:
-  case tok::kw___is_interface_class:
-  case tok::kw___is_final:
-  case tok::kw___is_literal:
-  case tok::kw___is_literal_type:
-  case tok::kw___is_pod:
-  case tok::kw___is_polymorphic:
-  case tok::kw___is_sealed:
-  case tok::kw___is_trivial:
-  case tok::kw___is_trivially_assignable:
-  case tok::kw___is_trivially_constructible:
-  case tok::kw___is_trivially_copyable:
-  case tok::kw___is_union:
   case tok::kw___uuidof:
+#define TYPE_TRAIT(N,Spelling,K) \
+  case tok::kw_##Spelling:
+#include "clang/Basic/TokenKinds.def"
     return TPResult::True();
       
   // Obviously starts a type-specifier-seq:
@@ -1015,14 +987,6 @@
   case tok::kw___vector:
   case tok::kw___pixel:
   case tok::kw__Atomic:
-  case tok::kw_image1d_t:
-  case tok::kw_image1d_array_t:
-  case tok::kw_image1d_buffer_t:
-  case tok::kw_image2d_t:
-  case tok::kw_image2d_array_t:
-  case tok::kw_image3d_t:
-  case tok::kw_sampler_t:
-  case tok::kw_event_t:
   case tok::kw___unknown_anytype:
     return TPResult::False();
 
@@ -1756,9 +1720,8 @@
         return TPResult::False();
     }
 
-    if (Tok.isNot(tok::comma))
+    if (!TryConsumeToken(tok::comma))
       break;
-    ConsumeToken(); // the comma.
   }
 
   return TPResult::Ambiguous();
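
The ParseTentative.cpp hunk above stops listing every type-trait keyword by hand and instead expands a TYPE_TRAIT macro over TokenKinds.def, so newly added traits are picked up automatically. The sketch below shows the same X-macro technique in a self-contained form; MINI_TYPE_TRAITS and the Kw enum are invented for illustration and stand in for the real .def file and token-kind enum.

    #include <iostream>

    // Illustrative only: a real .def file would hold one TYPE_TRAIT(...) entry
    // per trait; here an inline list macro plays that role so the sketch is
    // self-contained.
    #define MINI_TYPE_TRAITS(X) \
      X(is_pod)                 \
      X(is_enum)                \
      X(is_union)

    enum class Kw {
    #define ENUMERATE(Spelling) kw___##Spelling,
      MINI_TYPE_TRAITS(ENUMERATE)
    #undef ENUMERATE
      other
    };

    // One switch covers every trait without naming each case by hand; adding a
    // trait to the list updates the enum and the switch together.
    bool isTypeTrait(Kw K) {
      switch (K) {
    #define AS_CASE(Spelling) case Kw::kw___##Spelling:
      MINI_TYPE_TRAITS(AS_CASE)
    #undef AS_CASE
        return true;
      default:
        return false;
      }
    }

    int main() {
      std::cout << std::boolalpha << isTypeTrait(Kw::kw___is_pod) << '\n'; // true
      std::cout << isTypeTrait(Kw::other) << '\n';                         // false
    }
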
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index 457dd36..9936a5a 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -12,9 +12,9 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Parse/Parser.h"
-#include "ParsePragma.h"
 #include "RAIIObjectsForParser.h"
 #include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/Parse/ParseDiagnostic.h"
 #include "clang/Sema/DeclSpec.h"
@@ -33,7 +33,7 @@
 public:
   explicit ActionCommentHandler(Sema &S) : S(S) { }
 
-  virtual bool HandleComment(Preprocessor &PP, SourceRange Comment) {
+  bool HandleComment(Preprocessor &PP, SourceRange Comment) override {
     S.ActOnComment(Comment);
     return false;
   }
@@ -63,51 +63,7 @@
 
   // Add #pragma handlers. These are removed and destroyed in the
   // destructor.
-  AlignHandler.reset(new PragmaAlignHandler());
-  PP.AddPragmaHandler(AlignHandler.get());
-
-  GCCVisibilityHandler.reset(new PragmaGCCVisibilityHandler());
-  PP.AddPragmaHandler("GCC", GCCVisibilityHandler.get());
-
-  OptionsHandler.reset(new PragmaOptionsHandler());
-  PP.AddPragmaHandler(OptionsHandler.get());
-
-  PackHandler.reset(new PragmaPackHandler());
-  PP.AddPragmaHandler(PackHandler.get());
-    
-  MSStructHandler.reset(new PragmaMSStructHandler());
-  PP.AddPragmaHandler(MSStructHandler.get());
-
-  UnusedHandler.reset(new PragmaUnusedHandler());
-  PP.AddPragmaHandler(UnusedHandler.get());
-
-  WeakHandler.reset(new PragmaWeakHandler());
-  PP.AddPragmaHandler(WeakHandler.get());
-
-  RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler());
-  PP.AddPragmaHandler(RedefineExtnameHandler.get());
-
-  FPContractHandler.reset(new PragmaFPContractHandler());
-  PP.AddPragmaHandler("STDC", FPContractHandler.get());
-
-  if (getLangOpts().OpenCL) {
-    OpenCLExtensionHandler.reset(new PragmaOpenCLExtensionHandler());
-    PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
-
-    PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
-  }
-  if (getLangOpts().OpenMP)
-    OpenMPHandler.reset(new PragmaOpenMPHandler());
-  else
-    OpenMPHandler.reset(new PragmaNoOpenMPHandler());
-  PP.AddPragmaHandler(OpenMPHandler.get());
-
-  if (getLangOpts().MicrosoftExt) {
-    MSCommentHandler.reset(new PragmaCommentHandler(actions));
-    PP.AddPragmaHandler(MSCommentHandler.get());
-    MSDetectMismatchHandler.reset(new PragmaDetectMismatchHandler(actions));
-    PP.AddPragmaHandler(MSDetectMismatchHandler.get());
-  }
+  initializePragmaHandlers();
 
   CommentSemaHandler.reset(new ActionCommentHandler(actions));
   PP.addCommentHandler(CommentSemaHandler.get());
@@ -152,14 +108,8 @@
   }
 }
 
-/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
-/// input.  If so, it is consumed and false is returned.
-///
-/// If the input is malformed, this emits the specified diagnostic.  Next, if
-/// SkipToTok is specified, it calls SkipUntil(SkipToTok).  Finally, true is
-/// returned.
 bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
-                              const char *Msg, tok::TokenKind SkipToTok) {
+                              const char *Msg) {
   if (Tok.is(ExpectedTok) || Tok.is(tok::code_completion)) {
     ConsumeAnyToken();
     return false;
@@ -168,35 +118,48 @@
   // Detect common single-character typos and resume.
   if (IsCommonTypo(ExpectedTok, Tok)) {
     SourceLocation Loc = Tok.getLocation();
-    Diag(Loc, DiagID)
-      << Msg
-      << FixItHint::CreateReplacement(SourceRange(Loc),
-                                      getTokenSimpleSpelling(ExpectedTok));
-    ConsumeAnyToken();
+    {
+      DiagnosticBuilder DB = Diag(Loc, DiagID);
+      DB << FixItHint::CreateReplacement(
+                SourceRange(Loc), tok::getPunctuatorSpelling(ExpectedTok));
+      if (DiagID == diag::err_expected)
+        DB << ExpectedTok;
+      else if (DiagID == diag::err_expected_after)
+        DB << Msg << ExpectedTok;
+      else
+        DB << Msg;
+    }
 
     // Pretend there wasn't a problem.
+    ConsumeAnyToken();
     return false;
   }
 
-  const char *Spelling = 0;
   SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
-  if (EndLoc.isValid() &&
-      (Spelling = tok::getTokenSimpleSpelling(ExpectedTok))) {
-    // Show what code to insert to fix this problem.
-    Diag(EndLoc, DiagID)
-      << Msg
-      << FixItHint::CreateInsertion(EndLoc, Spelling);
-  } else
-    Diag(Tok, DiagID) << Msg;
+  const char *Spelling = 0;
+  if (EndLoc.isValid())
+    Spelling = tok::getPunctuatorSpelling(ExpectedTok);
 
-  if (SkipToTok != tok::unknown)
-    SkipUntil(SkipToTok, StopAtSemi);
+  DiagnosticBuilder DB =
+      Spelling
+          ? Diag(EndLoc, DiagID) << FixItHint::CreateInsertion(EndLoc, Spelling)
+          : Diag(Tok, DiagID);
+  if (DiagID == diag::err_expected)
+    DB << ExpectedTok;
+  else if (DiagID == diag::err_expected_after)
+    DB << Msg << ExpectedTok;
+  else
+    DB << Msg;
+
   return true;
 }
 
 bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
-  if (Tok.is(tok::semi) || Tok.is(tok::code_completion)) {
-    ConsumeToken();
+  if (TryConsumeToken(tok::semi))
+    return false;
+
+  if (Tok.is(tok::code_completion)) {
+    handleUnexpectedCodeCompletionToken();
     return false;
   }
   
@@ -241,7 +204,8 @@
 
   if (Kind != AfterMemberFunctionDefinition || HadMultipleSemis)
     Diag(StartLoc, diag::ext_extra_semi)
-        << Kind << DeclSpec::getSpecifierName((DeclSpec::TST)TST)
+        << Kind << DeclSpec::getSpecifierName((DeclSpec::TST)TST,
+                                    Actions.getASTContext().getPrintingPolicy())
         << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
   else
     // A single semicolon is valid after a member function definition.
@@ -288,7 +252,7 @@
     if (Toks.size() == 1 && Toks[0] == tok::eof &&
         !HasFlagsSet(Flags, StopAtSemi) &&
         !HasFlagsSet(Flags, StopAtCodeCompletion)) {
-      while (Tok.getKind() != tok::eof)
+      while (Tok.isNot(tok::eof))
         ConsumeAnyToken();
       return true;
     }
@@ -297,10 +261,20 @@
     case tok::eof:
       // Ran out of tokens.
       return false;
-        
+
+    case tok::annot_pragma_openmp_end:
+      // Stop before an OpenMP pragma boundary.
+    case tok::annot_module_begin:
+    case tok::annot_module_end:
+    case tok::annot_module_include:
+      // Stop before we change submodules. They generally indicate a "good"
+      // place to pick up parsing again (except in the special case where
+      // we're trying to skip to EOF).
+      return false;
+
     case tok::code_completion:
       if (!HasFlagsSet(Flags, StopAtCodeCompletion))
-        ConsumeToken();
+        handleUnexpectedCodeCompletionToken();
       return false;
         
     case tok::l_paren:
@@ -435,41 +409,7 @@
   for (unsigned i = 0, e = NumCachedScopes; i != e; ++i)
     delete ScopeCache[i];
 
-  // Remove the pragma handlers we installed.
-  PP.RemovePragmaHandler(AlignHandler.get());
-  AlignHandler.reset();
-  PP.RemovePragmaHandler("GCC", GCCVisibilityHandler.get());
-  GCCVisibilityHandler.reset();
-  PP.RemovePragmaHandler(OptionsHandler.get());
-  OptionsHandler.reset();
-  PP.RemovePragmaHandler(PackHandler.get());
-  PackHandler.reset();
-  PP.RemovePragmaHandler(MSStructHandler.get());
-  MSStructHandler.reset();
-  PP.RemovePragmaHandler(UnusedHandler.get());
-  UnusedHandler.reset();
-  PP.RemovePragmaHandler(WeakHandler.get());
-  WeakHandler.reset();
-  PP.RemovePragmaHandler(RedefineExtnameHandler.get());
-  RedefineExtnameHandler.reset();
-
-  if (getLangOpts().OpenCL) {
-    PP.RemovePragmaHandler("OPENCL", OpenCLExtensionHandler.get());
-    OpenCLExtensionHandler.reset();
-    PP.RemovePragmaHandler("OPENCL", FPContractHandler.get());
-  }
-  PP.RemovePragmaHandler(OpenMPHandler.get());
-  OpenMPHandler.reset();
-
-  if (getLangOpts().MicrosoftExt) {
-    PP.RemovePragmaHandler(MSCommentHandler.get());
-    MSCommentHandler.reset();
-    PP.RemovePragmaHandler(MSDetectMismatchHandler.get());
-    MSDetectMismatchHandler.reset();
-  }
-
-  PP.RemovePragmaHandler("STDC", FPContractHandler.get());
-  FPContractHandler.reset();
+  resetPragmaHandlers();
 
   PP.removeCommentHandler(CommentSemaHandler.get());
 
@@ -574,7 +514,7 @@
 bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
   DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
 
-  // Skip over the EOF token, flagging end of previous input for incremental 
+  // Skip over the EOF token, flagging end of previous input for incremental
   // processing
   if (PP.isIncrementalProcessingEnabled() && Tok.is(tok::eof))
     ConsumeToken();
@@ -592,6 +532,12 @@
     ConsumeToken();
     return false;
 
+  case tok::annot_module_begin:
+  case tok::annot_module_end:
+    // FIXME: Update visibility based on the submodule we're in.
+    ConsumeToken();
+    return false;
+
   case tok::eof:
     // Late template parsing can begin.
     if (getLangOpts().DelayedTemplateParsing)
@@ -679,6 +625,12 @@
   case tok::annot_pragma_openmp:
     ParseOpenMPDeclarativeDirective();
     return DeclGroupPtrTy();
+  case tok::annot_pragma_ms_pointers_to_members:
+    HandlePragmaMSPointersToMembers();
+    return DeclGroupPtrTy();
+  case tok::annot_pragma_ms_vtordisp:
+    HandlePragmaMSVtorDisp();
+    return DeclGroupPtrTy();
   case tok::semi:
     // Either a C++11 empty-declaration or attribute-declaration.
     SingleDecl = Actions.ActOnEmptyDeclaration(getCurScope(),
@@ -706,7 +658,7 @@
     SourceLocation EndLoc;
     ExprResult Result(ParseSimpleAsm(&EndLoc));
 
-    ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+    ExpectAndConsume(tok::semi, diag::err_expected_after,
                      "top-level asm block");
 
     if (Result.isInvalid())
@@ -793,7 +745,6 @@
                   ParseExplicitInstantiation(Declarator::FileContext,
                                              ExternLoc, TemplateLoc, DeclEnd));
     }
-    // FIXME: Detect C++ linkage specifications here?
     goto dont_know;
 
   case tok::kw___if_exists:
@@ -909,7 +860,8 @@
 
     const char *PrevSpec = 0;
     unsigned DiagID;
-    if (DS.SetTypeSpecType(DeclSpec::TST_unspecified, AtLoc, PrevSpec, DiagID))
+    if (DS.SetTypeSpecType(DeclSpec::TST_unspecified, AtLoc, PrevSpec, DiagID,
+                           Actions.getASTContext().getPrintingPolicy()))
       Diag(AtLoc, DiagID) << PrevSpec;
 
     if (Tok.isObjCAtKeyword(tok::objc_protocol))
@@ -922,7 +874,7 @@
   // If the declspec consisted only of 'extern' and we have a string
   // literal following it, this must be a C++ linkage specifier like
   // 'extern "C"'.
-  if (Tok.is(tok::string_literal) && getLangOpts().CPlusPlus &&
+  if (getLangOpts().CPlusPlus && isTokenStringLiteral() &&
       DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
       DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
     Decl *TheDecl = ParseLinkage(DS, Declarator::FileContext);
@@ -949,26 +901,6 @@
   }
 }
 
-
-static inline bool isFunctionDeclaratorRequiringReturnTypeDeduction(
-    const Declarator &D) {
-  if (!D.isFunctionDeclarator() || !D.getDeclSpec().containsPlaceholderType()) 
-    return false;
-  for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
-    unsigned chunkIndex = E - I - 1;
-    const DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
-    if (DeclType.Kind == DeclaratorChunk::Function) {
-      const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
-      if (!FTI.hasTrailingReturnType()) 
-        return true;
-      QualType TrailingRetType = FTI.getTrailingReturnType().get();
-      return TrailingRetType->getCanonicalTypeInternal()
-        ->getContainedAutoType();
-    }
-  } 
-  return false;
-}
-
 /// ParseFunctionDefinition - We parsed and verified that the specified
 /// Declarator is well formed.  If this is a K&R-style function, read the
 /// parameters declaration-list, then start the compound-statement.
@@ -996,9 +928,11 @@
   if (getLangOpts().ImplicitInt && D.getDeclSpec().isEmpty()) {
     const char *PrevSpec;
     unsigned DiagID;
+    const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
     D.getMutableDeclSpec().SetTypeSpecType(DeclSpec::TST_int,
                                            D.getIdentifierLoc(),
-                                           PrevSpec, DiagID);
+                                           PrevSpec, DiagID,
+                                           Policy);
     D.SetRangeBegin(D.getDeclSpec().getSourceRange().getBegin());
   }
 
@@ -1029,10 +963,10 @@
   if (Tok.isNot(tok::equal)) {
     AttributeList *DtorAttrs = D.getAttributes();
     while (DtorAttrs) {
-      if (!IsThreadSafetyAttribute(DtorAttrs->getName()->getName()) &&
+      if (DtorAttrs->isKnownToGCC() &&
           !DtorAttrs->isCXX11Attribute()) {
         Diag(DtorAttrs->getLoc(), diag::warn_attribute_on_function_definition)
-          << DtorAttrs->getName()->getName();
+          << DtorAttrs->getName();
       }
       DtorAttrs = DtorAttrs->getNext();
     }
@@ -1042,8 +976,7 @@
   // tokens and store them for late parsing at the end of the translation unit.
   if (getLangOpts().DelayedTemplateParsing && Tok.isNot(tok::equal) &&
       TemplateInfo.Kind == ParsedTemplateInfo::Template &&
-      !D.getDeclSpec().isConstexprSpecified() && 
-      !isFunctionDeclaratorRequiringReturnTypeDeduction(D)) {
+      Actions.canDelayFunctionBody(D)) {
     MultiTemplateParamsArg TemplateParameterLists(*TemplateInfo.TemplateParams);
     
     ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
@@ -1059,12 +992,7 @@
     LexTemplateFunctionForLateParsing(Toks);
 
     if (DP) {
-      FunctionDecl *FnD = 0;
-      if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(DP))
-        FnD = FunTmpl->getTemplatedDecl();
-      else
-        FnD = cast<FunctionDecl>(DP);
-
+      FunctionDecl *FnD = DP->getAsFunction();
       Actions.CheckForFunctionRedefinition(FnD);
       Actions.MarkAsLateParsedTemplate(FnD, DP, Toks);
     }
@@ -1077,7 +1005,7 @@
       Actions.CurContext->isTranslationUnit()) {
     ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
     Scope *ParentScope = getCurScope()->getParent();
-    
+
     D.setFunctionDefinitionKind(FDK_Definition);
     Decl *FuncDecl = Actions.HandleDeclarator(ParentScope, D,
                                               MultiTemplateParamsArg());
@@ -1089,8 +1017,9 @@
       CurParsedObjCImpl->HasCFunction = true;
       return FuncDecl;
     }
+    // FIXME: Should we really fall through here?
   }
-      
+
   // Enter a scope for the function body.
   ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
 
@@ -1108,28 +1037,22 @@
   // safe because we're always the sole owner.
   D.getMutableDeclSpec().abort();
 
-  if (Tok.is(tok::equal)) {
+  if (TryConsumeToken(tok::equal)) {
     assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='");
-    ConsumeToken();
-
     Actions.ActOnFinishFunctionBody(Res, 0, false);
  
     bool Delete = false;
     SourceLocation KWLoc;
-    if (Tok.is(tok::kw_delete)) {
-      Diag(Tok, getLangOpts().CPlusPlus11 ?
-           diag::warn_cxx98_compat_deleted_function :
-           diag::ext_deleted_function);
-
-      KWLoc = ConsumeToken();
+    if (TryConsumeToken(tok::kw_delete, KWLoc)) {
+      Diag(KWLoc, getLangOpts().CPlusPlus11
+                      ? diag::warn_cxx98_compat_deleted_function
+                      : diag::ext_deleted_function);
       Actions.SetDeclDeleted(Res, KWLoc);
       Delete = true;
-    } else if (Tok.is(tok::kw_default)) {
-      Diag(Tok, getLangOpts().CPlusPlus11 ?
-           diag::warn_cxx98_compat_defaulted_function :
-           diag::ext_defaulted_function);
-
-      KWLoc = ConsumeToken();
+    } else if (TryConsumeToken(tok::kw_default, KWLoc)) {
+      Diag(KWLoc, getLangOpts().CPlusPlus11
+                      ? diag::warn_cxx98_compat_defaulted_function
+                      : diag::ext_defaulted_function);
       Actions.SetDeclDefaulted(Res, KWLoc);
     } else {
       llvm_unreachable("function definition after = not 'delete' or 'default'");
@@ -1139,9 +1062,9 @@
       Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
         << Delete;
       SkipUntil(tok::semi);
-    } else {
-      ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
-                       Delete ? "delete" : "default", tok::semi);
+    } else if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+                                Delete ? "delete" : "default")) {
+      SkipUntil(tok::semi);
     }
 
     return Res;
@@ -1195,9 +1118,8 @@
     // NOTE: GCC just makes this an ext-warn.  It's not clear what it does with
     // the declarations though.  It's trivial to ignore them, really hard to do
     // anything else with them.
-    if (Tok.is(tok::semi)) {
+    if (TryConsumeToken(tok::semi)) {
       Diag(DSStart, diag::err_declaration_does_not_declare_param);
-      ConsumeToken();
       continue;
     }
 
@@ -1237,20 +1159,20 @@
         for (unsigned i = 0; ; ++i) {
           // C99 6.9.1p6: those declarators shall declare only identifiers from
           // the identifier list.
-          if (i == FTI.NumArgs) {
+          if (i == FTI.NumParams) {
             Diag(ParmDeclarator.getIdentifierLoc(), diag::err_no_matching_param)
               << ParmDeclarator.getIdentifier();
             break;
           }
 
-          if (FTI.ArgInfo[i].Ident == ParmDeclarator.getIdentifier()) {
+          if (FTI.Params[i].Ident == ParmDeclarator.getIdentifier()) {
             // Reject redefinitions of parameters.
-            if (FTI.ArgInfo[i].Param) {
+            if (FTI.Params[i].Param) {
               Diag(ParmDeclarator.getIdentifierLoc(),
                    diag::err_param_redefinition)
                  << ParmDeclarator.getIdentifier();
             } else {
-              FTI.ArgInfo[i].Param = Param;
+              FTI.Params[i].Param = Param;
             }
             break;
           }
@@ -1271,12 +1193,14 @@
       ParseDeclarator(ParmDeclarator);
     }
 
-    if (ExpectAndConsumeSemi(diag::err_expected_semi_declaration)) {
-      // Skip to end of block or statement
-      SkipUntil(tok::semi);
-      if (Tok.is(tok::semi))
-        ConsumeToken();
-    }
+    // Consume ';' and continue parsing.
+    if (!ExpectAndConsumeSemi(diag::err_expected_semi_declaration))
+      continue;
+
+    // Otherwise recover by skipping to next semi or mandatory function body.
+    if (SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch))
+      break;
+    TryConsumeToken(tok::semi);
   }
 
   // The actions module must verify that all arguments were declared.
@@ -1340,16 +1264,15 @@
 
   ExprResult Result(ParseAsmStringLiteral());
 
-  if (Result.isInvalid()) {
-    SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
-    if (EndLoc)
-      *EndLoc = Tok.getLocation();
-    ConsumeAnyToken();
-  } else {
+  if (!Result.isInvalid()) {
     // Close the paren and get the location of the end bracket
     T.consumeClose();
     if (EndLoc)
       *EndLoc = T.getCloseLocation();
+  } else if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch)) {
+    if (EndLoc)
+      *EndLoc = Tok.getLocation();
+    ConsumeParen();
   }
 
   return Result;
@@ -1430,8 +1353,8 @@
 
   // Look up and classify the identifier. We don't perform any typo-correction
   // after a scope specifier, because in general we can't recover from typos
-  // there (eg, after correcting 'A::tempalte B<X>::C', we would need to jump
-  // back into scope specifier parsing).
+  // there (eg, after correcting 'A::tempalte B<X>::C' [sic], we would need to
+  // jump back into scope specifier parsing).
   Sema::NameClassification Classification
     = Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
                            IsAddressOfOperand, SS.isEmpty() ? CCC : 0);
@@ -1504,16 +1427,34 @@
 }
 
 bool Parser::TryKeywordIdentFallback(bool DisableKeyword) {
-  assert(Tok.isNot(tok::identifier));
+  assert(!Tok.is(tok::identifier) && !Tok.isAnnotation());
   Diag(Tok, diag::ext_keyword_as_ident)
     << PP.getSpelling(Tok)
     << DisableKeyword;
-  if (DisableKeyword)
-    Tok.getIdentifierInfo()->RevertTokenIDToIdentifier();
+  if (DisableKeyword) {
+    IdentifierInfo *II = Tok.getIdentifierInfo();
+    ContextualKeywords[II] = Tok.getKind();
+    II->RevertTokenIDToIdentifier();
+  }
   Tok.setKind(tok::identifier);
   return true;
 }
 
+bool Parser::TryIdentKeywordUpgrade() {
+  assert(Tok.is(tok::identifier));
+  const IdentifierInfo *II = Tok.getIdentifierInfo();
+  assert(II->hasRevertedTokenIDToIdentifier());
+  // If we find that this is in fact the name of a type trait,
+  // update the token kind in place and parse again to treat it as
+  // the appropriate kind of type trait.
+  llvm::SmallDenseMap<const IdentifierInfo *, tok::TokenKind>::iterator Known =
+      ContextualKeywords.find(II);
+  if (Known == ContextualKeywords.end())
+    return false;
+  Tok.setKind(Known->second);
+  return true;
+}
+
 /// TryAnnotateTypeOrScopeToken - If the current token position is on a
 /// typename (possibly qualified in C++) or a C++ scope specifier not followed
 /// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
@@ -1549,7 +1490,7 @@
     // We will consume the typedef token here and put it back after we have
     // parsed the first identifier, transforming it into something more like:
     //   typename T_::D typedef D;
-    if (getLangOpts().MicrosoftMode && NextToken().is(tok::kw_typedef)) {
+    if (getLangOpts().MSVCCompat && NextToken().is(tok::kw_typedef)) {
       Token TypedefToken;
       PP.Lex(TypedefToken);
       bool Result = TryAnnotateTypeOrScopeToken(EnteringContext, NeedType);
@@ -1601,7 +1542,8 @@
                                      Tok.getLocation());
     } else if (Tok.is(tok::annot_template_id)) {
       TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
-      if (TemplateId->Kind == TNK_Function_template) {
+      if (TemplateId->Kind != TNK_Type_template &&
+          TemplateId->Kind != TNK_Dependent_template_name) {
         Diag(Tok, diag::err_typename_refers_to_non_type_template)
           << Tok.getAnnotationRange();
         return true;
@@ -1725,8 +1667,7 @@
       // annotation token to a type annotation token now.
       AnnotateTemplateIdTokenAsType();
       return false;
-    } else if (TemplateId->Kind == TNK_Var_template)
-      return false;
+    }
   }
 
   if (SS.isEmpty())
@@ -1780,8 +1721,8 @@
   case tok::pipeequal:           // |=
   case tok::equalequal:          // ==
     Diag(Tok, diag::err_invalid_token_after_declarator_suggest_equal)
-      << getTokenSimpleSpelling(Kind)
-      << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()), "=");
+        << Kind
+        << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()), "=");
   case tok::equal:
     return true;
   }
@@ -1910,7 +1851,7 @@
   
   BalancedDelimiterTracker Braces(*this, tok::l_brace);
   if (Braces.consumeOpen()) {
-    Diag(Tok, diag::err_expected_lbrace);
+    Diag(Tok, diag::err_expected) << tok::l_brace;
     return;
   }
 
@@ -1928,14 +1869,15 @@
   }
 
   // Parse the declarations.
-  while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+  // FIXME: Support module import within __if_exists?
+  while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
     ParsedAttributesWithRange attrs(AttrFactory);
     MaybeParseCXX11Attributes(attrs);
     MaybeParseMicrosoftAttributes(attrs);
     DeclGroupPtrTy Result = ParseExternalDeclaration(attrs);
     if (Result && !getCurScope()->getParent())
       Actions.getASTConsumer().HandleTopLevelDecl(Result.get());
-  }     
+  }
   Braces.consumeClose();
 }
 
@@ -1991,17 +1933,20 @@
   P.Diag(P.Tok, diag::err_bracket_depth_exceeded)
     << P.getLangOpts().BracketDepth;
   P.Diag(P.Tok, diag::note_bracket_depth);
-  P.SkipUntil(tok::eof);
-  return true;  
+  P.cutOffParsing();
+  return true;
 }
 
 bool BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
-                                            const char *Msg,
-                                            tok::TokenKind SkipToToc ) {
+                                                const char *Msg,
+                                                tok::TokenKind SkipToTok) {
   LOpen = P.Tok.getLocation();
-  if (P.ExpectAndConsume(Kind, DiagID, Msg, SkipToToc))
+  if (P.ExpectAndConsume(Kind, DiagID, Msg)) {
+    if (SkipToTok != tok::unknown)
+      P.SkipUntil(SkipToTok, Parser::StopAtSemi);
     return true;
-  
+  }
+
   if (getDepth() < MaxDepth)
     return false;
     
@@ -2010,17 +1955,9 @@
 
 bool BalancedDelimiterTracker::diagnoseMissingClose() {
   assert(!P.Tok.is(Close) && "Should have consumed closing delimiter");
-  
-  const char *LHSName = "unknown";
-  diag::kind DID;
-  switch (Close) {
-  default: llvm_unreachable("Unexpected balanced token");
-  case tok::r_paren : LHSName = "("; DID = diag::err_expected_rparen; break;
-  case tok::r_brace : LHSName = "{"; DID = diag::err_expected_rbrace; break;
-  case tok::r_square: LHSName = "["; DID = diag::err_expected_rsquare; break;
-  }
-  P.Diag(P.Tok, DID);
-  P.Diag(LOpen, diag::note_matching) << LHSName;
+
+  P.Diag(P.Tok, diag::err_expected) << Close;
+  P.Diag(LOpen, diag::note_matching) << Kind;
 
   // If we're not already at some kind of closing bracket, skip to our closing
   // token.
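
The parser hunks above move the "expected X" diagnostics away from hard-coded variants (err_expected_lbrace, err_expected_greater, getTokenSimpleSpelling) to a single err_expected diagnostic that takes the token kind via operator<<, with ExpectAndConsume streaming either the token kind, a message, or both depending on the diagnostic ID. Below is a toy version of that builder-streaming shape, under the assumption that the real DiagnosticBuilder emits its message when it goes out of scope; DiagBuilder and expectToken here are hypothetical stand-ins, not Clang APIs.

    #include <iostream>
    #include <sstream>
    #include <string>

    // Hypothetical stand-in for a diagnostic builder: arguments are streamed in
    // with operator<< and the finished message is printed when the builder goes
    // out of scope.
    class DiagBuilder {
      std::ostringstream Msg;

    public:
      explicit DiagBuilder(const char *Lead) { Msg << Lead; }
      ~DiagBuilder() { std::cerr << Msg.str() << '\n'; }
      DiagBuilder &operator<<(const std::string &S) {
        Msg << ' ' << S;
        return *this;
      }
    };

    enum DiagID { err_expected, err_expected_after };

    // Mirror of the hunk's shape: one builder, and what gets streamed depends on
    // which diagnostic ID the caller picked.
    void expectToken(DiagID ID, const std::string &TokenSpelling,
                     const std::string &After) {
      DiagBuilder DB("error: expected");
      if (ID == err_expected)
        DB << "'" + TokenSpelling + "'";
      else // err_expected_after
        DB << "'" + TokenSpelling + "'" << "after" << After;
    }

    int main() {
      expectToken(err_expected, ";", "");
      expectToken(err_expected_after, ";", "top-level asm block");
    }
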
diff --git a/lib/Parse/RAIIObjectsForParser.h b/lib/Parse/RAIIObjectsForParser.h
index f68a2e0..711dc2a 100644
--- a/lib/Parse/RAIIObjectsForParser.h
+++ b/lib/Parse/RAIIObjectsForParser.h
@@ -415,8 +415,8 @@
       
       return diagnoseOverflow();
     }
-    
-    bool expectAndConsume(unsigned DiagID,
+
+    bool expectAndConsume(unsigned DiagID = diag::err_expected,
                           const char *Msg = "",
                           tok::TokenKind SkipToTok = tok::unknown);
     bool consumeClose() {
diff --git a/lib/Rewrite/Core/CMakeLists.txt b/lib/Rewrite/Core/CMakeLists.txt
index 0797818..896382c 100644
--- a/lib/Rewrite/Core/CMakeLists.txt
+++ b/lib/Rewrite/Core/CMakeLists.txt
@@ -1,24 +1,16 @@
+set(LLVM_LINK_COMPONENTS
+  Support
+  )
+
 add_clang_library(clangRewriteCore
   DeltaTree.cpp
   HTMLRewrite.cpp
   RewriteRope.cpp
   Rewriter.cpp
   TokenRewriter.cpp
-  )
 
-add_dependencies(clangRewriteCore
-  ClangAttrClasses
-  ClangAttrList
-  ClangAttrParsedAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticFrontend
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangRewriteCore
-  clangBasic
+  LINK_LIBS
   clangAST
-  clangParse
+  clangBasic
+  clangLex
   )
diff --git a/lib/Rewrite/Core/HTMLRewrite.cpp b/lib/Rewrite/Core/HTMLRewrite.cpp
index 4da00a8..275fbd0 100644
--- a/lib/Rewrite/Core/HTMLRewrite.cpp
+++ b/lib/Rewrite/Core/HTMLRewrite.cpp
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-//  This file defines the HTMLRewriter clas, which is used to translate the
+//  This file defines the HTMLRewriter class, which is used to translate the
 //  text of a source file into prettified HTML.
 //
 //===----------------------------------------------------------------------===//
@@ -17,11 +17,11 @@
 #include "clang/Lex/Preprocessor.h"
 #include "clang/Lex/TokenConcatenation.h"
 #include "clang/Rewrite/Core/Rewriter.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/raw_ostream.h"
+#include <memory>
 using namespace clang;
 
 
diff --git a/lib/Rewrite/Core/Rewriter.cpp b/lib/Rewrite/Core/Rewriter.cpp
index afb1080..08352a2 100644
--- a/lib/Rewrite/Core/Rewriter.cpp
+++ b/lib/Rewrite/Core/Rewriter.cpp
@@ -21,13 +21,17 @@
 #include "clang/Basic/SourceManager.h"
 #include "clang/Lex/Lexer.h"
 #include "llvm/ADT/SmallString.h"
+#include "llvm/Config/config.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace clang;
 
 raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
-  // FIXME: eliminate the copy by writing out each chunk at a time
-  os << std::string(begin(), end());
+  // Walk RewriteRope chunks efficiently using MoveToNextPiece() instead of the
+  // character iterator.
+  for (RopePieceBTreeIterator I = begin(), E = end(); I != E;
+       I.MoveToNextPiece())
+    os << I.piece();
   return os;
 }
 
@@ -446,7 +450,7 @@
     if (!ok()) return;
 
     FileStream->flush();
-#ifdef _WIN32
+#ifdef LLVM_ON_WIN32
     // Win32 does not allow rename/removing opened files.
     FileStream.reset();
 #endif
@@ -455,21 +459,20 @@
       AllWritten = false;
       Diagnostics.Report(clang::diag::err_unable_to_rename_temp)
         << TempFilename << Filename << ec.message();
-      bool existed;
       // If the remove fails, there's not a lot we can do - this is already an
       // error.
-      llvm::sys::fs::remove(TempFilename.str(), existed);
+      llvm::sys::fs::remove(TempFilename.str());
     }
   }
 
-  bool ok() { return FileStream.isValid(); }
+  bool ok() { return (bool)FileStream; }
   raw_ostream &getStream() { return *FileStream; }
 
 private:
   DiagnosticsEngine &Diagnostics;
   StringRef Filename;
   SmallString<128> TempFilename;
-  OwningPtr<llvm::raw_fd_ostream> FileStream;
+  std::unique_ptr<llvm::raw_fd_ostream> FileStream;
   bool &AllWritten;
 };
 } // end anonymous namespace
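
This file, like several others in the patch, replaces llvm::OwningPtr with std::unique_ptr, so the old isValid() check becomes the smart pointer's boolean conversion while ownership is still handed in through reset(). A plain standard-library illustration of those two idioms; the string payload and names below are invented for the example:

    #include <cassert>
    #include <memory>
    #include <string>

    int main() {
      std::unique_ptr<std::string> FileStream; // held in an OwningPtr before the patch
      assert(!FileStream);                     // boolean conversion replaces isValid()

      FileStream.reset(new std::string("output buffer"));
      assert(FileStream && *FileStream == "output buffer");

      FileStream.reset(); // drop and destroy the owned object, as in the Win32 path
      assert(!FileStream);
      return 0;
    }
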
diff --git a/lib/Rewrite/Frontend/CMakeLists.txt b/lib/Rewrite/Frontend/CMakeLists.txt
index 903a3ef..85af97d 100644
--- a/lib/Rewrite/Frontend/CMakeLists.txt
+++ b/lib/Rewrite/Frontend/CMakeLists.txt
@@ -1,3 +1,7 @@
+set(LLVM_LINK_COMPONENTS
+  Support
+  )
+
 add_clang_library(clangRewriteFrontend
   FixItRewriter.cpp
   FrontendActions.cpp
@@ -7,23 +11,12 @@
   RewriteModernObjC.cpp
   RewriteObjC.cpp
   RewriteTest.cpp
-  )
 
-add_dependencies(clangRewriteFrontend
-  ClangAttrClasses
-  ClangAttrList
-  ClangAttrParsedAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticFrontend
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangRewriteFrontend
-  clangBasic
+  LINK_LIBS
   clangAST
-  clangParse
+  clangBasic
+  clangEdit
   clangFrontend
+  clangLex
   clangRewriteCore
   )
diff --git a/lib/Rewrite/Frontend/FixItRewriter.cpp b/lib/Rewrite/Frontend/FixItRewriter.cpp
index 8930c35..8b7af71 100644
--- a/lib/Rewrite/Frontend/FixItRewriter.cpp
+++ b/lib/Rewrite/Frontend/FixItRewriter.cpp
@@ -20,10 +20,10 @@
 #include "clang/Edit/Commit.h"
 #include "clang/Edit/EditsReceiver.h"
 #include "clang/Frontend/FrontendDiagnostic.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cstdio>
+#include <memory>
 
 using namespace clang;
 
@@ -62,10 +62,10 @@
 public:
   RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
 
-  virtual void insert(SourceLocation loc, StringRef text) {
+  void insert(SourceLocation loc, StringRef text) override {
     Rewrite.InsertText(loc, text);
   }
-  virtual void replace(CharSourceRange range, StringRef text) {
+  void replace(CharSourceRange range, StringRef text) override {
     Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
   }
 };
@@ -87,12 +87,12 @@
     int fd;
     std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd);
     std::string Err;
-    OwningPtr<llvm::raw_fd_ostream> OS;
+    std::unique_ptr<llvm::raw_fd_ostream> OS;
     if (fd != -1) {
       OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
     } else {
       OS.reset(new llvm::raw_fd_ostream(Filename.c_str(), Err,
-                                        llvm::sys::fs::F_Binary));
+                                        llvm::sys::fs::F_None));
     }
     if (!Err.empty()) {
       Diags.Report(clang::diag::err_fe_unable_to_open_output)
diff --git a/lib/Rewrite/Frontend/FrontendActions.cpp b/lib/Rewrite/Frontend/FrontendActions.cpp
index e9ec388..bf27efe 100644
--- a/lib/Rewrite/Frontend/FrontendActions.cpp
+++ b/lib/Rewrite/Frontend/FrontendActions.cpp
@@ -19,10 +19,10 @@
 #include "clang/Rewrite/Frontend/ASTConsumers.h"
 #include "clang/Rewrite/Frontend/FixItRewriter.h"
 #include "clang/Rewrite/Frontend/Rewriters.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
+#include <memory>
 
 using namespace clang;
 
@@ -48,7 +48,7 @@
 namespace {
 class FixItRewriteInPlace : public FixItOptions {
 public:
-  std::string RewriteFilename(const std::string &Filename, int &fd) {
+  std::string RewriteFilename(const std::string &Filename, int &fd) override {
     fd = -1;
     return Filename;
   }
@@ -63,7 +63,7 @@
       this->FixWhatYouCan = FixWhatYouCan;
   }
 
-  std::string RewriteFilename(const std::string &Filename, int &fd) {
+  std::string RewriteFilename(const std::string &Filename, int &fd) override {
     fd = -1;
     SmallString<128> Path(Filename);
     llvm::sys::path::replace_extension(Path,
@@ -74,7 +74,7 @@
 
 class FixItRewriteToTemp : public FixItOptions {
 public:
-  std::string RewriteFilename(const std::string &Filename, int &fd) {
+  std::string RewriteFilename(const std::string &Filename, int &fd) override {
     SmallString<128> Path;
     llvm::sys::fs::createTemporaryFile(llvm::sys::path::filename(Filename),
                                        llvm::sys::path::extension(Filename), fd,
@@ -110,9 +110,9 @@
   bool err = false;
   {
     const FrontendOptions &FEOpts = CI.getFrontendOpts();
-    OwningPtr<FrontendAction> FixAction(new SyntaxOnlyAction());
+    std::unique_ptr<FrontendAction> FixAction(new SyntaxOnlyAction());
     if (FixAction->BeginSourceFile(CI, FEOpts.Inputs[0])) {
-      OwningPtr<FixItOptions> FixItOpts;
+      std::unique_ptr<FixItOptions> FixItOpts;
       if (FEOpts.FixToTemporaries)
         FixItOpts.reset(new FixItRewriteToTemp());
       else
diff --git a/lib/Rewrite/Frontend/HTMLPrint.cpp b/lib/Rewrite/Frontend/HTMLPrint.cpp
index 79e4447..64da05f 100644
--- a/lib/Rewrite/Frontend/HTMLPrint.cpp
+++ b/lib/Rewrite/Frontend/HTMLPrint.cpp
@@ -42,8 +42,8 @@
       : Out(OS), PP(pp), SyntaxHighlight(_SyntaxHighlight),
         HighlightMacros(_HighlightMacros) {}
 
-    void Initialize(ASTContext &context);
-    void HandleTranslationUnit(ASTContext &Ctx);
+    void Initialize(ASTContext &context) override;
+    void HandleTranslationUnit(ASTContext &Ctx) override;
   };
 }
 
diff --git a/lib/Rewrite/Frontend/InclusionRewriter.cpp b/lib/Rewrite/Frontend/InclusionRewriter.cpp
index 71ceb5f..058960d 100644
--- a/lib/Rewrite/Frontend/InclusionRewriter.cpp
+++ b/lib/Rewrite/Frontend/InclusionRewriter.cpp
@@ -55,21 +55,16 @@
     PredefinesBuffer = Buf;
   }
 private:
-  virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
-                           SrcMgr::CharacteristicKind FileType,
-                           FileID PrevFID);
-  virtual void FileSkipped(const FileEntry &ParentFile,
-                           const Token &FilenameTok,
-                           SrcMgr::CharacteristicKind FileType);
-  virtual void InclusionDirective(SourceLocation HashLoc,
-                                  const Token &IncludeTok,
-                                  StringRef FileName,
-                                  bool IsAngled,
-                                  CharSourceRange FilenameRange,
-                                  const FileEntry *File,
-                                  StringRef SearchPath,
-                                  StringRef RelativePath,
-                                  const Module *Imported);
+  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+                   SrcMgr::CharacteristicKind FileType,
+                   FileID PrevFID) override;
+  void FileSkipped(const FileEntry &ParentFile, const Token &FilenameTok,
+                   SrcMgr::CharacteristicKind FileType) override;
+  void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+                          StringRef FileName, bool IsAngled,
+                          CharSourceRange FilenameRange, const FileEntry *File,
+                          StringRef SearchPath, StringRef RelativePath,
+                          const Module *Imported) override;
   void WriteLineInfo(const char *Filename, int Line,
                      SrcMgr::CharacteristicKind FileType,
                      StringRef EOL, StringRef Extra = StringRef());
@@ -77,7 +72,7 @@
   void OutputContentUpTo(const MemoryBuffer &FromFile,
                          unsigned &WriteFrom, unsigned WriteTo,
                          StringRef EOL, int &lines,
-                         bool EnsureNewline = false);
+                         bool EnsureNewline);
   void CommentOutDirective(Lexer &DirectivesLex, const Token &StartToken,
                            const MemoryBuffer &FromFile, StringRef EOL,
                            unsigned &NextToWrite, int &Lines);
@@ -250,7 +245,7 @@
                                             StringRef EOL,
                                             unsigned &NextToWrite, int &Line) {
   OutputContentUpTo(FromFile, NextToWrite,
-    SM.getFileOffset(StartToken.getLocation()), EOL, Line);
+    SM.getFileOffset(StartToken.getLocation()), EOL, Line, false);
   Token DirectiveToken;
   do {
     DirectiveLex.LexFromRawLexer(DirectiveToken);
@@ -258,7 +253,7 @@
   OS << "#if 0 /* expanded by -frewrite-includes */" << EOL;
   OutputContentUpTo(FromFile, NextToWrite,
     SM.getFileOffset(DirectiveToken.getLocation()) + DirectiveToken.getLength(),
-    EOL, Line);
+    EOL, Line, true);
   OS << "#endif /* expanded by -frewrite-includes */" << EOL;
 }
 
@@ -335,7 +330,7 @@
   bool isAngled = PP.GetIncludeFilenameSpelling(Tok.getLocation(), Filename);
   const DirectoryLookup *CurDir;
   const FileEntry *File = PP.getHeaderSearchInfo().LookupFile(
-      Filename, isAngled, 0, CurDir,
+      Filename, SourceLocation(), isAngled, 0, CurDir,
       PP.getSourceManager().getFileEntryForID(FileId), 0, 0, 0, false);
 
   FileExists = File != 0;
@@ -363,15 +358,12 @@
   if (SM.getFileIDSize(FileId) == 0)
     return false;
 
-  // The next byte to be copied from the source file
-  unsigned NextToWrite = 0;
+  // The next byte to be copied from the source file, which may be non-zero if
+  // the lexer handled a BOM.
+  unsigned NextToWrite = SM.getFileOffset(RawLex.getSourceLocation());
+  assert(SM.getLineNumber(FileId, NextToWrite) == 1);
   int Line = 1; // The current input file line number.
 
-  // Ignore UTF-8 BOM, otherwise it'd end up somewhere else than the start
-  // of the resulting file.
-  if (FromFile.getBuffer().startswith("\xEF\xBB\xBF"))
-    NextToWrite = 3;
-
   Token RawToken;
   RawLex.LexFromRawLexer(RawToken);
 
@@ -465,12 +457,12 @@
                 // Replace the macro with (0) or (1), followed by the commented
                 // out macro for reference.
                 OutputContentUpTo(FromFile, NextToWrite, SM.getFileOffset(Loc),
-                                  EOL, Line);
+                                  EOL, Line, false);
                 OS << '(' << (int) HasFile << ")/*";
                 OutputContentUpTo(FromFile, NextToWrite,
                                   SM.getFileOffset(RawToken.getLocation()) +
                                   RawToken.getLength(),
-                                  EOL, Line);
+                                  EOL, Line, false);
                 OS << "*/";
               }
             } while (RawToken.isNot(tok::eod));
diff --git a/lib/Rewrite/Frontend/RewriteMacros.cpp b/lib/Rewrite/Frontend/RewriteMacros.cpp
index 4f6a93f..0d0a991 100644
--- a/lib/Rewrite/Frontend/RewriteMacros.cpp
+++ b/lib/Rewrite/Frontend/RewriteMacros.cpp
@@ -16,10 +16,10 @@
 #include "clang/Basic/SourceManager.h"
 #include "clang/Lex/Preprocessor.h"
 #include "clang/Rewrite/Core/Rewriter.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cstdio>
+#include <memory>
 
 using namespace clang;
 
diff --git a/lib/Rewrite/Frontend/RewriteModernObjC.cpp b/lib/Rewrite/Frontend/RewriteModernObjC.cpp
index ae33ac8..9066a5f 100644
--- a/lib/Rewrite/Frontend/RewriteModernObjC.cpp
+++ b/lib/Rewrite/Frontend/RewriteModernObjC.cpp
@@ -24,11 +24,11 @@
 #include "clang/Lex/Lexer.h"
 #include "clang/Rewrite/Core/Rewriter.h"
 #include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/raw_ostream.h"
+#include <memory>
 
 using namespace clang;
 using llvm::utostr;
@@ -185,7 +185,7 @@
   public:
     llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
     // Top Level Driver code.
-    virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+    bool HandleTopLevelDecl(DeclGroupRef D) override {
       for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
         if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
           if (!Class->isThisDeclarationADefinition()) {
@@ -221,8 +221,8 @@
       }
       return true;
     }
-    
-    virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
+
+    void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
       for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
         if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(*I)) {
           if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
@@ -243,8 +243,8 @@
                 bool silenceMacroWarn, bool LineInfo);
     
     ~RewriteModernObjC() {}
-    
-    virtual void HandleTranslationUnit(ASTContext &C);
+
+    void HandleTranslationUnit(ASTContext &C) override;
 
     void ReplaceStmt(Stmt *Old, Stmt *New) {
       Stmt *ReplacingStmt = ReplacedNodes[Old];
@@ -414,9 +414,9 @@
     
     void RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
                                   std::string &Result);
-    
-    virtual void Initialize(ASTContext &context);
-    
+
+    void Initialize(ASTContext &context) override;
+
     // Misc. AST transformation routines. Sometimes they end up calling
     // rewriting routines on the new ASTs.
     CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
@@ -607,6 +607,14 @@
       Selector LoadSel = Context->Selectors.getSelector(0, &II);
       return OD->getClassMethod(LoadSel) != 0;
     }
+
+    StringLiteral *getStringLiteral(StringRef Str) {
+      QualType StrType = Context->getConstantArrayType(
+          Context->CharTy, llvm::APInt(32, Str.size() + 1), ArrayType::Normal,
+          0);
+      return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
+                                   /*Pascal=*/false, StrType, SourceLocation());
+    }
   };
   
 }
@@ -615,9 +623,8 @@
                                                    NamedDecl *D) {
   if (const FunctionProtoType *fproto
       = dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
-    for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(),
-         E = fproto->arg_type_end(); I && (I != E); ++I)
-      if (isTopLevelBlockPointerType(*I)) {
+    for (const auto &I : fproto->param_types())
+      if (isTopLevelBlockPointerType(I)) {
         // All the args are checked/rewritten. Don't call twice!
         RewriteBlockPointerDecl(D);
         break;
@@ -975,7 +982,7 @@
       // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
       Getr += "typedef ";
       const FunctionType *FPRetType = 0;
-      RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr, 
+      RewriteTypeIntoString(PD->getGetterMethodDecl()->getReturnType(), Getr,
                             FPRetType);
       Getr += " _TYPE";
       if (FPRetType) {
@@ -984,14 +991,15 @@
         // Now, emit the argument types (if any).
         if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
           Getr += "(";
-          for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+          for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
             if (i) Getr += ", ";
-            std::string ParamStr = FT->getArgType(i).getAsString(
-                                                          Context->getPrintingPolicy());
+            std::string ParamStr =
+                FT->getParamType(i).getAsString(Context->getPrintingPolicy());
             Getr += ParamStr;
           }
           if (FT->isVariadic()) {
-            if (FT->getNumArgs()) Getr += ", ";
+            if (FT->getNumParams())
+              Getr += ", ";
             Getr += "...";
           }
           Getr += ")";
@@ -1150,18 +1158,13 @@
     ReplaceText(LocStart, 0, "// ");
   }
   
-  for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(),
-       E = CatDecl->prop_end(); I != E; ++I)
-    RewriteProperty(*I);
+  for (auto *I : CatDecl->properties())
+    RewriteProperty(I);
   
-  for (ObjCCategoryDecl::instmeth_iterator
-         I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
-  for (ObjCCategoryDecl::classmeth_iterator
-         I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
+  for (auto *I : CatDecl->instance_methods())
+    RewriteMethodDeclaration(I);
+  for (auto *I : CatDecl->class_methods())
+    RewriteMethodDeclaration(I);
 
   // Lastly, comment out the @end.
   ReplaceText(CatDecl->getAtEndRange().getBegin(), 
@@ -1175,18 +1178,12 @@
   // FIXME: handle protocol headers that are declared across multiple lines.
   ReplaceText(LocStart, 0, "// ");
 
-  for (ObjCProtocolDecl::instmeth_iterator
-         I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
-  for (ObjCProtocolDecl::classmeth_iterator
-         I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
-
-  for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(),
-       E = PDecl->prop_end(); I != E; ++I)
-    RewriteProperty(*I);
+  for (auto *I : PDecl->instance_methods())
+    RewriteMethodDeclaration(I);
+  for (auto *I : PDecl->class_methods())
+    RewriteMethodDeclaration(I);
+  for (auto *I : PDecl->properties())
+    RewriteProperty(I);
   
   // Lastly, comment out the @end.
   SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
@@ -1257,8 +1254,8 @@
     else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
       PointeeTy = BPT->getPointeeType();
     if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
-      ResultStr += FPRetType->getResultType().getAsString(
-        Context->getPrintingPolicy());
+      ResultStr +=
+          FPRetType->getReturnType().getAsString(Context->getPrintingPolicy());
       ResultStr += "(*";
     }
   } else
@@ -1271,7 +1268,7 @@
   //fprintf(stderr,"In RewriteObjCMethodDecl\n");
   const FunctionType *FPRetType = 0;
   ResultStr += "\nstatic ";
-  RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType);
+  RewriteTypeIntoString(OMD->getReturnType(), ResultStr, FPRetType);
   ResultStr += " ";
 
   // Unique method name
@@ -1327,9 +1324,7 @@
   ResultStr += " _cmd";
 
   // Method arguments.
-  for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
-       E = OMD->param_end(); PI != E; ++PI) {
-    ParmVarDecl *PDecl = *PI;
+  for (const auto *PDecl : OMD->params()) {
     ResultStr += ", ";
     if (PDecl->getType()->isObjCQualifiedIdType()) {
       ResultStr += "id ";
@@ -1353,14 +1348,15 @@
     // Now, emit the argument types (if any).
     if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
       ResultStr += "(";
-      for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+      for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
         if (i) ResultStr += ", ";
-        std::string ParamStr = FT->getArgType(i).getAsString(
-          Context->getPrintingPolicy());
+        std::string ParamStr =
+            FT->getParamType(i).getAsString(Context->getPrintingPolicy());
         ResultStr += ParamStr;
       }
       if (FT->isVariadic()) {
-        if (FT->getNumArgs()) ResultStr += ", ";
+        if (FT->getNumParams())
+          ResultStr += ", ";
         ResultStr += "...";
       }
       ResultStr += ")";
@@ -1385,12 +1381,8 @@
   else
     InsertText(CID->getLocStart(), "// ");
 
-  for (ObjCCategoryImplDecl::instmeth_iterator
-       I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(),
-       E = IMD ? IMD->instmeth_end() : CID->instmeth_end();
-       I != E; ++I) {
+  for (auto *OMD : IMD ? IMD->instance_methods() : CID->instance_methods()) {
     std::string ResultStr;
-    ObjCMethodDecl *OMD = *I;
     RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
     SourceLocation LocStart = OMD->getLocStart();
     SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
@@ -1400,12 +1392,8 @@
     ReplaceText(LocStart, endBuf-startBuf, ResultStr);
   }
 
-  for (ObjCCategoryImplDecl::classmeth_iterator
-       I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(),
-       E = IMD ? IMD->classmeth_end() : CID->classmeth_end();
-       I != E; ++I) {
+  for (auto *OMD : IMD ? IMD->class_methods() : CID->class_methods()) {
     std::string ResultStr;
-    ObjCMethodDecl *OMD = *I;
     RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
     SourceLocation LocStart = OMD->getLocStart();
     SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
@@ -1414,12 +1402,8 @@
     const char *endBuf = SM->getCharacterData(LocEnd);
     ReplaceText(LocStart, endBuf-startBuf, ResultStr);
   }
-  for (ObjCCategoryImplDecl::propimpl_iterator
-       I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(),
-       E = IMD ? IMD->propimpl_end() : CID->propimpl_end();
-       I != E; ++I) {
-    RewritePropertyImplDecl(*I, IMD, CID);
-  }
+  for (auto *I : IMD ? IMD->property_impls() : CID->property_impls())
+    RewritePropertyImplDecl(I, IMD, CID);
 
   InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
 }
@@ -1444,17 +1428,12 @@
     // Mark this typedef as having been written into its c++ equivalent.
     ObjCWrittenInterfaces.insert(ClassDecl->getCanonicalDecl());
   
-    for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(),
-         E = ClassDecl->prop_end(); I != E; ++I)
-      RewriteProperty(*I);
-    for (ObjCInterfaceDecl::instmeth_iterator
-         I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end();
-         I != E; ++I)
-      RewriteMethodDeclaration(*I);
-    for (ObjCInterfaceDecl::classmeth_iterator
-         I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end();
-         I != E; ++I)
-      RewriteMethodDeclaration(*I);
+    for (auto *I : ClassDecl->properties())
+      RewriteProperty(I);
+    for (auto *I : ClassDecl->instance_methods())
+      RewriteMethodDeclaration(I);
+    for (auto *I : ClassDecl->class_methods())
+      RewriteMethodDeclaration(I);
 
     // Lastly, comment out the @end.
     ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"), 
@@ -2119,12 +2098,9 @@
 
 Stmt *RewriteModernObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
   // Create a new string expression.
-  QualType StrType = Context->getPointerType(Context->CharTy);
   std::string StrEncoding;
   Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
-  Expr *Replacement = StringLiteral::Create(*Context, StrEncoding,
-                                            StringLiteral::Ascii, false,
-                                            StrType, SourceLocation());
+  Expr *Replacement = getStringLiteral(StrEncoding);
   ReplaceStmt(Exp, Replacement);
 
   // Replace this subexpr in the parent.
@@ -2138,11 +2114,7 @@
   assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
   // Create a call to sel_registerName("selName").
   SmallVector<Expr*, 8> SelExprs;
-  QualType argType = Context->getPointerType(Context->CharTy);
-  SelExprs.push_back(StringLiteral::Create(*Context,
-                                           Exp->getSelector().getAsString(),
-                                           StringLiteral::Ascii, false,
-                                           argType, SourceLocation()));
+  SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
   CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
                                                  &SelExprs[0], SelExprs.size());
   ReplaceStmt(Exp, SelExp);
@@ -2269,7 +2241,7 @@
     proto = dyn_cast<FunctionProtoType>(funcType);
     if (!proto)
       return;
-    Type = proto->getResultType();
+    Type = proto->getReturnType();
   }
   else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
     Loc = FD->getLocation();
@@ -2304,8 +2276,8 @@
   // Now check arguments.
   const char *startBuf = SM->getCharacterData(Loc);
   const char *startFuncBuf = startBuf;
-  for (unsigned i = 0; i < proto->getNumArgs(); i++) {
-    if (needToScanForQualifiers(proto->getArgType(i))) {
+  for (unsigned i = 0; i < proto->getNumParams(); i++) {
+    if (needToScanForQualifiers(proto->getParamType(i))) {
       // Since types are unique, we need to scan the buffer.
 
       const char *endBuf = startBuf;
@@ -2443,14 +2415,14 @@
   const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
   if (!proto)
     return;
-  QualType Type = proto->getResultType();
+  QualType Type = proto->getReturnType();
   std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
   FdStr += " ";
   FdStr += FD->getName();
   FdStr +=  "(";
-  unsigned numArgs = proto->getNumArgs();
+  unsigned numArgs = proto->getNumParams();
   for (unsigned i = 0; i < numArgs; i++) {
-  QualType ArgType = proto->getArgType(i);
+    QualType ArgType = proto->getParamType(i);
   RewriteBlockPointerType(FdStr, ArgType);
   if (i+1 < numArgs)
     FdStr += ", ";
@@ -2623,7 +2595,7 @@
   unsigned i;
   for (i=0; i < tmpName.length(); i++) {
     char c = tmpName.at(i);
-    // replace any non alphanumeric characters with '_'.
+    // replace any non-alphanumeric characters with '_'.
     if (!isAlphanumeric(c))
       tmpName[i] = '_';
   }
@@ -2691,17 +2663,13 @@
   // Synthesize a call to objc_msgSend().
   SmallVector<Expr*, 4> MsgExprs;
   SmallVector<Expr*, 4> ClsExprs;
-  QualType argType = Context->getPointerType(Context->CharTy);
   
   // Create a call to objc_getClass("<BoxingClass>"). It will be the 1st argument.
   ObjCMethodDecl *BoxingMethod = Exp->getBoxingMethod();
   ObjCInterfaceDecl *BoxingClass = BoxingMethod->getClassInterface();
   
   IdentifierInfo *clsName = BoxingClass->getIdentifier();
-  ClsExprs.push_back(StringLiteral::Create(*Context,
-                                           clsName->getName(),
-                                           StringLiteral::Ascii, false,
-                                           argType, SourceLocation()));
+  ClsExprs.push_back(getStringLiteral(clsName->getName()));
   CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
                                                &ClsExprs[0],
                                                ClsExprs.size(), 
@@ -2711,10 +2679,8 @@
   // Create a call to sel_registerName("<BoxingMethod>:"), etc.
   // it will be the 2nd argument.
   SmallVector<Expr*, 4> SelExprs;
-  SelExprs.push_back(StringLiteral::Create(*Context,
-                                           BoxingMethod->getSelector().getAsString(),
-                                           StringLiteral::Ascii, false,
-                                           argType, SourceLocation()));
+  SelExprs.push_back(
+      getStringLiteral(BoxingMethod->getSelector().getAsString()));
   CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
                                                   &SelExprs[0], SelExprs.size(),
                                                   StartLoc, EndLoc);
@@ -2735,9 +2701,8 @@
   SmallVector<QualType, 4> ArgTypes;
   ArgTypes.push_back(Context->getObjCIdType());
   ArgTypes.push_back(Context->getObjCSelType());
-  for (ObjCMethodDecl::param_iterator PI = BoxingMethod->param_begin(),
-       E = BoxingMethod->param_end(); PI != E; ++PI)
-    ArgTypes.push_back((*PI)->getType());
+  for (const auto PI : BoxingMethod->parameters())
+    ArgTypes.push_back(PI->getType());
   
   QualType returnType = Exp->getType();
   // Get the type, we will need to reference it in a couple spots.
@@ -2762,9 +2727,8 @@
   ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
   
   const FunctionType *FT = msgSendType->getAs<FunctionType>();
-  CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
-                                        FT->getResultType(), VK_RValue,
-                                        EndLoc);
+  CallExpr *CE = new (Context)
+      CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
   ReplaceStmt(Exp, CE);
   return CE;
 }
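// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch.] The message-rewriting hunks above
// synthesize, in AST form, roughly this C shape: look up the class with
// objc_getClass("..."), register the selector with sel_registerName("..."),
// cast objc_msgSend to a function-pointer type whose return type comes from
// FT->getReturnType(), then emit the call. The declarations below are local
// stand-ins so the sketch builds without an Objective-C runtime; only the
// call shape is meant to be illustrative.
#include <cstdio>

typedef void *id;  // stand-in for the runtime's id
typedef void *SEL; // stand-in for the runtime's SEL

static id objc_getClass(const char *Name) { std::puts(Name); return nullptr; }
static SEL sel_registerName(const char *Sel) { std::puts(Sel); return nullptr; }
static id objc_msgSend(id, SEL, ...) { return nullptr; }

int main() {
  id Receiver = objc_getClass("NSNumber");
  // The rewriter casts objc_msgSend to the synthesized signature before the
  // call; forming the typed pointer is shown here without invoking it.
  typedef id (*SendFn)(id, SEL, int);
  SendFn Typed = reinterpret_cast<SendFn>(objc_msgSend);
  (void)Typed;
  // Argument order of the emitted call: receiver, selector, then user args.
  id Result = objc_msgSend(Receiver, sel_registerName("numberWithInt:"), 42);
  (void)Result;
  return 0;
}
// ---------------------------------------------------------------------------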
@@ -2828,7 +2792,6 @@
   // Synthesize a call to objc_msgSend().
   SmallVector<Expr*, 32> MsgExprs;
   SmallVector<Expr*, 4> ClsExprs;
-  QualType argType = Context->getPointerType(Context->CharTy);
   QualType expType = Exp->getType();
   
   // Create a call to objc_getClass("NSArray"). It will be the 1st argument.
@@ -2836,10 +2799,7 @@
     expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
   
   IdentifierInfo *clsName = Class->getIdentifier();
-  ClsExprs.push_back(StringLiteral::Create(*Context,
-                                           clsName->getName(),
-                                           StringLiteral::Ascii, false,
-                                           argType, SourceLocation()));
+  ClsExprs.push_back(getStringLiteral(clsName->getName()));
   CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
                                                &ClsExprs[0],
                                                ClsExprs.size(), 
@@ -2850,10 +2810,8 @@
   // it will be the 2nd argument.
   SmallVector<Expr*, 4> SelExprs;
   ObjCMethodDecl *ArrayMethod = Exp->getArrayWithObjectsMethod();
-  SelExprs.push_back(StringLiteral::Create(*Context,
-                                           ArrayMethod->getSelector().getAsString(),
-                                           StringLiteral::Ascii, false,
-                                           argType, SourceLocation()));
+  SelExprs.push_back(
+      getStringLiteral(ArrayMethod->getSelector().getAsString()));
   CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
                                                   &SelExprs[0], SelExprs.size(),
                                                   StartLoc, EndLoc);
@@ -2872,9 +2830,8 @@
   SmallVector<QualType, 4> ArgTypes;
   ArgTypes.push_back(Context->getObjCIdType());
   ArgTypes.push_back(Context->getObjCSelType());
-  for (ObjCMethodDecl::param_iterator PI = ArrayMethod->param_begin(),
-       E = ArrayMethod->param_end(); PI != E; ++PI)
-    ArgTypes.push_back((*PI)->getType());
+  for (const auto *PI : ArrayMethod->params())
+    ArgTypes.push_back(PI->getType());
   
   QualType returnType = Exp->getType();
   // Get the type, we will need to reference it in a couple spots.
@@ -2899,9 +2856,8 @@
   ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
   
   const FunctionType *FT = msgSendType->getAs<FunctionType>();
-  CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
-                                        FT->getResultType(), VK_RValue,
-                                        EndLoc);
+  CallExpr *CE = new (Context)
+      CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
   ReplaceStmt(Exp, CE);
   return CE;
 }
@@ -2991,7 +2947,6 @@
   // Synthesize a call to objc_msgSend().
   SmallVector<Expr*, 32> MsgExprs;
   SmallVector<Expr*, 4> ClsExprs;
-  QualType argType = Context->getPointerType(Context->CharTy);
   QualType expType = Exp->getType();
   
   // Create a call to objc_getClass("NSArray"). It will be the 1st argument.
@@ -2999,10 +2954,7 @@
   expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
   
   IdentifierInfo *clsName = Class->getIdentifier();
-  ClsExprs.push_back(StringLiteral::Create(*Context,
-                                           clsName->getName(),
-                                           StringLiteral::Ascii, false,
-                                           argType, SourceLocation()));
+  ClsExprs.push_back(getStringLiteral(clsName->getName()));
   CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
                                                &ClsExprs[0],
                                                ClsExprs.size(), 
@@ -3013,10 +2965,7 @@
   // it will be the 2nd argument.
   SmallVector<Expr*, 4> SelExprs;
   ObjCMethodDecl *DictMethod = Exp->getDictWithObjectsMethod();
-  SelExprs.push_back(StringLiteral::Create(*Context,
-                                           DictMethod->getSelector().getAsString(),
-                                           StringLiteral::Ascii, false,
-                                           argType, SourceLocation()));
+  SelExprs.push_back(getStringLiteral(DictMethod->getSelector().getAsString()));
   CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
                                                   &SelExprs[0], SelExprs.size(),
                                                   StartLoc, EndLoc);
@@ -3038,9 +2987,8 @@
   SmallVector<QualType, 8> ArgTypes;
   ArgTypes.push_back(Context->getObjCIdType());
   ArgTypes.push_back(Context->getObjCSelType());
-  for (ObjCMethodDecl::param_iterator PI = DictMethod->param_begin(),
-       E = DictMethod->param_end(); PI != E; ++PI) {
-    QualType T = (*PI)->getType();
+  for (const auto *PI : DictMethod->params()) {
+    QualType T = PI->getType();
     if (const PointerType* PT = T->getAs<PointerType>()) {
       QualType PointeeTy = PT->getPointeeType();
       convertToUnqualifiedObjCType(PointeeTy);
@@ -3072,9 +3020,8 @@
   ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
   
   const FunctionType *FT = msgSendType->getAs<FunctionType>();
-  CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
-                                        FT->getResultType(), VK_RValue,
-                                        EndLoc);
+  CallExpr *CE = new (Context)
+      CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
   ReplaceStmt(Exp, CE);
   return CE;
 }
@@ -3331,7 +3278,7 @@
   // May need to use objc_msgSend_stret() as well.
   FunctionDecl *MsgSendStretFlavor = 0;
   if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
-    QualType resultType = mDecl->getResultType();
+    QualType resultType = mDecl->getReturnType();
     if (resultType->isRecordType())
       MsgSendStretFlavor = MsgSendStretFunctionDecl;
     else if (resultType->isRealFloatingType())
@@ -3364,11 +3311,7 @@
 
     // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
     SmallVector<Expr*, 8> ClsExprs;
-    QualType argType = Context->getPointerType(Context->CharTy);
-    ClsExprs.push_back(StringLiteral::Create(*Context,
-                                   ClassDecl->getIdentifier()->getName(),
-                                   StringLiteral::Ascii, false,
-                                   argType, SourceLocation()));
+    ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
     // (Class)objc_getClass("CurrentClass")
     CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
                                                  &ClsExprs[0],
@@ -3435,14 +3378,10 @@
 
   case ObjCMessageExpr::Class: {
     SmallVector<Expr*, 8> ClsExprs;
-    QualType argType = Context->getPointerType(Context->CharTy);
     ObjCInterfaceDecl *Class
       = Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
     IdentifierInfo *clsName = Class->getIdentifier();
-    ClsExprs.push_back(StringLiteral::Create(*Context,
-                                             clsName->getName(),
-                                             StringLiteral::Ascii, false,
-                                             argType, SourceLocation()));
+    ClsExprs.push_back(getStringLiteral(clsName->getName()));
     CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
                                                  &ClsExprs[0],
                                                  ClsExprs.size(), 
@@ -3473,11 +3412,7 @@
     
     // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
     SmallVector<Expr*, 8> ClsExprs;
-    QualType argType = Context->getPointerType(Context->CharTy);
-    ClsExprs.push_back(StringLiteral::Create(*Context,
-                                   ClassDecl->getIdentifier()->getName(),
-                                   StringLiteral::Ascii, false, argType,
-                                   SourceLocation()));
+    ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
     // (Class)objc_getClass("CurrentClass")
     CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
                                                  &ClsExprs[0],
@@ -3555,11 +3490,7 @@
 
   // Create a call to sel_registerName("selName"), it will be the 2nd argument.
   SmallVector<Expr*, 8> SelExprs;
-  QualType argType = Context->getPointerType(Context->CharTy);
-  SelExprs.push_back(StringLiteral::Create(*Context,
-                                       Exp->getSelector().getAsString(),
-                                       StringLiteral::Ascii, false,
-                                       argType, SourceLocation()));
+  SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
   CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
                                                  &SelExprs[0], SelExprs.size(),
                                                   StartLoc,
@@ -3634,11 +3565,10 @@
   ArgTypes.push_back(Context->getObjCSelType());
   if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
     // Push any user argument types.
-    for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
-         E = OMD->param_end(); PI != E; ++PI) {
-      QualType t = (*PI)->getType()->isObjCQualifiedIdType()
+    for (const auto *PI : OMD->params()) {
+      QualType t = PI->getType()->isObjCQualifiedIdType()
                      ? Context->getObjCIdType()
-                     : (*PI)->getType();
+                     : PI->getType();
       // Make sure we convert "t (^)(...)" to "t (*)(...)".
       (void)convertBlockPointerToFunctionPointer(t);
       ArgTypes.push_back(t);
@@ -3677,8 +3607,8 @@
   ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
 
   const FunctionType *FT = msgSendType->getAs<FunctionType>();
-  CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
-                                        FT->getResultType(), VK_RValue, EndLoc);
+  CallExpr *CE = new (Context)
+      CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
   Stmt *ReplacingStmt = CE;
   if (MsgSendStretFlavor) {
     // We have the method which returns a struct/union. Must also generate
@@ -3733,12 +3663,9 @@
                                 SC_Extern);
   DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
                                                VK_LValue, SourceLocation());
-  Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
-                             Context->getPointerType(DRE->getType()),
-                             VK_RValue, OK_Ordinary, SourceLocation());
-  CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
-                                                CK_BitCast,
-                                                DerefExpr);
+  CastExpr *castExpr = NoTypeInfoCStyleCastExpr(
+      Context, Context->getPointerType(DRE->getType()), CK_BitCast, DRE);
   ReplaceStmt(Exp, castExpr);
   ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
   // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
@@ -3831,11 +3758,8 @@
         return true;
       }
       Result += " {\n";
-      for (RecordDecl::field_iterator i = RD->field_begin(), 
-           e = RD->field_end(); i != e; ++i) {
-        FieldDecl *FD = *i;
+      for (auto *FD : RD->fields())
         RewriteObjCFieldDecl(FD, Result);
-      }
       Result += "\t} "; 
       return true;
     }
@@ -3852,8 +3776,7 @@
       }
       
       Result += " {\n";
-      for (EnumDecl::enumerator_iterator EC = ED->enumerator_begin(),
-           ECEnd = ED->enumerator_end(); EC != ECEnd; ++EC) {
+      for (const auto *EC : ED->enumerators()) {
         Result += "\t"; Result += EC->getName(); Result += " = ";
         llvm::APSInt Val = EC->getInitVal();
         Result += Val.toString(10);
@@ -4226,7 +4149,7 @@
                                                    StringRef funcName,
                                                    std::string Tag) {
   const FunctionType *AFT = CE->getFunctionType();
-  QualType RT = AFT->getResultType();
+  QualType RT = AFT->getReturnType();
   std::string StructRef = "struct " + Tag;
   SourceLocation BlockLoc = CE->getExprLoc();
   std::string S;
@@ -4731,13 +4654,12 @@
   // FTP will be null for closures that don't take arguments.
   // Generate a funky cast.
   SmallVector<QualType, 8> ArgTypes;
-  QualType Res = FT->getResultType();
+  QualType Res = FT->getReturnType();
   bool modified = convertObjCTypeToCStyleType(Res);
   
   if (FTP) {
-    for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
-         E = FTP->arg_type_end(); I && (I != E); ++I) {
-      QualType t = *I;
+    for (auto &I : FTP->param_types()) {
+      QualType t = I;
       // Make sure we convert "t (^)(...)" to "t (*)(...)".
       if (convertObjCTypeToCStyleType(t))
         modified = true;
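// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch.] The recurring comment in these
// hunks, convert "t (^)(...)" to "t (*)(...)", refers to lowering Clang block
// pointer types into ordinary function-pointer types so that the rewritten
// translation unit compiles as plain C/C++, where the '^' declarator does not
// exist. Roughly:
//
//   int (^callback)(int);   // block pointer, only valid with -fblocks
//   int (*callback)(int);   // the shape the conversion helpers above produce
//
// A compilable illustration of the target form:
#include <iostream>

static int twice(int x) { return 2 * x; }

int main() {
  int (*callback)(int) = &twice; // the "t (*)(...)" shape the rewriter emits
  std::cout << callback(21) << '\n';
  return 0;
}
// ---------------------------------------------------------------------------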
@@ -4803,9 +4725,8 @@
   // Push the block argument type.
   ArgTypes.push_back(PtrBlock);
   if (FTP) {
-    for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
-         E = FTP->arg_type_end(); I && (I != E); ++I) {
-      QualType t = *I;
+    for (auto &I : FTP->param_types()) {
+      QualType t = I;
       // Make sure we convert "t (^)(...)" to "t (*)(...)".
       if (!convertBlockPointerToFunctionPointer(t))
         convertToUnqualifiedObjCType(t);
@@ -5023,9 +4944,8 @@
     FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
   }
   if (FTP) {
-    for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
-         E = FTP->arg_type_end(); I != E; ++I)
-      if (isTopLevelBlockPointerType(*I))
+    for (const auto &I : FTP->param_types())
+      if (isTopLevelBlockPointerType(I))
         return true;
   }
   return false;
@@ -5042,12 +4962,11 @@
     FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
   }
   if (FTP) {
-    for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
-         E = FTP->arg_type_end(); I != E; ++I) {
-      if ((*I)->isObjCQualifiedIdType())
+    for (const auto &I : FTP->param_types()) {
+      if (I->isObjCQualifiedIdType())
         return true;
-      if ((*I)->isObjCObjectPointerType() &&
-          (*I)->getPointeeType()->isObjCQualifiedInterfaceType())
+      if (I->isObjCObjectPointerType() &&
+          I->getPointeeType()->isObjCQualifiedInterfaceType())
         return true;
     }
         
@@ -5580,11 +5499,10 @@
                                       SourceLocation());
       bool isNestedCapturedVar = false;
       if (block)
-        for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
-             ce = block->capture_end(); ci != ce; ++ci) {
-          const VarDecl *variable = ci->getVariable();
-          if (variable == ND && ci->isNested()) {
-            assert (ci->isByRef() && 
+        for (const auto &CI : block->captures()) {
+          const VarDecl *variable = CI.getVariable();
+          if (variable == ND && CI.isNested()) {
+            assert (CI.isByRef() && 
                     "SynthBlockInitExpr - captured block variable is not byref");
             isNestedCapturedVar = true;
             break;
@@ -5887,9 +5805,7 @@
 }
 
 void RewriteModernObjC::RewriteRecordBody(RecordDecl *RD) {
-  for (RecordDecl::field_iterator i = RD->field_begin(), 
-                                  e = RD->field_end(); i != e; ++i) {
-    FieldDecl *FD = *i;
+  for (auto *FD : RD->fields()) {
     if (isTopLevelBlockPointerType(FD->getType()))
       RewriteBlockPointerDecl(FD);
     if (FD->getType()->isObjCQualifiedIdType() ||
@@ -6049,9 +5965,9 @@
   RewriteInclude();
 
   for (unsigned i = 0, e = FunctionDefinitionsSeen.size(); i < e; i++) {
-    // translation of function bodies were postponed untill all class and
+    // Translation of function bodies was postponed until all classes and
     // their extensions and implementations are seen. This is because we
-    // cannot build grouping structs for bitfields untill they are all seen.
+    // cannot build grouping structs for bitfields until they are all seen.
     FunctionDecl *FDecl = FunctionDefinitionsSeen[i];
     HandleTopLevelSingleDecl(FDecl);
   }
@@ -6192,7 +6108,11 @@
   Preamble += "  int *isa;\n";
   Preamble += "  int flags;\n";
   Preamble += "  char *str;\n";
+  Preamble += "#if __LLP64__\n";
+  Preamble += "  long long length;\n";
+  Preamble += "#else\n";
   Preamble += "  long length;\n";
+  Preamble += "#endif\n";
   Preamble += "};\n";
   Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
   Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
@@ -7065,17 +6985,13 @@
     PDecl = Def;
   // Must write out all protocol definitions in current qualifier list,
   // and in their nested qualifiers before writing out current definition.
-  for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
-       E = PDecl->protocol_end(); I != E; ++I)
-    RewriteObjCProtocolMetaData(*I, Result);
+  for (auto *I : PDecl->protocols())
+    RewriteObjCProtocolMetaData(I, Result);
   
   // Construct method lists.
   std::vector<ObjCMethodDecl *> InstanceMethods, ClassMethods;
   std::vector<ObjCMethodDecl *> OptInstanceMethods, OptClassMethods;
-  for (ObjCProtocolDecl::instmeth_iterator
-       I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
-       I != E; ++I) {
-    ObjCMethodDecl *MD = *I;
+  for (auto *MD : PDecl->instance_methods()) {
     if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
       OptInstanceMethods.push_back(MD);
     } else {
@@ -7083,10 +6999,7 @@
     }
   }
   
-  for (ObjCProtocolDecl::classmeth_iterator
-       I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
-       I != E; ++I) {
-    ObjCMethodDecl *MD = *I;
+  for (auto *MD : PDecl->class_methods()) {
     if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
       OptClassMethods.push_back(MD);
     } else {
@@ -7108,11 +7021,7 @@
                                          "_OBJC_PROTOCOL_METHOD_TYPES_",
                                          PDecl->getNameAsString());
   // Protocol's super protocol list
-  std::vector<ObjCProtocolDecl *> SuperProtocols;
-  for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
-       E = PDecl->protocol_end(); I != E; ++I)
-    SuperProtocols.push_back(*I);
-  
+  SmallVector<ObjCProtocolDecl *, 8> SuperProtocols(PDecl->protocols());
   Write_protocol_list_initializer(Context, Result, SuperProtocols,
                                   "_OBJC_PROTOCOL_REFS_",
                                   PDecl->getNameAsString());
@@ -7134,11 +7043,7 @@
                                   PDecl->getNameAsString(), false);
   
   // Protocol's property metadata.
-  std::vector<ObjCPropertyDecl *> ProtocolProperties;
-  for (ObjCContainerDecl::prop_iterator I = PDecl->prop_begin(),
-       E = PDecl->prop_end(); I != E; ++I)
-    ProtocolProperties.push_back(*I);
-  
+  SmallVector<ObjCPropertyDecl *, 8> ProtocolProperties(PDecl->properties());
   Write_prop_list_t_initializer(*this, Context, Result, ProtocolProperties,
                                  /* Container */0,
                                  "_OBJC_PROTOCOL_PROPERTIES_",
@@ -7300,14 +7205,11 @@
                                  CDecl);
   
   // Build _objc_method_list for class's instance methods if needed
-  SmallVector<ObjCMethodDecl *, 32>
-    InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+  SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
   
   // If any of our property implementations have associated getters or
   // setters, produce metadata for them as well.
-  for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
-       PropEnd = IDecl->propimpl_end();
-       Prop != PropEnd; ++Prop) {
+  for (const auto *Prop : IDecl->property_impls()) {
     if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
       continue;
     if (!Prop->getPropertyIvarDecl())
@@ -7329,8 +7231,7 @@
                                   "_OBJC_$_INSTANCE_METHODS_",
                                   IDecl->getNameAsString(), true);
   
-  SmallVector<ObjCMethodDecl *, 32>
-    ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end());
+  SmallVector<ObjCMethodDecl *, 32> ClassMethods(IDecl->class_methods());
   
   Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
                                   "_OBJC_$_CLASS_METHODS_",
@@ -7355,11 +7256,7 @@
                                   IDecl->getNameAsString());
   
   // Protocol's property metadata.
-  std::vector<ObjCPropertyDecl *> ClassProperties;
-  for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(),
-       E = CDecl->prop_end(); I != E; ++I)
-    ClassProperties.push_back(*I);
-  
+  SmallVector<ObjCPropertyDecl *, 8> ClassProperties(CDecl->properties());
   Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
                                  /* Container */IDecl,
                                  "_OBJC_$_PROP_LIST_",
@@ -7561,14 +7458,11 @@
   FullCategoryName += CDecl->getNameAsString();
   
   // Build _objc_method_list for class's instance methods if needed
-  SmallVector<ObjCMethodDecl *, 32>
-  InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+  SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
   
   // If any of our property implementations have associated getters or
   // setters, produce metadata for them as well.
-  for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
-       PropEnd = IDecl->propimpl_end();
-       Prop != PropEnd; ++Prop) {
+  for (const auto *Prop : IDecl->property_impls()) {
     if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
       continue;
     if (!Prop->getPropertyIvarDecl())
@@ -7588,8 +7482,7 @@
                                   "_OBJC_$_CATEGORY_INSTANCE_METHODS_",
                                   FullCategoryName, true);
   
-  SmallVector<ObjCMethodDecl *, 32>
-    ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end());
+  SmallVector<ObjCMethodDecl *, 32> ClassMethods(IDecl->class_methods());
   
   Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
                                   "_OBJC_$_CATEGORY_CLASS_METHODS_",
@@ -7597,16 +7490,11 @@
   
   // Protocols referenced in class declaration?
   // Protocol's super protocol list
-  std::vector<ObjCProtocolDecl *> RefedProtocols;
-  for (ObjCInterfaceDecl::protocol_iterator I = CDecl->protocol_begin(),
-                                            E = CDecl->protocol_end();
-
-         I != E; ++I) {
-    RefedProtocols.push_back(*I);
+  SmallVector<ObjCProtocolDecl *, 8> RefedProtocols(CDecl->protocols());
+  for (auto *I : CDecl->protocols())
     // Must write out all protocol definitions in current qualifier list,
     // and in their nested qualifiers before writing out current definition.
-    RewriteObjCProtocolMetaData(*I, Result);
-  }
+    RewriteObjCProtocolMetaData(I, Result);
   
   Write_protocol_list_initializer(Context, Result, 
                                   RefedProtocols,
@@ -7614,11 +7502,7 @@
                                   FullCategoryName);
   
   // Protocol's property metadata.
-  std::vector<ObjCPropertyDecl *> ClassProperties;
-  for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(),
-       E = CDecl->prop_end(); I != E; ++I)
-    ClassProperties.push_back(*I);
-  
+  SmallVector<ObjCPropertyDecl *, 8> ClassProperties(CDecl->properties());
   Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
                                 /* Container */IDecl,
                                 "_OBJC_$_PROP_LIST_",
diff --git a/lib/Rewrite/Frontend/RewriteObjC.cpp b/lib/Rewrite/Frontend/RewriteObjC.cpp
index 3dda2c5..80d6cc6 100644
--- a/lib/Rewrite/Frontend/RewriteObjC.cpp
+++ b/lib/Rewrite/Frontend/RewriteObjC.cpp
@@ -23,11 +23,11 @@
 #include "clang/Lex/Lexer.h"
 #include "clang/Rewrite/Core/Rewriter.h"
 #include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/raw_ostream.h"
+#include <memory>
 
 using namespace clang;
 using llvm::utostr;
@@ -165,7 +165,7 @@
   public:
 
     // Top Level Driver code.
-    virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+    bool HandleTopLevelDecl(DeclGroupRef D) override {
       for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
         if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
           if (!Class->isThisDeclarationADefinition()) {
@@ -193,7 +193,7 @@
 
     ~RewriteObjC() {}
 
-    virtual void HandleTranslationUnit(ASTContext &C);
+    void HandleTranslationUnit(ASTContext &C) override;
 
     void ReplaceStmt(Stmt *Old, Stmt *New) {
       Stmt *ReplacingStmt = ReplacedNodes[Old];
@@ -328,9 +328,9 @@
     
     void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
                                       std::string &Result);
-    
-    virtual void Initialize(ASTContext &context) = 0;
-    
+
+    virtual void Initialize(ASTContext &context) override = 0;
+
     // Metadata Rewriting.
     virtual void RewriteMetaDataIntoBuffer(std::string &Result) = 0;
     virtual void RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Prots,
@@ -501,6 +501,14 @@
       return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo,
                                     SourceLocation(), SourceLocation());
     }
+
+    StringLiteral *getStringLiteral(StringRef Str) {
+      QualType StrType = Context->getConstantArrayType(
+          Context->CharTy, llvm::APInt(32, Str.size() + 1), ArrayType::Normal,
+          0);
+      return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
+                                   /*Pascal=*/false, StrType, SourceLocation());
+    }
   };
   
   class RewriteObjCFragileABI : public RewriteObjC {
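// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch.] The getStringLiteral() helper
// added above gives each synthesized literal the constant-array type
// char[Str.size() + 1] -- the extra slot matching the conventional trailing
// NUL -- where the removed call sites used a plain 'char *' pointer type.
// The sizing rule it encodes, in plain C++:
#include <string>

int main() {
  const char Literal[] = "sel_registerName"; // array type: char[17]
  static_assert(sizeof(Literal) == 16 + 1,
                "a string literal's array type includes the trailing NUL");
  std::string Str(Literal);
  // Mirrors the llvm::APInt(32, Str.size() + 1) extent used by the helper.
  return (Str.size() + 1 == sizeof(Literal)) ? 0 : 1;
}
// ---------------------------------------------------------------------------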
@@ -513,8 +521,8 @@
                                                      silenceMacroWarn) {}
     
     ~RewriteObjCFragileABI() {}
-    virtual void Initialize(ASTContext &context);
-    
+    virtual void Initialize(ASTContext &context) override;
+
     // Rewriting metadata
     template<typename MethodIterator>
     void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
@@ -523,23 +531,22 @@
                                     StringRef prefix,
                                     StringRef ClassName,
                                     std::string &Result);
-    virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
-                                             StringRef prefix,
-                                             StringRef ClassName,
-                                             std::string &Result);
-    virtual void RewriteObjCProtocolListMetaData(
-                  const ObjCList<ObjCProtocolDecl> &Prots,
-                  StringRef prefix, StringRef ClassName, std::string &Result);
-    virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
-                                          std::string &Result);
-    virtual void RewriteMetaDataIntoBuffer(std::string &Result);
-    virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
-                                             std::string &Result);
-    
+    void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+                                     StringRef prefix, StringRef ClassName,
+                                     std::string &Result) override;
+    void RewriteObjCProtocolListMetaData(
+          const ObjCList<ObjCProtocolDecl> &Prots,
+          StringRef prefix, StringRef ClassName, std::string &Result) override;
+    void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+                                  std::string &Result) override;
+    void RewriteMetaDataIntoBuffer(std::string &Result) override;
+    void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+                                     std::string &Result) override;
+
     // Rewriting ivar
-    virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
-                                              std::string &Result);
-    virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
+    void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+                                      std::string &Result) override;
+    Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) override;
   };
 }
 
@@ -547,9 +554,8 @@
                                                    NamedDecl *D) {
   if (const FunctionProtoType *fproto
       = dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
-    for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(),
-         E = fproto->arg_type_end(); I && (I != E); ++I)
-      if (isTopLevelBlockPointerType(*I)) {
+    for (const auto &I : fproto->param_types())
+      if (isTopLevelBlockPointerType(I)) {
         // All the args are checked/rewritten. Don't call twice!
         RewriteBlockPointerDecl(D);
         break;
@@ -809,7 +815,7 @@
       // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
       Getr += "typedef ";
       const FunctionType *FPRetType = 0;
-      RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr, 
+      RewriteTypeIntoString(PD->getGetterMethodDecl()->getReturnType(), Getr,
                             FPRetType);
       Getr += " _TYPE";
       if (FPRetType) {
@@ -818,14 +824,15 @@
         // Now, emit the argument types (if any).
         if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
           Getr += "(";
-          for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+          for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
             if (i) Getr += ", ";
-            std::string ParamStr = FT->getArgType(i).getAsString(
-                                                          Context->getPrintingPolicy());
+            std::string ParamStr =
+                FT->getParamType(i).getAsString(Context->getPrintingPolicy());
             Getr += ParamStr;
           }
           if (FT->isVariadic()) {
-            if (FT->getNumArgs()) Getr += ", ";
+            if (FT->getNumParams())
+              Getr += ", ";
             Getr += "...";
           }
           Getr += ")";
@@ -970,18 +977,12 @@
   // FIXME: handle category headers that are declared across multiple lines.
   ReplaceText(LocStart, 0, "// ");
 
-  for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(),
-       E = CatDecl->prop_end(); I != E; ++I)
-    RewriteProperty(*I);
-  
-  for (ObjCCategoryDecl::instmeth_iterator
-         I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
-  for (ObjCCategoryDecl::classmeth_iterator
-         I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
+  for (auto *I : CatDecl->properties())
+    RewriteProperty(I);
+  for (auto *I : CatDecl->instance_methods())
+    RewriteMethodDeclaration(I);
+  for (auto *I : CatDecl->class_methods())
+    RewriteMethodDeclaration(I);
 
   // Lastly, comment out the @end.
   ReplaceText(CatDecl->getAtEndRange().getBegin(), 
@@ -995,18 +996,12 @@
   // FIXME: handle protocol headers that are declared across multiple lines.
   ReplaceText(LocStart, 0, "// ");
 
-  for (ObjCProtocolDecl::instmeth_iterator
-         I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
-  for (ObjCProtocolDecl::classmeth_iterator
-         I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
-
-  for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(),
-       E = PDecl->prop_end(); I != E; ++I)
-    RewriteProperty(*I);
+  for (auto *I : PDecl->instance_methods())
+    RewriteMethodDeclaration(I);
+  for (auto *I : PDecl->class_methods())
+    RewriteMethodDeclaration(I);
+  for (auto *I : PDecl->properties())
+    RewriteProperty(I);
   
   // Lastly, comment out the @end.
   SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
@@ -1061,8 +1056,8 @@
     else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
       PointeeTy = BPT->getPointeeType();
     if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
-      ResultStr += FPRetType->getResultType().getAsString(
-        Context->getPrintingPolicy());
+      ResultStr +=
+          FPRetType->getReturnType().getAsString(Context->getPrintingPolicy());
       ResultStr += "(*";
     }
   } else
@@ -1075,7 +1070,7 @@
   //fprintf(stderr,"In RewriteObjCMethodDecl\n");
   const FunctionType *FPRetType = 0;
   ResultStr += "\nstatic ";
-  RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType);
+  RewriteTypeIntoString(OMD->getReturnType(), ResultStr, FPRetType);
   ResultStr += " ";
 
   // Unique method name
@@ -1131,9 +1126,7 @@
   ResultStr += " _cmd";
 
   // Method arguments.
-  for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
-       E = OMD->param_end(); PI != E; ++PI) {
-    ParmVarDecl *PDecl = *PI;
+  for (const auto *PDecl : OMD->params()) {
     ResultStr += ", ";
     if (PDecl->getType()->isObjCQualifiedIdType()) {
       ResultStr += "id ";
@@ -1157,14 +1150,15 @@
     // Now, emit the argument types (if any).
     if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
       ResultStr += "(";
-      for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+      for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
         if (i) ResultStr += ", ";
-        std::string ParamStr = FT->getArgType(i).getAsString(
-          Context->getPrintingPolicy());
+        std::string ParamStr =
+            FT->getParamType(i).getAsString(Context->getPrintingPolicy());
         ResultStr += ParamStr;
       }
       if (FT->isVariadic()) {
-        if (FT->getNumArgs()) ResultStr += ", ";
+        if (FT->getNumParams())
+          ResultStr += ", ";
         ResultStr += "...";
       }
       ResultStr += ")";
@@ -1179,12 +1173,8 @@
 
   InsertText(IMD ? IMD->getLocStart() : CID->getLocStart(), "// ");
 
-  for (ObjCCategoryImplDecl::instmeth_iterator
-       I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(),
-       E = IMD ? IMD->instmeth_end() : CID->instmeth_end();
-       I != E; ++I) {
+  for (auto *OMD : IMD ? IMD->instance_methods() : CID->instance_methods()) {
     std::string ResultStr;
-    ObjCMethodDecl *OMD = *I;
     RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
     SourceLocation LocStart = OMD->getLocStart();
     SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
@@ -1194,12 +1184,8 @@
     ReplaceText(LocStart, endBuf-startBuf, ResultStr);
   }
 
-  for (ObjCCategoryImplDecl::classmeth_iterator
-       I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(),
-       E = IMD ? IMD->classmeth_end() : CID->classmeth_end();
-       I != E; ++I) {
+  for (auto *OMD : IMD ? IMD->class_methods() : CID->class_methods()) {
     std::string ResultStr;
-    ObjCMethodDecl *OMD = *I;
     RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
     SourceLocation LocStart = OMD->getLocStart();
     SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
@@ -1208,12 +1194,8 @@
     const char *endBuf = SM->getCharacterData(LocEnd);
     ReplaceText(LocStart, endBuf-startBuf, ResultStr);
   }
-  for (ObjCCategoryImplDecl::propimpl_iterator
-       I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(),
-       E = IMD ? IMD->propimpl_end() : CID->propimpl_end();
-       I != E; ++I) {
-    RewritePropertyImplDecl(*I, IMD, CID);
-  }
+  for (auto *I : IMD ? IMD->property_impls() : CID->property_impls())
+    RewritePropertyImplDecl(I, IMD, CID);
 
   InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
 }
@@ -1236,17 +1218,12 @@
   }
   RewriteObjCInternalStruct(ClassDecl, ResultStr);
 
-  for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(),
-         E = ClassDecl->prop_end(); I != E; ++I)
-    RewriteProperty(*I);
-  for (ObjCInterfaceDecl::instmeth_iterator
-         I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
-  for (ObjCInterfaceDecl::classmeth_iterator
-         I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end();
-       I != E; ++I)
-    RewriteMethodDeclaration(*I);
+  for (auto *I : ClassDecl->properties())
+    RewriteProperty(I);
+  for (auto *I : ClassDecl->instance_methods())
+    RewriteMethodDeclaration(I);
+  for (auto *I : ClassDecl->class_methods())
+    RewriteMethodDeclaration(I);
 
   // Lastly, comment out the @end.
   ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"), 
@@ -2009,12 +1986,9 @@
 
 Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
   // Create a new string expression.
-  QualType StrType = Context->getPointerType(Context->CharTy);
   std::string StrEncoding;
   Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
-  Expr *Replacement = StringLiteral::Create(*Context, StrEncoding,
-                                            StringLiteral::Ascii, false,
-                                            StrType, SourceLocation());
+  Expr *Replacement = getStringLiteral(StrEncoding);
   ReplaceStmt(Exp, Replacement);
 
   // Replace this subexpr in the parent.
@@ -2028,11 +2002,7 @@
   assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
   // Create a call to sel_registerName("selName").
   SmallVector<Expr*, 8> SelExprs;
-  QualType argType = Context->getPointerType(Context->CharTy);
-  SelExprs.push_back(StringLiteral::Create(*Context,
-                                           Exp->getSelector().getAsString(),
-                                           StringLiteral::Ascii, false,
-                                           argType, SourceLocation()));
+  SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
   CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
                                                  &SelExprs[0], SelExprs.size());
   ReplaceStmt(Exp, SelExp);
@@ -2159,7 +2129,7 @@
     proto = dyn_cast<FunctionProtoType>(funcType);
     if (!proto)
       return;
-    Type = proto->getResultType();
+    Type = proto->getReturnType();
   }
   else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
     Loc = FD->getLocation();
@@ -2190,8 +2160,8 @@
   // Now check arguments.
   const char *startBuf = SM->getCharacterData(Loc);
   const char *startFuncBuf = startBuf;
-  for (unsigned i = 0; i < proto->getNumArgs(); i++) {
-    if (needToScanForQualifiers(proto->getArgType(i))) {
+  for (unsigned i = 0; i < proto->getNumParams(); i++) {
+    if (needToScanForQualifiers(proto->getParamType(i))) {
       // Since types are unique, we need to scan the buffer.
 
       const char *endBuf = startBuf;
@@ -2330,14 +2300,14 @@
   const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
   if (!proto)
     return;
-  QualType Type = proto->getResultType();
+  QualType Type = proto->getReturnType();
   std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
   FdStr += " ";
   FdStr += FD->getName();
   FdStr +=  "(";
-  unsigned numArgs = proto->getNumArgs();
+  unsigned numArgs = proto->getNumParams();
   for (unsigned i = 0; i < numArgs; i++) {
-    QualType ArgType = proto->getArgType(i);
+    QualType ArgType = proto->getParamType(i);
     RewriteBlockPointerType(FdStr, ArgType);
     if (i+1 < numArgs)
       FdStr += ", ";
@@ -2523,7 +2493,7 @@
   unsigned i;
   for (i=0; i < tmpName.length(); i++) {
     char c = tmpName.at(i);
-    // replace any non alphanumeric characters with '_'.
+    // replace any non-alphanumeric characters with '_'.
     if (!isAlphanumeric(c))
       tmpName[i] = '_';
   }
@@ -2647,9 +2617,8 @@
   ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
   
   const FunctionType *FT = msgSendType->getAs<FunctionType>();
-  CallExpr *STCE = new (Context) CallExpr(*Context, PE, MsgExprs,
-                                          FT->getResultType(), VK_RValue,
-                                          SourceLocation());
+  CallExpr *STCE = new (Context) CallExpr(
+      *Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, SourceLocation());
   return STCE;
   
 }
@@ -2682,7 +2651,7 @@
   // May need to use objc_msgSend_stret() as well.
   FunctionDecl *MsgSendStretFlavor = 0;
   if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
-    QualType resultType = mDecl->getResultType();
+    QualType resultType = mDecl->getReturnType();
     if (resultType->isRecordType())
       MsgSendStretFlavor = MsgSendStretFunctionDecl;
     else if (resultType->isRealFloatingType())
@@ -2715,11 +2684,7 @@
 
     // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
     SmallVector<Expr*, 8> ClsExprs;
-    QualType argType = Context->getPointerType(Context->CharTy);
-    ClsExprs.push_back(StringLiteral::Create(*Context,
-                                   ClassDecl->getIdentifier()->getName(),
-                                   StringLiteral::Ascii, false,
-                                   argType, SourceLocation()));
+    ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
     CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
                                                  &ClsExprs[0],
                                                  ClsExprs.size(),
@@ -2789,14 +2754,10 @@
 
   case ObjCMessageExpr::Class: {
     SmallVector<Expr*, 8> ClsExprs;
-    QualType argType = Context->getPointerType(Context->CharTy);
     ObjCInterfaceDecl *Class
       = Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
     IdentifierInfo *clsName = Class->getIdentifier();
-    ClsExprs.push_back(StringLiteral::Create(*Context,
-                                             clsName->getName(),
-                                             StringLiteral::Ascii, false,
-                                             argType, SourceLocation()));
+    ClsExprs.push_back(getStringLiteral(clsName->getName()));
     CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
                                                  &ClsExprs[0],
                                                  ClsExprs.size(), 
@@ -2824,11 +2785,7 @@
     
     // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
     SmallVector<Expr*, 8> ClsExprs;
-    QualType argType = Context->getPointerType(Context->CharTy);
-    ClsExprs.push_back(StringLiteral::Create(*Context,
-                                   ClassDecl->getIdentifier()->getName(),
-                                   StringLiteral::Ascii, false, argType,
-                                   SourceLocation()));
+    ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
     CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
                                                  &ClsExprs[0],
                                                  ClsExprs.size(), 
@@ -2909,11 +2866,7 @@
 
   // Create a call to sel_registerName("selName"), it will be the 2nd argument.
   SmallVector<Expr*, 8> SelExprs;
-  QualType argType = Context->getPointerType(Context->CharTy);
-  SelExprs.push_back(StringLiteral::Create(*Context,
-                                       Exp->getSelector().getAsString(),
-                                       StringLiteral::Ascii, false,
-                                       argType, SourceLocation()));
+  SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
   CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
                                                  &SelExprs[0], SelExprs.size(),
                                                   StartLoc,
@@ -2988,11 +2941,10 @@
   ArgTypes.push_back(Context->getObjCSelType());
   if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
     // Push any user argument types.
-    for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
-         E = OMD->param_end(); PI != E; ++PI) {
-      QualType t = (*PI)->getType()->isObjCQualifiedIdType()
+    for (const auto *PI : OMD->params()) {
+      QualType t = PI->getType()->isObjCQualifiedIdType()
                      ? Context->getObjCIdType()
-                     : (*PI)->getType();
+                     : PI->getType();
       // Make sure we convert "t (^)(...)" to "t (*)(...)".
       (void)convertBlockPointerToFunctionPointer(t);
       ArgTypes.push_back(t);
@@ -3031,9 +2983,8 @@
   ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
 
   const FunctionType *FT = msgSendType->getAs<FunctionType>();
-  CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
-                                        FT->getResultType(), VK_RValue,
-                                        EndLoc);
+  CallExpr *CE = new (Context)
+      CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
   Stmt *ReplacingStmt = CE;
   if (MsgSendStretFlavor) {
     // We have the method which returns a struct/union. Must also generate
@@ -3327,7 +3278,7 @@
                                                    StringRef funcName,
                                                    std::string Tag) {
   const FunctionType *AFT = CE->getFunctionType();
-  QualType RT = AFT->getResultType();
+  QualType RT = AFT->getReturnType();
   std::string StructRef = "struct " + Tag;
   std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
                   funcName.str() + "_" + "block_func_" + utostr(i);
@@ -3784,13 +3735,12 @@
   // FTP will be null for closures that don't take arguments.
   // Generate a funky cast.
   SmallVector<QualType, 8> ArgTypes;
-  QualType Res = FT->getResultType();
+  QualType Res = FT->getReturnType();
   bool HasBlockType = convertBlockPointerToFunctionPointer(Res);
   
   if (FTP) {
-    for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
-         E = FTP->arg_type_end(); I && (I != E); ++I) {
-      QualType t = *I;
+    for (auto &I : FTP->param_types()) {
+      QualType t = I;
       // Make sure we convert "t (^)(...)" to "t (*)(...)".
       if (convertBlockPointerToFunctionPointer(t))
         HasBlockType = true;
@@ -3858,9 +3808,8 @@
   // Push the block argument type.
   ArgTypes.push_back(PtrBlock);
   if (FTP) {
-    for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
-         E = FTP->arg_type_end(); I && (I != E); ++I) {
-      QualType t = *I;
+    for (auto &I : FTP->param_types()) {
+      QualType t = I;
       // Make sure we convert "t (^)(...)" to "t (*)(...)".
       if (!convertBlockPointerToFunctionPointer(t))
         convertToUnqualifiedObjCType(t);
@@ -4061,9 +4010,8 @@
     FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
   }
   if (FTP) {
-    for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
-         E = FTP->arg_type_end(); I != E; ++I)
-      if (isTopLevelBlockPointerType(*I))
+    for (const auto &I : FTP->param_types())
+      if (isTopLevelBlockPointerType(I))
         return true;
   }
   return false;
@@ -4080,12 +4028,11 @@
     FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
   }
   if (FTP) {
-    for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
-         E = FTP->arg_type_end(); I != E; ++I) {
-      if ((*I)->isObjCQualifiedIdType())
+    for (const auto &I : FTP->param_types()) {
+      if (I->isObjCQualifiedIdType())
         return true;
-      if ((*I)->isObjCObjectPointerType() &&
-          (*I)->getPointeeType()->isObjCQualifiedInterfaceType())
+      if (I->isObjCObjectPointerType() &&
+          I->getPointeeType()->isObjCQualifiedInterfaceType())
         return true;
     }
         
@@ -4601,11 +4548,10 @@
                                       SourceLocation());
       bool isNestedCapturedVar = false;
       if (block)
-        for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
-             ce = block->capture_end(); ci != ce; ++ci) {
-          const VarDecl *variable = ci->getVariable();
-          if (variable == ND && ci->isNested()) {
-            assert (ci->isByRef() && 
+        for (const auto &CI : block->captures()) {
+          const VarDecl *variable = CI.getVariable();
+          if (variable == ND && CI.isNested()) {
+            assert (CI.isByRef() && 
                     "SynthBlockInitExpr - captured block variable is not byref");
             isNestedCapturedVar = true;
             break;
@@ -4795,9 +4741,7 @@
       RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
 
     // Blocks rewrite rules.
-    for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
-         DI != DE; ++DI) {
-      Decl *SD = *DI;
+    for (auto *SD : DS->decls()) {
       if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
         if (isTopLevelBlockPointerType(ND->getType()))
           RewriteBlockPointerDecl(ND);
@@ -4876,9 +4820,7 @@
 }
 
 void RewriteObjC::RewriteRecordBody(RecordDecl *RD) {
-  for (RecordDecl::field_iterator i = RD->field_begin(), 
-                                  e = RD->field_end(); i != e; ++i) {
-    FieldDecl *FD = *i;
+  for (auto *FD : RD->fields()) {
     if (isTopLevelBlockPointerType(FD->getType()))
       RewriteBlockPointerDecl(FD);
     if (FD->getType()->isObjCQualifiedIdType() ||
@@ -5428,10 +5370,8 @@
     ObjCInterfaceDecl::ivar_iterator IVI, IVE;
     SmallVector<ObjCIvarDecl *, 8> IVars;
     if (!IDecl->ivar_empty()) {
-      for (ObjCInterfaceDecl::ivar_iterator
-           IV = IDecl->ivar_begin(), IVEnd = IDecl->ivar_end();
-           IV != IVEnd; ++IV)
-        IVars.push_back(*IV);
+      for (auto *IV : IDecl->ivars())
+        IVars.push_back(IV);
       IVI = IDecl->ivar_begin();
       IVE = IDecl->ivar_end();
     } else {
@@ -5465,14 +5405,11 @@
   }
   
   // Build _objc_method_list for class's instance methods if needed
-  SmallVector<ObjCMethodDecl *, 32>
-  InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+  SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
   
   // If any of our property implementations have associated getters or
   // setters, produce metadata for them as well.
-  for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
-       PropEnd = IDecl->propimpl_end();
-       Prop != PropEnd; ++Prop) {
+  for (const auto *Prop : IDecl->property_impls()) {
     if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
       continue;
     if (!Prop->getPropertyIvarDecl())
@@ -5747,14 +5684,11 @@
   FullCategoryName += IDecl->getNameAsString();
   
   // Build _objc_method_list for class's instance methods if needed
-  SmallVector<ObjCMethodDecl *, 32>
-  InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+  SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
   
   // If any of our property implementations have associated getters or
   // setters, produce metadata for them as well.
-  for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
-       PropEnd = IDecl->propimpl_end();
-       Prop != PropEnd; ++Prop) {
+  for (const auto *Prop : IDecl->property_impls()) {
     if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
       continue;
     if (!Prop->getPropertyIvarDecl())
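The rewriter changes above are mechanical conversions from explicit iterator pairs to the newer AST range accessors (params(), param_types(), captures(), decls(), fields(), ivars(), property_impls()). A minimal sketch of the pattern, using a hypothetical visit() helper:

    // Before: explicit begin()/end() iterator pair.
    for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
                                        E = OMD->param_end(); PI != E; ++PI)
      visit(*PI);

    // After: range-based for over the params() adaptor; PI is a ParmVarDecl*.
    for (const auto *PI : OMD->params())
      visit(PI);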
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index 93e3ecf..3e40485 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -65,16 +65,157 @@
   public:
     UnreachableCodeHandler(Sema &s) : S(s) {}
 
-    void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
-      S.Diag(L, diag::warn_unreachable) << R1 << R2;
+    void HandleUnreachable(reachable_code::UnreachableKind UK,
+                           SourceLocation L,
+                           SourceRange SilenceableCondVal,
+                           SourceRange R1,
+                           SourceRange R2) override {
+      unsigned diag = diag::warn_unreachable;
+      switch (UK) {
+        case reachable_code::UK_Break:
+          diag = diag::warn_unreachable_break;
+          break;
+        case reachable_code::UK_Return:
+          diag = diag::warn_unreachable_return;
+          break;
+        case reachable_code::UK_Loop_Increment:
+          diag = diag::warn_unreachable_loop_increment;
+          break;
+        case reachable_code::UK_Other:
+          break;
+      }
+
+      S.Diag(L, diag) << R1 << R2;
+
+      SourceLocation Open = SilenceableCondVal.getBegin();
+      if (Open.isValid()) {
+        SourceLocation Close = SilenceableCondVal.getEnd();
+        Close = S.PP.getLocForEndOfToken(Close);
+        if (Close.isValid()) {
+          S.Diag(Open, diag::note_unreachable_silence)
+            << FixItHint::CreateInsertion(Open, "/* DISABLES CODE */ (")
+            << FixItHint::CreateInsertion(Close, ")");
+        }
+      }
     }
   };
 }
 
 /// CheckUnreachable - Check for unreachable code.
 static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
+  // As a heuristic, prune all diagnostics not in the main file.  Currently
+  // the majority of warnings in headers are false positives.  These
+  // are largely caused by configuration state, e.g. preprocessor-defined
+  // code.
+  //
+  // Note that this is also a performance optimization.  Analyzing
+  // headers many times can be expensive.
+  if (!S.getSourceManager().isInMainFile(AC.getDecl()->getLocStart()))
+    return;
+
   UnreachableCodeHandler UC(S);
-  reachable_code::FindUnreachableCode(AC, UC);
+  reachable_code::FindUnreachableCode(AC, S.getPreprocessor(), UC);
+}
+
+//===----------------------------------------------------------------------===//
+// Check for infinite self-recursion in functions
+//===----------------------------------------------------------------------===//
+
+// All blocks are in one of three states.  States are ordered so that blocks
+// can only move to higher states.
+enum RecursiveState {
+  FoundNoPath,
+  FoundPath,
+  FoundPathWithNoRecursiveCall
+};
+
+static void checkForFunctionCall(Sema &S, const FunctionDecl *FD,
+                                 CFGBlock &Block, unsigned ExitID,
+                                 llvm::SmallVectorImpl<RecursiveState> &States,
+                                 RecursiveState State) {
+  unsigned ID = Block.getBlockID();
+
+  // A block's state can only move to a higher state.
+  if (States[ID] >= State)
+    return;
+
+  States[ID] = State;
+
+  // Found a path to the exit node without a recursive call.
+  if (ID == ExitID && State == FoundPathWithNoRecursiveCall)
+    return;
+
+  if (State == FoundPathWithNoRecursiveCall) {
+    // If the current state is FoundPathWithNoRecursiveCall, the successors
+    // will be either FoundPathWithNoRecursiveCall or FoundPath.  To determine
+    // which, process all the Stmts in this block to find any recursive calls.
+    for (CFGBlock::iterator I = Block.begin(), E = Block.end(); I != E; ++I) {
+      if (I->getKind() != CFGElement::Statement)
+        continue;
+
+      const CallExpr *CE = dyn_cast<CallExpr>(I->getAs<CFGStmt>()->getStmt());
+      if (CE && CE->getCalleeDecl() &&
+          CE->getCalleeDecl()->getCanonicalDecl() == FD) {
+
+        // Skip function calls which are qualified with a templated class.
+        if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(
+                CE->getCallee()->IgnoreParenImpCasts())) {
+          if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
+            if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
+                isa<TemplateSpecializationType>(NNS->getAsType())) {
+               continue;
+            }
+          }
+        }
+
+        if (const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE)) {
+          if (isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
+              !MCE->getMethodDecl()->isVirtual()) {
+            State = FoundPath;
+            break;
+          }
+        } else {
+          State = FoundPath;
+          break;
+        }
+      }
+    }
+  }
+
+  for (CFGBlock::succ_iterator I = Block.succ_begin(), E = Block.succ_end();
+       I != E; ++I)
+    if (*I)
+      checkForFunctionCall(S, FD, **I, ExitID, States, State);
+}
+
+static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
+                                   const Stmt *Body,
+                                   AnalysisDeclContext &AC) {
+  FD = FD->getCanonicalDecl();
+
+  // Only run on non-templated functions and non-templated members of
+  // templated classes.
+  if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate &&
+      FD->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization)
+    return;
+
+  CFG *cfg = AC.getCFG();
+  if (cfg == 0) return;
+
+  // If the exit block is unreachable, skip processing the function.
+  if (cfg->getExit().pred_empty())
+    return;
+
+  // Mark all nodes as FoundNoPath, then begin processing the entry block.
+  llvm::SmallVector<RecursiveState, 16> states(cfg->getNumBlockIDs(),
+                                               FoundNoPath);
+  checkForFunctionCall(S, FD, cfg->getEntry(), cfg->getExit().getBlockID(),
+                       states, FoundPathWithNoRecursiveCall);
+
+  // Check that the exit block is reachable.  This prevents triggering the
+  // warning on functions that do not terminate.
+  if (states[cfg->getExit().getBlockID()] == FoundPath)
+    S.Diag(Body->getLocStart(), diag::warn_infinite_recursive_function);
 }
 
 //===----------------------------------------------------------------------===//
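For illustration, the new self-recursion check is aimed at functions where every path from the entry block to the exit block passes through a call back to the same function; in that case the exit block ends up in the FoundPath state and warn_infinite_recursive_function is emitted. A minimal hypothetical example:

    // Both branches recurse, so no path reaches the exit block without a
    // recursive call and the function is flagged.
    int countDown(int n) {
      if (n > 0)
        return countDown(n - 1);
      return countDown(n + 1);
    }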
@@ -330,18 +471,18 @@
   bool HasNoReturn = false;
 
   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
-    ReturnsVoid = FD->getResultType()->isVoidType();
+    ReturnsVoid = FD->getReturnType()->isVoidType();
     HasNoReturn = FD->isNoReturn();
   }
   else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
-    ReturnsVoid = MD->getResultType()->isVoidType();
+    ReturnsVoid = MD->getReturnType()->isVoidType();
     HasNoReturn = MD->hasAttr<NoReturnAttr>();
   }
   else if (isa<BlockDecl>(D)) {
     QualType BlockTy = blkExpr->getType();
     if (const FunctionType *FT =
           BlockTy->getPointeeType()->getAs<FunctionType>()) {
-      if (FT->getResultType()->isVoidType())
+      if (FT->getReturnType()->isVoidType())
         ReturnsVoid = true;
       if (FT->getNoReturnAttr())
         HasNoReturn = true;
@@ -776,6 +917,7 @@
       while (!BlockQueue.empty()) {
         const CFGBlock *P = BlockQueue.front();
         BlockQueue.pop_front();
+        if (!P) continue;
 
         const Stmt *Term = P->getTerminator();
         if (Term && isa<SwitchStmt>(Term))
@@ -977,24 +1119,6 @@
 
 }
 
-namespace {
-typedef std::pair<const Stmt *,
-                  sema::FunctionScopeInfo::WeakObjectUseMap::const_iterator>
-        StmtUsesPair;
-
-class StmtUseSorter {
-  const SourceManager &SM;
-
-public:
-  explicit StmtUseSorter(const SourceManager &SM) : SM(SM) { }
-
-  bool operator()(const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
-    return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
-                                        RHS.first->getLocStart());
-  }
-};
-}
-
 static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
                      const Stmt *S) {
   assert(S);
@@ -1029,6 +1153,8 @@
   typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
   typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
   typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;
+  typedef std::pair<const Stmt *, WeakObjectUseMap::const_iterator>
+  StmtUsesPair;
 
   ASTContext &Ctx = S.getASTContext();
 
@@ -1087,8 +1213,12 @@
     return;
 
   // Sort by first use so that we emit the warnings in a deterministic order.
+  SourceManager &SM = S.getSourceManager();
   std::sort(UsesByStmt.begin(), UsesByStmt.end(),
-            StmtUseSorter(S.getSourceManager()));
+            [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
+    return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
+                                        RHS.first->getLocStart());
+  });
 
   // Classify the current code body for better warning text.
   // This enum should stay in sync with the cases in
@@ -1169,19 +1299,7 @@
   }
 }
 
-
 namespace {
-struct SLocSort {
-  bool operator()(const UninitUse &a, const UninitUse &b) {
-    // Prefer a more confident report over a less confident one.
-    if (a.getKind() != b.getKind())
-      return a.getKind() > b.getKind();
-    SourceLocation aLoc = a.getUser()->getLocStart();
-    SourceLocation bLoc = b.getUser()->getLocStart();
-    return aLoc.getRawEncoding() < bLoc.getRawEncoding();
-  }
-};
-
 class UninitValsDiagReporter : public UninitVariablesHandler {
   Sema &S;
   typedef SmallVector<UninitUse, 2> UsesVec;
@@ -1208,12 +1326,13 @@
     
     return V;
   }
-  
-  void handleUseOfUninitVariable(const VarDecl *vd, const UninitUse &use) {
+
+  void handleUseOfUninitVariable(const VarDecl *vd,
+                                 const UninitUse &use) override {
     getUses(vd).getPointer()->push_back(use);
   }
   
-  void handleSelfInit(const VarDecl *vd) {
+  void handleSelfInit(const VarDecl *vd) override {
     getUses(vd).setInt(true);
   }
   
@@ -1240,8 +1359,14 @@
         // Sort the uses by their SourceLocations.  While not strictly
         // guaranteed to produce them in line/column order, this will provide
         // a stable ordering.
-        std::sort(vec->begin(), vec->end(), SLocSort());
-        
+        std::sort(vec->begin(), vec->end(),
+                  [](const UninitUse &a, const UninitUse &b) {
+          // Prefer a more confident report over a less confident one.
+          if (a.getKind() != b.getKind())
+            return a.getKind() > b.getKind();
+          return a.getUser()->getLocStart() < b.getUser()->getLocStart();
+        });
+
         for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
              ++vi) {
           // If we have self-init, downgrade all uses to 'may be uninitialized'.
@@ -1304,12 +1429,13 @@
   SourceLocation FunLocation, FunEndLocation;
 
   // Helper functions
-  void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
+  void warnLockMismatch(unsigned DiagID, StringRef Kind, Name LockName,
+                        SourceLocation Loc) {
     // Gracefully handle rare cases when the analysis can't get a more
     // precise source location.
     if (!Loc.isValid())
       Loc = FunLocation;
-    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << LockName);
+    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << LockName);
     Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
   }
 
@@ -1332,22 +1458,33 @@
     }
   }
 
-  void handleInvalidLockExp(SourceLocation Loc) {
-    PartialDiagnosticAt Warning(Loc,
-                                S.PDiag(diag::warn_cannot_resolve_lock) << Loc);
+  void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override {
+    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
+                                         << Loc);
     Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
   }
-  void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
-    warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
+  void handleUnmatchedUnlock(StringRef Kind, Name LockName,
+                             SourceLocation Loc) override {
+    warnLockMismatch(diag::warn_unlock_but_no_lock, Kind, LockName, Loc);
+  }
+  void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
+                                 LockKind Expected, LockKind Received,
+                                 SourceLocation Loc) override {
+    if (Loc.isInvalid())
+      Loc = FunLocation;
+    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_kind_mismatch)
+                                         << Kind << LockName << Received
+                                         << Expected);
+    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+  }
+  void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation Loc) override {
+    warnLockMismatch(diag::warn_double_lock, Kind, LockName, Loc);
   }
 
-  void handleDoubleLock(Name LockName, SourceLocation Loc) {
-    warnLockMismatch(diag::warn_double_lock, LockName, Loc);
-  }
-
-  void handleMutexHeldEndOfScope(Name LockName, SourceLocation LocLocked,
+  void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
+                                 SourceLocation LocLocked,
                                  SourceLocation LocEndOfScope,
-                                 LockErrorKind LEK){
+                                 LockErrorKind LEK) override {
     unsigned DiagID = 0;
     switch (LEK) {
       case LEK_LockedSomePredecessors:
@@ -1366,29 +1503,33 @@
     if (LocEndOfScope.isInvalid())
       LocEndOfScope = FunEndLocation;
 
-    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << LockName);
+    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind
+                                                               << LockName);
     if (LocLocked.isValid()) {
-      PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here));
+      PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here)
+                                              << Kind);
       Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
       return;
     }
     Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
   }
 
-
-  void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
-                                SourceLocation Loc2) {
-    PartialDiagnosticAt Warning(
-      Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName);
-    PartialDiagnosticAt Note(
-      Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) << LockName);
+  void handleExclusiveAndShared(StringRef Kind, Name LockName,
+                                SourceLocation Loc1,
+                                SourceLocation Loc2) override {
+    PartialDiagnosticAt Warning(Loc1,
+                                S.PDiag(diag::warn_lock_exclusive_and_shared)
+                                    << Kind << LockName);
+    PartialDiagnosticAt Note(Loc2, S.PDiag(diag::note_lock_exclusive_and_shared)
+                                       << Kind << LockName);
     Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
   }
 
-  void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
-                         AccessKind AK, SourceLocation Loc) {
-    assert((POK == POK_VarAccess || POK == POK_VarDereference)
-             && "Only works for variables");
+  void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
+                         ProtectedOperationKind POK, AccessKind AK,
+                         SourceLocation Loc) override {
+    assert((POK == POK_VarAccess || POK == POK_VarDereference) &&
+           "Only works for variables");
     unsigned DiagID = POK == POK_VarAccess?
                         diag::warn_variable_requires_any_lock:
                         diag::warn_var_deref_requires_any_lock;
@@ -1397,9 +1538,10 @@
     Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
   }
 
-  void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
-                          Name LockName, LockKind LK, SourceLocation Loc,
-                          Name *PossibleMatch) {
+  void handleMutexNotHeld(StringRef Kind, const NamedDecl *D,
+                          ProtectedOperationKind POK, Name LockName,
+                          LockKind LK, SourceLocation Loc,
+                          Name *PossibleMatch) override {
     unsigned DiagID = 0;
     if (PossibleMatch) {
       switch (POK) {
@@ -1413,10 +1555,11 @@
           DiagID = diag::warn_fun_requires_lock_precise;
           break;
       }
-      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
-        << D->getNameAsString() << LockName << LK);
+      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
+                                                       << D->getNameAsString()
+                                                       << LockName << LK);
       PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
-                               << *PossibleMatch);
+                                        << *PossibleMatch);
       Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
     } else {
       switch (POK) {
@@ -1430,15 +1573,17 @@
           DiagID = diag::warn_fun_requires_lock;
           break;
       }
-      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
-        << D->getNameAsString() << LockName << LK);
+      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
+                                                       << D->getNameAsString()
+                                                       << LockName << LK);
       Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
     }
   }
 
-  void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
-    PartialDiagnosticAt Warning(Loc,
-      S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName);
+  void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
+                             SourceLocation Loc) override {
+    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
+                                         << Kind << FunName << LockName);
     Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
   }
 };
@@ -1461,8 +1606,8 @@
 public:
   
   ConsumedWarningsHandler(Sema &S) : S(S) {}
-  
-  void emitDiagnostics() {
+
+  void emitDiagnostics() override {
     Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
     
     for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
@@ -1476,8 +1621,9 @@
       }
     }
   }
-  
-  void warnLoopStateMismatch(SourceLocation Loc, StringRef VariableName) {
+
+  void warnLoopStateMismatch(SourceLocation Loc,
+                             StringRef VariableName) override {
     PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_loop_state_mismatch) <<
       VariableName);
     
@@ -1487,7 +1633,7 @@
   void warnParamReturnTypestateMismatch(SourceLocation Loc,
                                         StringRef VariableName,
                                         StringRef ExpectedState,
-                                        StringRef ObservedState) {
+                                        StringRef ObservedState) override {
     
     PartialDiagnosticAt Warning(Loc, S.PDiag(
       diag::warn_param_return_typestate_mismatch) << VariableName <<
@@ -1497,7 +1643,7 @@
   }
   
   void warnParamTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
-                                  StringRef ObservedState) {
+                                  StringRef ObservedState) override {
     
     PartialDiagnosticAt Warning(Loc, S.PDiag(
       diag::warn_param_typestate_mismatch) << ExpectedState << ObservedState);
@@ -1506,7 +1652,7 @@
   }
   
   void warnReturnTypestateForUnconsumableType(SourceLocation Loc,
-                                              StringRef TypeName) {
+                                              StringRef TypeName) override {
     PartialDiagnosticAt Warning(Loc, S.PDiag(
       diag::warn_return_typestate_for_unconsumable_type) << TypeName);
     
@@ -1514,7 +1660,7 @@
   }
   
   void warnReturnTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
-                                   StringRef ObservedState) {
+                                   StringRef ObservedState) override {
                                     
     PartialDiagnosticAt Warning(Loc, S.PDiag(
       diag::warn_return_typestate_mismatch) << ExpectedState << ObservedState);
@@ -1523,7 +1669,7 @@
   }
   
   void warnUseOfTempInInvalidState(StringRef MethodName, StringRef State,
-                                   SourceLocation Loc) {
+                                   SourceLocation Loc) override {
                                                     
     PartialDiagnosticAt Warning(Loc, S.PDiag(
       diag::warn_use_of_temp_in_invalid_state) << MethodName << State);
@@ -1532,7 +1678,7 @@
   }
   
   void warnUseInInvalidState(StringRef MethodName, StringRef VariableName,
-                                  StringRef State, SourceLocation Loc) {
+                             StringRef State, SourceLocation Loc) override {
   
     PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_use_in_invalid_state) <<
                                 MethodName << VariableName << State);
@@ -1554,6 +1700,11 @@
   enableConsumedAnalysis = 0;
 }
 
+static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) {
+  return (unsigned) D.getDiagnosticLevel(diag, SourceLocation()) !=
+                    DiagnosticsEngine::Ignored;
+}
+
 clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
   : S(s),
     NumFunctionsAnalyzed(0),
@@ -1565,16 +1716,21 @@
     MaxUninitAnalysisVariablesPerFunction(0),
     NumUninitAnalysisBlockVisits(0),
     MaxUninitAnalysisBlockVisitsPerFunction(0) {
+
+  using namespace diag;
   DiagnosticsEngine &D = S.getDiagnostics();
-  DefaultPolicy.enableCheckUnreachable = (unsigned)
-    (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
-        DiagnosticsEngine::Ignored);
-  DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
-    (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
-     DiagnosticsEngine::Ignored);
-  DefaultPolicy.enableConsumedAnalysis = (unsigned)
-    (D.getDiagnosticLevel(diag::warn_use_in_invalid_state, SourceLocation()) !=
-     DiagnosticsEngine::Ignored);
+
+  DefaultPolicy.enableCheckUnreachable =
+    isEnabled(D, warn_unreachable) ||
+    isEnabled(D, warn_unreachable_break) ||
+    isEnabled(D, warn_unreachable_return) ||
+    isEnabled(D, warn_unreachable_loop_increment);
+
+  DefaultPolicy.enableThreadSafetyAnalysis =
+    isEnabled(D, warn_double_lock);
+
+  DefaultPolicy.enableConsumedAnalysis =
+    isEnabled(D, warn_use_in_invalid_state);
 }
 
 static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
@@ -1629,6 +1785,7 @@
   AC.getCFGBuildOptions().AddInitializers = true;
   AC.getCFGBuildOptions().AddImplicitDtors = true;
   AC.getCFGBuildOptions().AddTemporaryDtors = true;
+  AC.getCFGBuildOptions().AddCXXNewAllocator = false;
 
   // Force that certain expressions appear as CFGElements in the CFG.  This
   // is used to speed up various analyses.
@@ -1789,6 +1946,16 @@
                                D->getLocStart()) != DiagnosticsEngine::Ignored)
     diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());
 
+
+  // Check for infinite self-recursion in functions
+  if (Diags.getDiagnosticLevel(diag::warn_infinite_recursive_function,
+                               D->getLocStart())
+      != DiagnosticsEngine::Ignored) {
+    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+      checkRecursiveFunction(S, FD, Body, AC);
+    }
+  }
+
   // Collect statistics about the CFG if it was built.
   if (S.CollectStats && AC.isCFGBuilt()) {
     ++NumFunctionsAnalyzed;
diff --git a/lib/Sema/Android.mk b/lib/Sema/Android.mk
index b9b496a..77a2e93 100644
--- a/lib/Sema/Android.mk
+++ b/lib/Sema/Android.mk
@@ -13,6 +13,7 @@
 	AttrParsedAttrList.inc    \
 	AttrSpellingListIndex.inc \
 	AttrTemplateInstantiate.inc	\
+	AttrVisitor.inc \
 	CommentCommandList.inc \
 	CommentNodes.inc \
 	DeclNodes.inc	\
@@ -69,7 +70,6 @@
 	SemaTemplateInstantiateDecl.cpp	\
 	SemaTemplateVariadic.cpp	\
 	SemaType.cpp	\
-	TargetAttributesSema.cpp	\
 	TypeLocBuilder.cpp
 
 LOCAL_SRC_FILES := $(clang_sema_SRC_FILES)
diff --git a/lib/Sema/AttributeList.cpp b/lib/Sema/AttributeList.cpp
index c980772..476a22b 100644
--- a/lib/Sema/AttributeList.cpp
+++ b/lib/Sema/AttributeList.cpp
@@ -13,8 +13,11 @@
 
 #include "clang/Sema/AttributeList.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
 #include "clang/AST/Expr.h"
 #include "clang/Basic/IdentifierTable.h"
+#include "clang/Sema/SemaInternal.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/StringSwitch.h"
 using namespace clang;
@@ -103,14 +106,6 @@
   } while (pool);
 }
 
-AttributeList *
-AttributePool::createIntegerAttribute(ASTContext &C, IdentifierInfo *Name,
-                                      SourceLocation TokLoc, int Arg) {
-  ArgsUnion IArg = IntegerLiteral::Create(C, llvm::APInt(32, (uint64_t) Arg),
-                                      C.IntTy, TokLoc);
-  return create(Name, TokLoc, 0, TokLoc, &IArg, 1, AttributeList::AS_GNU);
-}
-
 #include "clang/Sema/AttrParsedAttrKinds.inc"
 
 AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
@@ -118,21 +113,25 @@
                                            Syntax SyntaxUsed) {
   StringRef AttrName = Name->getName();
 
-  // Normalize the attribute name, __foo__ becomes foo.
-  if (AttrName.startswith("__") && AttrName.endswith("__") &&
-      AttrName.size() >= 4)
-    AttrName = AttrName.substr(2, AttrName.size() - 4);
-
-  SmallString<64> Buf;
+  SmallString<64> FullName;
   if (ScopeName)
-    Buf += ScopeName->getName();
+    FullName += ScopeName->getName();
+
+  // Normalize the attribute name, __foo__ becomes foo. This is only allowable
+  // for GNU attributes.
+  bool IsGNU = SyntaxUsed == AS_GNU || (SyntaxUsed == AS_CXX11 &&
+                                        FullName == "gnu");
+  if (IsGNU && AttrName.size() >= 4 && AttrName.startswith("__") &&
+      AttrName.endswith("__"))
+    AttrName = AttrName.slice(2, AttrName.size() - 2);
+
   // Ensure that in the case of C++11 attributes, we look for '::foo' if it is
   // unscoped.
   if (ScopeName || SyntaxUsed == AS_CXX11)
-    Buf += "::";
-  Buf += AttrName;
+    FullName += "::";
+  FullName += AttrName;
 
-  return ::getAttrKind(Buf);
+  return ::getAttrKind(FullName, SyntaxUsed);
 }
 
 unsigned AttributeList::getAttributeSpellingListIndex() const {
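A rough sketch of what the tightened normalization above means in practice: the __name__ form is folded to name only for GNU-style attributes (and C++11 attributes in the gnu namespace), while other C++11 spellings are looked up verbatim.

    __attribute__((__aligned__(8))) int a;  // "__aligned__" -> "aligned"
    [[gnu::__aligned__(8)]] int b;          // "gnu::__aligned__" -> "gnu::aligned"
    [[__carries_dependency__]] int f();     // not GNU-scoped: name kept as-is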
@@ -149,6 +148,15 @@
   unsigned NumArgs : 4;
   unsigned OptArgs : 4;
   unsigned HasCustomParsing : 1;
+  unsigned IsTargetSpecific : 1;
+  unsigned IsType : 1;
+  unsigned IsKnownToGCC : 1;
+
+  bool (*DiagAppertainsToDecl)(Sema &S, const AttributeList &Attr,
+                               const Decl *);
+  bool (*DiagLangOpts)(Sema &S, const AttributeList &Attr);
+  bool (*ExistsInTarget)(const llvm::Triple &T);
+  unsigned (*SpellingIndexToSemanticSpelling)(const AttributeList &Attr);
 };
 
 namespace {
@@ -170,3 +178,31 @@
 bool AttributeList::hasCustomParsing() const {
   return getInfo(*this).HasCustomParsing;
 }
+
+bool AttributeList::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
+  return getInfo(*this).DiagAppertainsToDecl(S, *this, D);
+}
+
+bool AttributeList::diagnoseLangOpts(Sema &S) const {
+  return getInfo(*this).DiagLangOpts(S, *this);
+}
+
+bool AttributeList::isTargetSpecificAttr() const {
+  return getInfo(*this).IsTargetSpecific;
+}
+
+bool AttributeList::isTypeAttr() const {
+  return getInfo(*this).IsType;
+}
+
+bool AttributeList::existsInTarget(const llvm::Triple &T) const {
+  return getInfo(*this).ExistsInTarget(T);
+}
+
+bool AttributeList::isKnownToGCC() const {
+  return getInfo(*this).IsKnownToGCC;
+}
+
+unsigned AttributeList::getSemanticSpelling() const {
+  return getInfo(*this).SpellingIndexToSemanticSpelling(*this);
+}
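The new queries are thin wrappers over the tablegen-generated ParsedAttrInfo table. A hedged sketch of how a handler on the Sema side might use them; the surrounding function and its control flow here are assumptions, not code from this patch:

    static void handleSomeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
      // Bail out if the generated predicates reject the subject or language mode.
      if (!Attr.diagnoseAppertainsTo(S, D) || !Attr.diagnoseLangOpts(S))
        return;
      // Target-specific attributes may simply not exist for the current triple.
      if (Attr.isTargetSpecificAttr() &&
          !Attr.existsInTarget(S.Context.getTargetInfo().getTriple()))
        return;
      // ... create and attach the semantic attribute ...
    }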
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
index 5e09140..7847d2c 100644
--- a/lib/Sema/CMakeLists.txt
+++ b/lib/Sema/CMakeLists.txt
@@ -1,8 +1,5 @@
 set(LLVM_LINK_COMPONENTS
-  ${LLVM_TARGETS_TO_BUILD}
-  asmparser
-  support
-  mc
+  Support
   )
 
 add_clang_library(clangSema
@@ -50,30 +47,9 @@
   SemaTemplateInstantiateDecl.cpp
   SemaTemplateVariadic.cpp
   SemaType.cpp
-  TargetAttributesSema.cpp
   TypeLocBuilder.cpp
-  )
 
-add_dependencies(clangSema
-  ClangARMNeon
-  ClangAttrClasses
-  ClangAttrList
-  ClangAttrParsedAttrList
-  ClangAttrParsedAttrKinds
-  ClangAttrParsedAttrImpl
-  ClangAttrSpellingListIndex
-  ClangAttrTemplateInstantiate
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticAST
-  ClangDiagnosticComment
-  ClangDiagnosticCommon
-  ClangDiagnosticParse
-  ClangDiagnosticSema
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangSema
+  LINK_LIBS
   clangAST
   clangAnalysis
   clangBasic
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index c2f1615..5c2006f 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -149,8 +149,8 @@
 DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
                                              bool isAmbiguous,
                                              SourceLocation LParenLoc,
-                                             ParamInfo *ArgInfo,
-                                             unsigned NumArgs,
+                                             ParamInfo *Params,
+                                             unsigned NumParams,
                                              SourceLocation EllipsisLoc,
                                              SourceLocation RParenLoc,
                                              unsigned TypeQuals,
@@ -185,10 +185,10 @@
   I.Fun.LParenLoc               = LParenLoc.getRawEncoding();
   I.Fun.EllipsisLoc             = EllipsisLoc.getRawEncoding();
   I.Fun.RParenLoc               = RParenLoc.getRawEncoding();
-  I.Fun.DeleteArgInfo           = false;
+  I.Fun.DeleteParams            = false;
   I.Fun.TypeQuals               = TypeQuals;
-  I.Fun.NumArgs                 = NumArgs;
-  I.Fun.ArgInfo                 = 0;
+  I.Fun.NumParams               = NumParams;
+  I.Fun.Params                  = 0;
   I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef;
   I.Fun.RefQualifierLoc         = RefQualifierLoc.getRawEncoding();
   I.Fun.ConstQualifierLoc       = ConstQualifierLoc.getRawEncoding();
@@ -203,22 +203,22 @@
                                   TrailingReturnType.isInvalid();
   I.Fun.TrailingReturnType      = TrailingReturnType.get();
 
-  // new[] an argument array if needed.
-  if (NumArgs) {
+  // new[] a parameter array if needed.
+  if (NumParams) {
     // If the 'InlineParams' in Declarator is unused and big enough, put our
     // parameter list there (in an effort to avoid new/delete traffic).  If it
     // is already used (consider a function returning a function pointer) or too
-    // small (function taking too many arguments), go to the heap.
+    // small (function with too many parameters), go to the heap.
     if (!TheDeclarator.InlineParamsUsed &&
-        NumArgs <= llvm::array_lengthof(TheDeclarator.InlineParams)) {
-      I.Fun.ArgInfo = TheDeclarator.InlineParams;
-      I.Fun.DeleteArgInfo = false;
+        NumParams <= llvm::array_lengthof(TheDeclarator.InlineParams)) {
+      I.Fun.Params = TheDeclarator.InlineParams;
+      I.Fun.DeleteParams = false;
       TheDeclarator.InlineParamsUsed = true;
     } else {
-      I.Fun.ArgInfo = new DeclaratorChunk::ParamInfo[NumArgs];
-      I.Fun.DeleteArgInfo = true;
+      I.Fun.Params = new DeclaratorChunk::ParamInfo[NumParams];
+      I.Fun.DeleteParams = true;
     }
-    memcpy(I.Fun.ArgInfo, ArgInfo, sizeof(ArgInfo[0])*NumArgs);
+    memcpy(I.Fun.Params, Params, sizeof(Params[0]) * NumParams);
   }
 
   // Check what exception specification information we should actually store.
@@ -285,14 +285,6 @@
     case TST_unspecified:
     case TST_void:
     case TST_wchar:
-    case TST_image1d_t:
-    case TST_image1d_array_t:
-    case TST_image1d_buffer_t:
-    case TST_image2d_t:
-    case TST_image2d_array_t:
-    case TST_image3d_t:
-    case TST_sampler_t:
-    case TST_event_t:
       return false;
 
     case TST_decltype_auto:
@@ -426,12 +418,13 @@
   llvm_unreachable("Unknown typespec!");
 }
 
-const char *DeclSpec::getSpecifierName(DeclSpec::TST T) {
+const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
+                                       const PrintingPolicy &Policy) {
   switch (T) {
   case DeclSpec::TST_unspecified: return "unspecified";
   case DeclSpec::TST_void:        return "void";
   case DeclSpec::TST_char:        return "char";
-  case DeclSpec::TST_wchar:       return "wchar_t";
+  case DeclSpec::TST_wchar:       return Policy.MSWChar ? "__wchar_t" : "wchar_t";
   case DeclSpec::TST_char16:      return "char16_t";
   case DeclSpec::TST_char32:      return "char32_t";
   case DeclSpec::TST_int:         return "int";
@@ -439,7 +432,7 @@
   case DeclSpec::TST_half:        return "half";
   case DeclSpec::TST_float:       return "float";
   case DeclSpec::TST_double:      return "double";
-  case DeclSpec::TST_bool:        return "_Bool";
+  case DeclSpec::TST_bool:        return Policy.Bool ? "bool" : "_Bool";
   case DeclSpec::TST_decimal32:   return "_Decimal32";
   case DeclSpec::TST_decimal64:   return "_Decimal64";
   case DeclSpec::TST_decimal128:  return "_Decimal128";
@@ -457,14 +450,6 @@
   case DeclSpec::TST_underlyingType: return "__underlying_type";
   case DeclSpec::TST_unknown_anytype: return "__unknown_anytype";
   case DeclSpec::TST_atomic: return "_Atomic";
-  case DeclSpec::TST_image1d_t:   return "image1d_t";
-  case DeclSpec::TST_image1d_array_t: return "image1d_array_t";
-  case DeclSpec::TST_image1d_buffer_t: return "image1d_buffer_t";
-  case DeclSpec::TST_image2d_t:   return "image2d_t";
-  case DeclSpec::TST_image2d_array_t: return "image2d_array_t";
-  case DeclSpec::TST_image3d_t:   return "image3d_t";
-  case DeclSpec::TST_sampler_t:   return "sampler_t";
-  case DeclSpec::TST_event_t:     return "event_t";
   case DeclSpec::TST_error:       return "(error)";
   }
   llvm_unreachable("Unknown typespec!");
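With a PrintingPolicy threaded through, the spelling used in diagnostics can follow the current language mode. A small sketch, assuming Policy is the in-scope PrintingPolicy and that Policy.Bool and Policy.MSWChar are set from the language options as their names suggest:

    const char *B = DeclSpec::getSpecifierName(DeclSpec::TST_bool, Policy);
    // -> "bool" when Policy.Bool is set (e.g. C++), "_Bool" otherwise
    const char *W = DeclSpec::getSpecifierName(DeclSpec::TST_wchar, Policy);
    // -> "__wchar_t" when Policy.MSWChar is set, "wchar_t" otherwise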
@@ -483,7 +468,8 @@
 
 bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
                                    const char *&PrevSpec,
-                                   unsigned &DiagID) {
+                                   unsigned &DiagID,
+                                   const PrintingPolicy &Policy) {
   // OpenCL v1.1 s6.8g: "The extern, static, auto and register storage-class
   // specifiers are not supported.
   // It seems sensible to prohibit private_extern too
@@ -518,10 +504,10 @@
     bool isInvalid = true;
     if (TypeSpecType == TST_unspecified && S.getLangOpts().CPlusPlus) {
       if (SC == SCS_auto)
-        return SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID);
+        return SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID, Policy);
       if (StorageClassSpec == SCS_auto) {
         isInvalid = SetTypeSpecType(TST_auto, StorageClassSpecLoc,
-                                    PrevSpec, DiagID);
+                                    PrevSpec, DiagID, Policy);
         assert(!isInvalid && "auto SCS -> TST recovery failed");
       }
     }
@@ -557,7 +543,8 @@
 /// specified).
 bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc,
                                 const char *&PrevSpec,
-                                unsigned &DiagID) {
+                                unsigned &DiagID,
+                                const PrintingPolicy &Policy) {
   // Overwrite TSWLoc only if TypeSpecWidth was unspecified, so that
   // for 'long long' we will keep the source location of the first 'long'.
   if (TypeSpecWidth == TSW_unspecified)
@@ -568,7 +555,7 @@
   TypeSpecWidth = W;
   if (TypeAltiVecVector && !TypeAltiVecBool &&
       ((TypeSpecWidth == TSW_long) || (TypeSpecWidth == TSW_longlong))) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::warn_vector_long_decl_spec_combination;
     return true;
   }
@@ -598,19 +585,21 @@
 bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
                                const char *&PrevSpec,
                                unsigned &DiagID,
-                               ParsedType Rep) {
-  return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep);
+                               ParsedType Rep,
+                               const PrintingPolicy &Policy) {
+  return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep, Policy);
 }
 
 bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
                                SourceLocation TagNameLoc,
                                const char *&PrevSpec,
                                unsigned &DiagID,
-                               ParsedType Rep) {
+                               ParsedType Rep,
+                               const PrintingPolicy &Policy) {
   assert(isTypeRep(T) && "T does not store a type");
   assert(Rep && "no type provided!");
   if (TypeSpecType != TST_unspecified) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::err_invalid_decl_spec_combination;
     return true;
   }
@@ -625,11 +614,12 @@
 bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
                                const char *&PrevSpec,
                                unsigned &DiagID,
-                               Expr *Rep) {
+                               Expr *Rep,
+                               const PrintingPolicy &Policy) {
   assert(isExprRep(T) && "T does not store an expr");
   assert(Rep && "no expression provided!");
   if (TypeSpecType != TST_unspecified) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::err_invalid_decl_spec_combination;
     return true;
   }
@@ -644,20 +634,22 @@
 bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
                                const char *&PrevSpec,
                                unsigned &DiagID,
-                               Decl *Rep, bool Owned) {
-  return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep, Owned);
+                               Decl *Rep, bool Owned,
+                               const PrintingPolicy &Policy) {
+  return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep, Owned, Policy);
 }
 
 bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
                                SourceLocation TagNameLoc,
                                const char *&PrevSpec,
                                unsigned &DiagID,
-                               Decl *Rep, bool Owned) {
+                               Decl *Rep, bool Owned,
+                               const PrintingPolicy &Policy) {
   assert(isDeclRep(T) && "T does not store a decl");
   // Unlike the other cases, we don't assert that we actually get a decl.
 
   if (TypeSpecType != TST_unspecified) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::err_invalid_decl_spec_combination;
     return true;
   }
@@ -671,11 +663,12 @@
 
 bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
                                const char *&PrevSpec,
-                               unsigned &DiagID) {
+                               unsigned &DiagID,
+                               const PrintingPolicy &Policy) {
   assert(!isDeclRep(T) && !isTypeRep(T) && !isExprRep(T) &&
          "rep required for these type-spec kinds!");
   if (TypeSpecType != TST_unspecified) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::err_invalid_decl_spec_combination;
     return true;
   }
@@ -688,7 +681,7 @@
   TypeSpecType = T;
   TypeSpecOwned = false;
   if (TypeAltiVecVector && !TypeAltiVecBool && (TypeSpecType == TST_double)) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::err_invalid_vector_decl_spec;
     return true;
   }
@@ -696,9 +689,10 @@
 }
 
 bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc,
-                          const char *&PrevSpec, unsigned &DiagID) {
+                          const char *&PrevSpec, unsigned &DiagID,
+                          const PrintingPolicy &Policy) {
   if (TypeSpecType != TST_unspecified) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::err_invalid_vector_decl_spec_combination;
     return true;
   }
@@ -708,10 +702,11 @@
 }
 
 bool DeclSpec::SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc,
-                          const char *&PrevSpec, unsigned &DiagID) {
+                          const char *&PrevSpec, unsigned &DiagID,
+                          const PrintingPolicy &Policy) {
   if (!TypeAltiVecVector || TypeAltiVecPixel ||
       (TypeSpecType != TST_unspecified)) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::err_invalid_pixel_decl_spec_combination;
     return true;
   }
@@ -722,10 +717,11 @@
 }
 
 bool DeclSpec::SetTypeAltiVecBool(bool isAltiVecBool, SourceLocation Loc,
-                          const char *&PrevSpec, unsigned &DiagID) {
+                                  const char *&PrevSpec, unsigned &DiagID,
+                                  const PrintingPolicy &Policy) {
   if (!TypeAltiVecVector || TypeAltiVecBool ||
       (TypeSpecType != TST_unspecified)) {
-    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+    PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
     DiagID = diag::err_invalid_vector_bool_decl_spec;
     return true;
   }
@@ -843,7 +839,12 @@
                              unsigned &DiagID) {
   if (Friend_specified) {
     PrevSpec = "friend";
-    DiagID = diag::ext_duplicate_declspec;
+    // Keep the later location, so that we can later diagnose ill-formed
+    // declarations like 'friend class X friend;'. Per [class.friend]p3,
+    // 'friend' must be the first token in a friend declaration that is
+    // not a function declaration.
+    FriendLoc = Loc;
+    DiagID = diag::warn_duplicate_declspec;
     return true;
   }
 
@@ -866,7 +867,13 @@
 
 bool DeclSpec::SetConstexprSpec(SourceLocation Loc, const char *&PrevSpec,
                                 unsigned &DiagID) {
-  // 'constexpr constexpr' is ok.
+  // 'constexpr constexpr' is ok, but warn as this is likely not what the user
+  // intended.
+  if (Constexpr_specified) {
+    DiagID = diag::warn_duplicate_declspec;
+    PrevSpec = "constexpr";
+    return true;
+  }
   Constexpr_specified = true;
   ConstexprLoc = Loc;
   return false;
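Both changes above turn a previously silent repetition into warn_duplicate_declspec. Hypothetical snippets that would now be diagnosed:

    constexpr constexpr int N = 3;        // warns: duplicate 'constexpr'
    struct S { friend class X friend; };  // ill-formed; keeping the later
                                          // FriendLoc aids the follow-on diagnostic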
@@ -906,7 +913,7 @@
 /// "_Imaginary" (lacking an FP type).  This returns a diagnostic to issue or
 /// diag::NUM_DIAGNOSTICS if there is no error.  After calling this method,
 /// DeclSpec is guaranteed self-consistent, even if an error occurred.
-void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP) {
+void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPolicy &Policy) {
   // Before possibly changing their values, save specs as written.
   SaveWrittenBuiltinSpecs();
 
@@ -959,7 +966,7 @@
            (TypeSpecType != TST_int)) || TypeAltiVecPixel) {
         Diag(D, TSTLoc, diag::err_invalid_vector_bool_decl_spec)
           << (TypeAltiVecPixel ? "__pixel" :
-                                 getSpecifierName((TST)TypeSpecType));
+                                 getSpecifierName((TST)TypeSpecType, Policy));
       }
 
       // Only 'short' is valid with vector bool. (PIM 2.1)
@@ -989,7 +996,7 @@
     else if (TypeSpecType != TST_int  && TypeSpecType != TST_int128 &&
              TypeSpecType != TST_char && TypeSpecType != TST_wchar) {
       Diag(D, TSSLoc, diag::err_invalid_sign_spec)
-        << getSpecifierName((TST)TypeSpecType);
+        << getSpecifierName((TST)TypeSpecType, Policy);
       // signed double -> double.
       TypeSpecSign = TSS_unspecified;
     }
@@ -1006,7 +1013,7 @@
       Diag(D, TSWLoc,
            TypeSpecWidth == TSW_short ? diag::err_invalid_short_spec
                                       : diag::err_invalid_longlong_spec)
-        <<  getSpecifierName((TST)TypeSpecType);
+        <<  getSpecifierName((TST)TypeSpecType, Policy);
       TypeSpecType = TST_int;
       TypeSpecOwned = false;
     }
@@ -1016,7 +1023,7 @@
       TypeSpecType = TST_int;  // long -> long int.
     else if (TypeSpecType != TST_int && TypeSpecType != TST_double) {
       Diag(D, TSWLoc, diag::err_invalid_long_spec)
-        << getSpecifierName((TST)TypeSpecType);
+        << getSpecifierName((TST)TypeSpecType, Policy);
       TypeSpecType = TST_int;
       TypeSpecOwned = false;
     }
@@ -1038,7 +1045,7 @@
         Diag(D, TSTLoc, diag::ext_integer_complex);
     } else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
       Diag(D, TSCLoc, diag::err_invalid_complex_spec)
-        << getSpecifierName((TST)TypeSpecType);
+        << getSpecifierName((TST)TypeSpecType, Policy);
       TypeSpecComplex = TSC_unspecified;
     }
   }
diff --git a/lib/Sema/DelayedDiagnostic.cpp b/lib/Sema/DelayedDiagnostic.cpp
index 3100432..533b7ef 100644
--- a/lib/Sema/DelayedDiagnostic.cpp
+++ b/lib/Sema/DelayedDiagnostic.cpp
@@ -19,13 +19,22 @@
 using namespace clang;
 using namespace sema;
 
-DelayedDiagnostic DelayedDiagnostic::makeDeprecation(SourceLocation Loc,
+DelayedDiagnostic
+DelayedDiagnostic::makeAvailability(Sema::AvailabilityDiagnostic AD,
+                                    SourceLocation Loc,
                                     const NamedDecl *D,
                                     const ObjCInterfaceDecl *UnknownObjCClass,
                                     const ObjCPropertyDecl  *ObjCProperty,
                                     StringRef Msg) {
   DelayedDiagnostic DD;
-  DD.Kind = Deprecation;
+  switch (AD) {
+    case Sema::AD_Deprecation:
+      DD.Kind = Deprecation;
+      break;
+    case Sema::AD_Unavailable:
+      DD.Kind = Unavailable;
+      break;
+  }
   DD.Triggered = false;
   DD.Loc = Loc;
   DD.DeprecationData.Decl = D;
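The single factory now covers both availability kinds. A sketch of a call site, with the argument names taken from the new signature:

    DelayedDiagnostic DD = DelayedDiagnostic::makeAvailability(
        Sema::AD_Deprecation, Loc, D,
        /*UnknownObjCClass=*/nullptr, /*ObjCProperty=*/nullptr, Msg);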
diff --git a/lib/Sema/IdentifierResolver.cpp b/lib/Sema/IdentifierResolver.cpp
index 6e354b9..705fb07 100644
--- a/lib/Sema/IdentifierResolver.cpp
+++ b/lib/Sema/IdentifierResolver.cpp
@@ -95,7 +95,7 @@
 /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
 /// true if 'D' belongs to the given declaration context.
 bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S,
-                             bool ExplicitInstantiationOrSpecialization) const {
+                                       bool AllowInlineNamespace) const {
   Ctx = Ctx->getRedeclContext();
 
   if (Ctx->isFunctionOrMethod() || S->isFunctionPrototypeScope()) {
@@ -131,9 +131,8 @@
   }
 
   DeclContext *DCtx = D->getDeclContext()->getRedeclContext();
-  return ExplicitInstantiationOrSpecialization
-           ? Ctx->InEnclosingNamespaceSetOf(DCtx)
-           : Ctx->Equals(DCtx);
+  return AllowInlineNamespace ? Ctx->InEnclosingNamespaceSetOf(DCtx)
+                              : Ctx->Equals(DCtx);
 }
 
 /// AddDecl - Link the decl to its shadowed decl chain.
@@ -274,10 +273,8 @@
 
     // If the existing declaration is somewhere in the previous declaration
     // chain of the new declaration, then prefer the new declaration.
-    for (Decl::redecl_iterator RD = New->redecls_begin(), 
-                            RDEnd = New->redecls_end();
-         RD != RDEnd; ++RD) {
-      if (*RD == Existing)
+    for (auto RD : New->redecls()) {
+      if (RD == Existing)
         return DMK_Replace;
         
       if (RD->isCanonicalDecl())
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index d3de173..1f5d682 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -121,9 +121,11 @@
 
 /// GetDiagForGotoScopeDecl - If this decl induces a new goto scope, return a
 /// diagnostic that should be emitted if control goes over it. If not, return 0.
-static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) {
+static ScopePair GetDiagForGotoScopeDecl(Sema &S, const Decl *D) {
   if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
     unsigned InDiag = 0;
+    unsigned OutDiag = 0;
+
     if (VD->getType()->isVariablyModifiedType())
       InDiag = diag::note_protected_by_vla;
 
@@ -135,21 +137,24 @@
       return ScopePair(diag::note_protected_by_cleanup,
                        diag::note_exits_cleanup);
 
-    if (Context.getLangOpts().ObjCAutoRefCount && VD->hasLocalStorage()) {
-      switch (VD->getType().getObjCLifetime()) {
-      case Qualifiers::OCL_None:
-      case Qualifiers::OCL_ExplicitNone:
-      case Qualifiers::OCL_Autoreleasing:
-        break;
-
-      case Qualifiers::OCL_Strong:
-      case Qualifiers::OCL_Weak:
+    if (VD->hasLocalStorage()) {
+      switch (VD->getType().isDestructedType()) {
+      case QualType::DK_objc_strong_lifetime:
+      case QualType::DK_objc_weak_lifetime:
         return ScopePair(diag::note_protected_by_objc_ownership,
                          diag::note_exits_objc_ownership);
+
+      case QualType::DK_cxx_destructor:
+        OutDiag = diag::note_exits_dtor;
+        break;
+
+      case QualType::DK_none:
+        break;
       }
     }
 
-    if (Context.getLangOpts().CPlusPlus && VD->hasLocalStorage()) {
+    const Expr *Init = VD->getInit();
+    if (S.Context.getLangOpts().CPlusPlus && VD->hasLocalStorage() && Init) {
       // C++11 [stmt.dcl]p3:
       //   A program that jumps from a point where a variable with automatic
       //   storage duration is not in scope to a point where it is in scope
@@ -164,68 +169,34 @@
       //   where it is in scope is ill-formed unless the variable has
       //   POD type and is declared without an initializer.
 
-      const Expr *Init = VD->getInit();
-      if (!Init)
-        return ScopePair(InDiag, 0);
+      InDiag = diag::note_protected_by_variable_init;
 
-      const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init);
-      if (EWC)
-        Init = EWC->getSubExpr();
-
-      const MaterializeTemporaryExpr *M = NULL;
-      Init = Init->findMaterializedTemporary(M);
-
-      SmallVector<const Expr *, 2> CommaLHSs;
-      SmallVector<SubobjectAdjustment, 2> Adjustments;
-      Init = Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
-
-      QualType QT = Init->getType();
-      if (QT.isNull())
-        return ScopePair(diag::note_protected_by_variable_init, 0);
-
-      const Type *T = QT.getTypePtr();
-      if (T->isArrayType())
-        T = T->getBaseElementTypeUnsafe();
-
-      const CXXRecordDecl *Record = T->getAsCXXRecordDecl();
-      if (!Record)
-        return ScopePair(diag::note_protected_by_variable_init, 0);
-
-      // If we need to call a non trivial destructor for this variable,
-      // record an out diagnostic.
-      unsigned OutDiag = 0;
-      if (!Init->isGLValue() && !Record->hasTrivialDestructor())
-        OutDiag = diag::note_exits_dtor;
-
-      if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(Init)) {
-        const CXXConstructorDecl *ctor = cce->getConstructor();
-        // For a variable declared without an initializer, we will have
-        // call-style initialization and the initializer will be the
-        // CXXConstructExpr with no intervening nodes.
-        if (ctor->isTrivial() && ctor->isDefaultConstructor() &&
-            VD->getInit() == Init && VD->getInitStyle() == VarDecl::CallInit) {
+      // For a variable of (array of) class type declared without an
+      // initializer, we will have call-style initialization and the initializer
+      // will be the CXXConstructExpr with no intervening nodes.
+      if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+        const CXXConstructorDecl *Ctor = CCE->getConstructor();
+        if (Ctor->isTrivial() && Ctor->isDefaultConstructor() &&
+            VD->getInitStyle() == VarDecl::CallInit) {
           if (OutDiag)
             InDiag = diag::note_protected_by_variable_nontriv_destructor;
-          else if (!Record->isPOD())
+          else if (!Ctor->getParent()->isPOD())
             InDiag = diag::note_protected_by_variable_non_pod;
-          return ScopePair(InDiag, OutDiag);
+          else
+            InDiag = 0;
         }
       }
-
-      return ScopePair(diag::note_protected_by_variable_init, OutDiag);
     }
 
-    return ScopePair(InDiag, 0);
+    return ScopePair(InDiag, OutDiag);
   }
 
-  if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
+  if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
     if (TD->getUnderlyingType()->isVariablyModifiedType())
-      return ScopePair(diag::note_protected_by_vla_typedef, 0);
-  }
-
-  if (const TypeAliasDecl *TD = dyn_cast<TypeAliasDecl>(D)) {
-    if (TD->getUnderlyingType()->isVariablyModifiedType())
-      return ScopePair(diag::note_protected_by_vla_type_alias, 0);
+      return ScopePair(isa<TypedefDecl>(TD)
+                           ? diag::note_protected_by_vla_typedef
+                           : diag::note_protected_by_vla_type_alias,
+                       0);
   }
 
   return ScopePair(0U, 0U);
@@ -234,7 +205,7 @@
 /// \brief Build scope information for a declaration that is part of a DeclStmt.
 void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
   // If this decl causes a new scope, push and switch to it.
-  std::pair<unsigned,unsigned> Diags = GetDiagForGotoScopeDecl(S.Context, D);
+  std::pair<unsigned,unsigned> Diags = GetDiagForGotoScopeDecl(S, D);
   if (Diags.first || Diags.second) {
     Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second,
                                D->getLocation()));
@@ -396,9 +367,8 @@
     if (DeclStmt *DS = dyn_cast<DeclStmt>(SubStmt)) {
       // The decl statement creates a scope if any of the decls in it are VLAs
       // or have the cleanup attribute.
-      for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
-           I != E; ++I)
-        BuildScopeInformation(*I, ParentScope);
+      for (auto *I : DS->decls())
+        BuildScopeInformation(I, ParentScope);
       continue;
     }
     // Disallow jumps into any part of an @try statement by pushing a scope and
@@ -474,14 +444,32 @@
     if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(SubStmt)) {
       for (unsigned i = 0, e = EWC->getNumObjects(); i != e; ++i) {
         const BlockDecl *BDecl = EWC->getObject(i);
-        for (BlockDecl::capture_const_iterator ci = BDecl->capture_begin(),
-             ce = BDecl->capture_end(); ci != ce; ++ci) {
-          VarDecl *variable = ci->getVariable();
+        for (const auto &CI : BDecl->captures()) {
+          VarDecl *variable = CI.getVariable();
           BuildScopeInformation(variable, BDecl, ParentScope);
         }
       }
     }
-    
+
+    // Disallow jumps out of scopes containing temporaries lifetime-extended to
+    // automatic storage duration.
+    if (MaterializeTemporaryExpr *MTE =
+            dyn_cast<MaterializeTemporaryExpr>(SubStmt)) {
+      if (MTE->getStorageDuration() == SD_Automatic) {
+        SmallVector<const Expr *, 4> CommaLHS;
+        SmallVector<SubobjectAdjustment, 4> Adjustments;
+        const Expr *ExtendedObject =
+            MTE->GetTemporaryExpr()->skipRValueSubobjectAdjustments(
+                CommaLHS, Adjustments);
+        if (ExtendedObject->getType().isDestructedType()) {
+          Scopes.push_back(GotoScope(ParentScope, 0,
+                                     diag::note_exits_temporary_dtor,
+                                     ExtendedObject->getExprLoc()));
+          ParentScope = Scopes.size()-1;
+        }
+      }
+    }
+
     // Recursively walk the AST.
     BuildScopeInformation(SubStmt, ParentScope);
   }
@@ -762,7 +750,7 @@
   SmallVector<unsigned, 10> ToScopesError;
   SmallVector<unsigned, 10> ToScopesWarning;
   for (unsigned I = ToScope; I != CommonScope; I = Scopes[I].ParentScope) {
-    if (S.getLangOpts().MicrosoftMode && JumpDiagWarning != 0 &&
+    if (S.getLangOpts().MSVCCompat && JumpDiagWarning != 0 &&
         IsMicrosoftJumpWarning(JumpDiagError, Scopes[I].InDiag))
       ToScopesWarning.push_back(I);
     else if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
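
For illustration only, a minimal sketch (invented names) of the kind of jump the reworked scope checks above diagnose; jumps leaving a scope that holds a temporary lifetime-extended to automatic storage duration additionally get the new note_exits_temporary_dtor note.

    // Illustration only; 'Guard' and 'f' are invented for this sketch.
    struct Guard { Guard(); ~Guard(); };   // non-trivial ctor and dtor

    void f(bool cond) {
      if (cond)
        goto after;        // ill-formed: the jump enters the scope of 'g',
                           // bypassing its initialization
      Guard g;             // in-note: protected by variable initialization
                           // out-note: leaving the block runs a non-trivial dtor
    after:
      ;                    // 'g' is still in scope at the label
    }
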
diff --git a/lib/Sema/Scope.cpp b/lib/Sema/Scope.cpp
index 10f12ce..494768d 100644
--- a/lib/Sema/Scope.cpp
+++ b/lib/Sema/Scope.cpp
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Sema/Scope.h"
+#include "llvm/Support/raw_ostream.h"
 
 using namespace clang;
 
@@ -36,16 +37,24 @@
     FnParent       = parent->FnParent;
     BlockParent    = parent->BlockParent;
     TemplateParamParent = parent->TemplateParamParent;
+    MSLocalManglingParent = parent->MSLocalManglingParent;
   } else {
     Depth = 0;
     PrototypeDepth = 0;
     PrototypeIndex = 0;
-    FnParent = BlockParent = 0;
+    MSLocalManglingParent = FnParent = BlockParent = 0;
     TemplateParamParent = 0;
+    MSLocalManglingNumber = 1;
   }
 
   // If this scope is a function or contains breaks/continues, remember it.
   if (flags & FnScope)            FnParent = this;
+  // The MS mangler uses the number of scopes that can hold declarations as
+  // part of an external name.
+  if (Flags & (ClassScope | FnScope)) {
+    MSLocalManglingNumber = getMSLocalManglingNumber();
+    MSLocalManglingParent = this;
+  }
   if (flags & BreakScope)         BreakParent = this;
   if (flags & ContinueScope)      ContinueParent = this;
   if (flags & BlockScope)         BlockParent = this;
@@ -53,6 +62,16 @@
 
   // If this is a prototype scope, record that.
   if (flags & FunctionPrototypeScope) PrototypeDepth++;
+  if (flags & DeclScope) {
+    if (flags & FunctionPrototypeScope)
+      ; // Prototype scopes are uninteresting.
+    else if ((flags & ClassScope) && getParent()->isClassScope())
+      ; // Nested class scopes aren't ambiguous.
+    else if ((flags & ClassScope) && getParent()->getFlags() == DeclScope)
+      ; // Classes inside of namespaces aren't ambiguous.
+    else
+      incrementMSLocalManglingNumber();
+  }
 
   DeclsInScope.clear();
   UsingDirectives.clear();
@@ -69,3 +88,92 @@
   }
   return false;
 }
+
+void Scope::AddFlags(unsigned FlagsToSet) {
+  assert((FlagsToSet & ~(BreakScope | ContinueScope)) == 0 &&
+         "Unsupported scope flags");
+  if (FlagsToSet & BreakScope) {
+    assert((Flags & BreakScope) == 0 && "Already set");
+    BreakParent = this;
+  }
+  if (FlagsToSet & ContinueScope) {
+    assert((Flags & ContinueScope) == 0 && "Already set");
+    ContinueParent = this;
+  }
+  Flags |= FlagsToSet;
+}
+
+void Scope::dump() const { dumpImpl(llvm::errs()); }
+
+void Scope::dumpImpl(raw_ostream &OS) const {
+  unsigned Flags = getFlags();
+  bool HasFlags = Flags != 0;
+
+  if (HasFlags)
+    OS << "Flags: ";
+
+  while (Flags) {
+    if (Flags & FnScope) {
+      OS << "FnScope";
+      Flags &= ~FnScope;
+    } else if (Flags & BreakScope) {
+      OS << "BreakScope";
+      Flags &= ~BreakScope;
+    } else if (Flags & ContinueScope) {
+      OS << "ContinueScope";
+      Flags &= ~ContinueScope;
+    } else if (Flags & DeclScope) {
+      OS << "DeclScope";
+      Flags &= ~DeclScope;
+    } else if (Flags & ControlScope) {
+      OS << "ControlScope";
+      Flags &= ~ControlScope;
+    } else if (Flags & ClassScope) {
+      OS << "ClassScope";
+      Flags &= ~ClassScope;
+    } else if (Flags & BlockScope) {
+      OS << "BlockScope";
+      Flags &= ~BlockScope;
+    } else if (Flags & TemplateParamScope) {
+      OS << "TemplateParamScope";
+      Flags &= ~TemplateParamScope;
+    } else if (Flags & FunctionPrototypeScope) {
+      OS << "FunctionPrototypeScope";
+      Flags &= ~FunctionPrototypeScope;
+    } else if (Flags & FunctionDeclarationScope) {
+      OS << "FunctionDeclarationScope";
+      Flags &= ~FunctionDeclarationScope;
+    } else if (Flags & AtCatchScope) {
+      OS << "AtCatchScope";
+      Flags &= ~AtCatchScope;
+    } else if (Flags & ObjCMethodScope) {
+      OS << "ObjCMethodScope";
+      Flags &= ~ObjCMethodScope;
+    } else if (Flags & SwitchScope) {
+      OS << "SwitchScope";
+      Flags &= ~SwitchScope;
+    } else if (Flags & TryScope) {
+      OS << "TryScope";
+      Flags &= ~TryScope;
+    } else if (Flags & FnTryCatchScope) {
+      OS << "FnTryCatchScope";
+      Flags &= ~FnTryCatchScope;
+    } else if (Flags & OpenMPDirectiveScope) {
+      OS << "OpenMPDirectiveScope";
+      Flags &= ~OpenMPDirectiveScope;
+    }
+
+    if (Flags)
+      OS << " | ";
+  }
+  if (HasFlags)
+    OS << '\n';
+
+  if (const Scope *Parent = getParent())
+    OS << "Parent: (clang::Scope*)" << Parent << '\n';
+
+  OS << "Depth: " << Depth << '\n';
+  OS << "MSLocalManglingNumber: " << getMSLocalManglingNumber() << '\n';
+  if (const DeclContext *DC = getEntity())
+    OS << "Entity : (clang::DeclContext*)" << DC << '\n';
+}
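
For illustration only: roughly why Scope now carries a Microsoft local mangling number. Under the Microsoft ABI, same-named entities in different local scopes of one function are distinguished in their mangled names by a per-scope counter, which is what MSLocalManglingNumber tracks. A hypothetical example:

    int f(bool which) {
      if (which) {
        static int cache = 1;   // mangled with the first block's scope number
        return cache;
      } else {
        static int cache = 2;   // mangled with the second block's scope number
        return cache;
      }
    }
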
diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp
index 8b3493e..1e54c5a 100644
--- a/lib/Sema/ScopeInfo.cpp
+++ b/lib/Sema/ScopeInfo.cpp
@@ -26,6 +26,12 @@
   HasBranchProtectedScope = false;
   HasBranchIntoScope = false;
   HasIndirectGoto = false;
+  HasDroppedStmt = false;
+  ObjCShouldCallSuper = false;
+  ObjCIsDesignatedInit = false;
+  ObjCWarnForNoDesignatedInitChain = false;
+  ObjCIsSecondaryInit = false;
+  ObjCWarnForNoInitDelegation = false;
 
   SwitchStack.clear();
   Returns.clear();
@@ -184,10 +190,11 @@
   ThisUse->markSafe();
 }
 
-void LambdaScopeInfo::getPotentialVariableCapture(unsigned Idx, VarDecl *&VD, Expr *&E) {
+void LambdaScopeInfo::getPotentialVariableCapture(unsigned Idx, VarDecl *&VD,
+                                                  Expr *&E) const {
   assert(Idx < getNumPotentialVariableCaptures() &&
-    "Index of potential capture must be within 0 to less than the "
-    "number of captures!");
+         "Index of potential capture must be within 0 to less than the "
+         "number of captures!");
   E = PotentiallyCapturingExprs[Idx];
   if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
     VD = dyn_cast<VarDecl>(DRE->getFoundDecl());
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 4d01fb0..3612bb9 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -13,7 +13,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Sema/SemaInternal.h"
-#include "TargetAttributesSema.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/ASTDiagnostic.h"
 #include "clang/AST/DeclCXX.h"
@@ -70,13 +69,17 @@
 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
            TranslationUnitKind TUKind,
            CodeCompleteConsumer *CodeCompleter)
-  : TheTargetAttributesSema(0), ExternalSource(0),
+  : ExternalSource(0),
     isMultiplexExternalSource(false), FPFeatures(pp.getLangOpts()),
     LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
     Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
     CollectStats(false), CodeCompleter(CodeCompleter),
     CurContext(0), OriginalLexicalContext(0),
-    PackContext(0), MSStructPragmaOn(false), VisContext(0),
+    PackContext(0), MSStructPragmaOn(false),
+    MSPointerToMemberRepresentationMethod(
+        LangOpts.getMSPointerToMemberRepresentationMethod()),
+    VtorDispModeStack(1, MSVtorDispAttr::Mode(LangOpts.VtorDispMode)),
+    VisContext(0),
     IsBuildingRecoveryCallExpr(false),
     ExprNeedsCleanups(false), LateTemplateParser(0), OpaqueParser(0),
     IdResolver(pp), StdInitializerList(0), CXXTypeInfoDecl(0), MSVCGuidDecl(0),
@@ -86,7 +89,7 @@
     NSDictionaryDecl(0), DictionaryWithObjectsMethod(0),
     GlobalNewDeleteDeclared(false),
     TUKind(TUKind),
-    NumSFINAEErrors(0), InFunctionDeclarator(0),
+    NumSFINAEErrors(0),
     AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
     NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
     CurrentInstantiationScope(0), DisableTypoCorrection(false),
@@ -120,6 +123,12 @@
   InitDataSharingAttributesStack();
 }
 
+void Sema::addImplicitTypedef(StringRef Name, QualType T) {
+  DeclarationName DN = &Context.Idents.get(Name);
+  if (IdResolver.begin(DN) == IdResolver.end())
+    PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
+}
+
 void Sema::Initialize() {
   // Tell the AST consumer about this Sema object.
   Consumer.Initialize(Context);
@@ -172,20 +181,36 @@
       PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
   }
 
+  // Initialize Microsoft "predefined C++ types".
+  if (PP.getLangOpts().MSVCCompat && PP.getLangOpts().CPlusPlus) {
+    if (IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
+      PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
+                        TUScope);
+
+    addImplicitTypedef("size_t", Context.getSizeType());
+  }
+
+  // Initialize predefined OpenCL types.
+  if (PP.getLangOpts().OpenCL) {
+    addImplicitTypedef("image1d_t", Context.OCLImage1dTy);
+    addImplicitTypedef("image1d_array_t", Context.OCLImage1dArrayTy);
+    addImplicitTypedef("image1d_buffer_t", Context.OCLImage1dBufferTy);
+    addImplicitTypedef("image2d_t", Context.OCLImage2dTy);
+    addImplicitTypedef("image2d_array_t", Context.OCLImage2dArrayTy);
+    addImplicitTypedef("image3d_t", Context.OCLImage3dTy);
+    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
+    addImplicitTypedef("event_t", Context.OCLEventTy);
+  }
+
   DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
   if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
     PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
 }
 
 Sema::~Sema() {
-  for (LateParsedTemplateMapT::iterator I = LateParsedTemplateMap.begin(),
-                                        E = LateParsedTemplateMap.end();
-       I != E; ++I)
-    delete I->second;
+  llvm::DeleteContainerSeconds(LateParsedTemplateMap);
   if (PackContext) FreePackedContext();
   if (VisContext) FreeVisContext();
-  delete TheTargetAttributesSema;
-  MSStructPragmaOn = false;
   // Kill all the active scopes.
   for (unsigned I = 1, E = FunctionScopes.size(); I != E; ++I)
     delete FunctionScopes[I];
@@ -230,7 +255,7 @@
   // If the function is already unavailable, it's not an error.
   if (fn->hasAttr<UnavailableAttr>()) return true;
 
-  fn->addAttr(new (Context) UnavailableAttr(loc, Context, msg));
+  fn->addAttr(UnavailableAttr::CreateImplicit(Context, msg, loc));
   return true;
 }
 
@@ -384,25 +409,6 @@
   return false;
 }
 
-namespace {
-  struct SortUndefinedButUsed {
-    const SourceManager &SM;
-    explicit SortUndefinedButUsed(SourceManager &SM) : SM(SM) {}
-
-    bool operator()(const std::pair<NamedDecl *, SourceLocation> &l,
-                    const std::pair<NamedDecl *, SourceLocation> &r) const {
-      if (l.second.isValid() && !r.second.isValid())
-        return true;
-      if (!l.second.isValid() && r.second.isValid())
-        return false;
-      if (l.second != r.second)
-        return SM.isBeforeInTranslationUnit(l.second, r.second);
-      return SM.isBeforeInTranslationUnit(l.first->getLocation(),
-                                          r.first->getLocation());
-    }
-  };
-}
-
 /// Obtains a sorted list of functions that are undefined but ODR-used.
 void Sema::getUndefinedButUsed(
     SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
@@ -435,8 +441,19 @@
 
   // Sort (in order of use site) so that we're not dependent on the iteration
   // order through an llvm::DenseMap.
+  SourceManager &SM = Context.getSourceManager();
   std::sort(Undefined.begin(), Undefined.end(),
-            SortUndefinedButUsed(Context.getSourceManager()));
+            [&SM](const std::pair<NamedDecl *, SourceLocation> &l,
+                  const std::pair<NamedDecl *, SourceLocation> &r) {
+    if (l.second.isValid() && !r.second.isValid())
+      return true;
+    if (!l.second.isValid() && r.second.isValid())
+      return false;
+    if (l.second != r.second)
+      return SM.isBeforeInTranslationUnit(l.second, r.second);
+    return SM.isBeforeInTranslationUnit(l.first->getLocation(),
+                                        r.first->getLocation());
+  });
 }
 
 /// checkUndefinedButUsed - Check for undefined objects with internal linkage
@@ -603,7 +620,15 @@
     // so it will find some names that are not required to be found. This is
     // valid, but we could do better by diagnosing if an instantiation uses a
     // name that was not visible at its first point of instantiation.
+    if (ExternalSource) {
+      // Load pending instantiations from the external source.
+      SmallVector<PendingImplicitInstantiation, 4> Pending;
+      ExternalSource->ReadPendingInstantiations(Pending);
+      PendingInstantiations.insert(PendingInstantiations.begin(),
+                                   Pending.begin(), Pending.end());
+    }
     PerformPendingInstantiations();
+
     CheckDelayedMemberExceptionSpecs();
   }
 
@@ -1215,7 +1240,7 @@
             ZeroArgCallReturnTy = QualType();
             Ambiguous = true;
           } else
-            ZeroArgCallReturnTy = OverloadDecl->getResultType();
+            ZeroArgCallReturnTy = OverloadDecl->getReturnType();
         }
       }
     }
@@ -1244,7 +1269,7 @@
   if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
     if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
       if (Fun->getMinRequiredArguments() == 0)
-        ZeroArgCallReturnTy = Fun->getResultType();
+        ZeroArgCallReturnTy = Fun->getReturnType();
       return true;
     }
   }
@@ -1261,8 +1286,8 @@
 
   if (const FunctionProtoType *FPT =
       dyn_cast_or_null<FunctionProtoType>(FunTy)) {
-    if (FPT->getNumArgs() == 0)
-      ZeroArgCallReturnTy = FunTy->getResultType();
+    if (FPT->getNumParams() == 0)
+      ZeroArgCallReturnTy = FunTy->getReturnType();
     return true;
   }
   return false;
@@ -1313,7 +1338,7 @@
   for (OverloadExpr::decls_iterator It = Overloads.begin(),
          DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
     const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
-    QualType OverloadResultTy = OverloadDecl->getResultType();
+    QualType OverloadResultTy = OverloadDecl->getReturnType();
     if (IsPlausibleResult(OverloadResultTy))
       PlausibleOverloads.addDecl(It.getDecl());
   }
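
A shape-only sketch of the comparator rewrite performed above, with a plain int standing in for SourceLocation (0 meaning "invalid") and a string standing in for the declaration; the ordering idea is the same, valid use sites first, then source order.

    #include <algorithm>
    #include <utility>
    #include <vector>

    void sortUses(std::vector<std::pair<const char *, int>> &Undefined) {
      std::sort(Undefined.begin(), Undefined.end(),
                [](const std::pair<const char *, int> &l,
                   const std::pair<const char *, int> &r) {
        if (l.second != 0 && r.second == 0)   // valid use sites sort first
          return true;
        if (l.second == 0 && r.second != 0)
          return false;
        return l.second < r.second;           // then in "source" order
      });
    }
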
diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp
index 61dc157..f7806d2 100644
--- a/lib/Sema/SemaAccess.cpp
+++ b/lib/Sema/SemaAccess.cpp
@@ -288,12 +288,10 @@
     if (Derived->isDependentContext() && !Derived->hasDefinition())
       return AR_dependent;
     
-    for (CXXRecordDecl::base_class_const_iterator
-           I = Derived->bases_begin(), E = Derived->bases_end(); I != E; ++I) {
-
+    for (const auto &I : Derived->bases()) {
       const CXXRecordDecl *RD;
 
-      QualType T = I->getType();
+      QualType T = I.getType();
       if (const RecordType *RT = T->getAs<RecordType>()) {
         RD = cast<CXXRecordDecl>(RT->getDecl());
       } else if (const InjectedClassNameType *IT
@@ -376,18 +374,16 @@
   if (FriendTy.getQualifiers() != ContextTy.getQualifiers())
     return false;
 
-  if (FriendTy->getNumArgs() != ContextTy->getNumArgs())
+  if (FriendTy->getNumParams() != ContextTy->getNumParams())
     return false;
 
-  if (!MightInstantiateTo(S,
-                          ContextTy->getResultType(),
-                          FriendTy->getResultType()))
+  if (!MightInstantiateTo(S, ContextTy->getReturnType(),
+                          FriendTy->getReturnType()))
     return false;
 
-  for (unsigned I = 0, E = FriendTy->getNumArgs(); I != E; ++I)
-    if (!MightInstantiateTo(S,
-                            ContextTy->getArgType(I),
-                            FriendTy->getArgType(I)))
+  for (unsigned I = 0, E = FriendTy->getNumParams(); I != E; ++I)
+    if (!MightInstantiateTo(S, ContextTy->getParamType(I),
+                            FriendTy->getParamType(I)))
       return false;
 
   return true;
@@ -575,10 +571,7 @@
   AccessResult OnFailure = AR_inaccessible;
 
   // Okay, check friends.
-  for (CXXRecordDecl::friend_iterator I = Class->friend_begin(),
-         E = Class->friend_end(); I != E; ++I) {
-    FriendDecl *Friend = *I;
-
+  for (auto *Friend : Class->friends()) {
     switch (MatchesFriend(S, EC, Friend)) {
     case AR_accessible:
       return AR_accessible;
@@ -648,18 +641,16 @@
       EverDependent = true;
 
     // Recurse into the base classes.
-    for (CXXRecordDecl::base_class_const_iterator
-           I = Cur->bases_begin(), E = Cur->bases_end(); I != E; ++I) {
-
+    for (const auto &I : Cur->bases()) {
       // If this is private inheritance, then a public member of the
       // base will not have any access in classes derived from Cur.
       unsigned BasePrivateDepth = PrivateDepth;
-      if (I->getAccessSpecifier() == AS_private)
+      if (I.getAccessSpecifier() == AS_private)
         BasePrivateDepth = CurPath.size() - 1;
 
       const CXXRecordDecl *RD;
 
-      QualType T = I->getType();
+      QualType T = I.getType();
       if (const RecordType *RT = T->getAs<RecordType>()) {
         RD = cast<CXXRecordDecl>(RT->getDecl());
       } else if (const InjectedClassNameType *IT
@@ -797,7 +788,7 @@
         // Emulate a MSVC bug where the creation of pointer-to-member
         // to protected member of base class is allowed but only from
         // static member functions.
-        if (S.getLangOpts().MicrosoftMode && !EC.Functions.empty())
+        if (S.getLangOpts().MSVCCompat && !EC.Functions.empty())
           if (CXXMethodDecl* MD = dyn_cast<CXXMethodDecl>(EC.Functions.front()))
             if (MD->isStatic()) return AR_accessible;
 
@@ -1082,15 +1073,15 @@
         (isa<FunctionTemplateDecl>(D) &&
          isa<CXXConstructorDecl>(
                 cast<FunctionTemplateDecl>(D)->getTemplatedDecl()))) {
-      S.Diag(D->getLocation(), diag::note_access_protected_restricted_ctordtor)
-        << isa<CXXDestructorDecl>(D);
-      return true;
+      return S.Diag(D->getLocation(),
+                    diag::note_access_protected_restricted_ctordtor)
+             << isa<CXXDestructorDecl>(D->getAsFunction());
     }
 
     // Otherwise, use the generic diagnostic.
-    S.Diag(D->getLocation(), diag::note_access_protected_restricted_object)
-      << S.Context.getTypeDeclType(ECRecord);
-    return true;
+    return S.Diag(D->getLocation(),
+                  diag::note_access_protected_restricted_object)
+           << S.Context.getTypeDeclType(ECRecord);
   }
 
   return false;
@@ -1140,11 +1131,9 @@
   // Check whether there's an AccessSpecDecl preceding this in the
   // chain of the DeclContext.
   bool isImplicit = true;
-  for (CXXRecordDecl::decl_iterator
-         I = DeclaringClass->decls_begin(), E = DeclaringClass->decls_end();
-       I != E; ++I) {
-    if (*I == ImmediateChild) break;
-    if (isa<AccessSpecDecl>(*I)) {
+  for (const auto *I : DeclaringClass->decls()) {
+    if (I == ImmediateChild) break;
+    if (isa<AccessSpecDecl>(I)) {
       isImplicit = false;
       break;
     }
@@ -1422,16 +1411,15 @@
                                          AccessTarget &Entity) {
   assert(Entity.getAccess() != AS_public && "called for public access!");
 
-  if (S.getLangOpts().MicrosoftMode &&
-      IsMicrosoftUsingDeclarationAccessBug(S, Loc, Entity))
-    return AR_accessible;
-
   switch (IsAccessible(S, EC, Entity)) {
   case AR_dependent:
     DelayDependentAccess(S, EC, Loc, Entity);
     return AR_dependent;
 
   case AR_inaccessible:
+    if (S.getLangOpts().MSVCCompat &&
+        IsMicrosoftUsingDeclarationAccessBug(S, Loc, Entity))
+      return AR_accessible;
     if (!Entity.isQuiet())
       DiagnoseBadAccess(S, Loc, EC, Entity);
     return AR_inaccessible;
@@ -1482,11 +1470,10 @@
   // However, this does not apply to local extern declarations.
 
   DeclContext *DC = D->getDeclContext();
-  if (FunctionDecl *FN = dyn_cast<FunctionDecl>(D)) {
-    if (D->getLexicalDeclContext()->isFunctionOrMethod())
-      DC = D->getLexicalDeclContext();
-    else
-      DC = FN;
+  if (D->isLocalExternDecl()) {
+    DC = D->getLexicalDeclContext();
+  } else if (FunctionDecl *FN = dyn_cast<FunctionDecl>(D)) {
+    DC = FN;
   } else if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) {
     DC = cast<DeclContext>(TD->getTemplatedDecl());
   }
@@ -1750,10 +1737,7 @@
 
 /// Checks access to the target of a friend declaration.
 Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) {
-  assert(isa<CXXMethodDecl>(target) ||
-         (isa<FunctionTemplateDecl>(target) &&
-          isa<CXXMethodDecl>(cast<FunctionTemplateDecl>(target)
-                               ->getTemplatedDecl())));
+  assert(isa<CXXMethodDecl>(target->getAsFunction()));
 
   // Friendship lookup is a redeclaration lookup, so there's never an
   // inheritance path modifying access.
@@ -1762,10 +1746,7 @@
   if (!getLangOpts().AccessControl || access == AS_public)
     return AR_accessible;
 
-  CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(target);
-  if (!method)
-    method = cast<CXXMethodDecl>(
-                     cast<FunctionTemplateDecl>(target)->getTemplatedDecl());
+  CXXMethodDecl *method = cast<CXXMethodDecl>(target->getAsFunction());
   assert(method->getQualifier());
 
   AccessTarget entity(Context, AccessTarget::Member,
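
For illustration only (invented names): the MSVC quirk the access check above keeps emulating, now only on the otherwise-inaccessible path and under the renamed -fms-compatibility flag. Strict C++ requires the protected member to be named through the deriving class, but MSVC accepts the form below inside a static member function.

    class Base {
    protected:
      int value;
    };

    class Derived : public Base {
    public:
      static int Base::*get() {
        return &Base::value;   // rejected by strict C++ ([class.protected]);
                               // accepted here under -fms-compatibility
      }
    };
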
diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp
index 8f9ab32..2e344ff 100644
--- a/lib/Sema/SemaAttr.cpp
+++ b/lib/Sema/SemaAttr.cpp
@@ -122,18 +122,23 @@
   // Otherwise, check to see if we need a max field alignment attribute.
   if (unsigned Alignment = Stack->getAlignment()) {
     if (Alignment == PackStackEntry::kMac68kAlignmentSentinel)
-      RD->addAttr(::new (Context) AlignMac68kAttr(SourceLocation(), Context));
+      RD->addAttr(AlignMac68kAttr::CreateImplicit(Context));
     else
-      RD->addAttr(::new (Context) MaxFieldAlignmentAttr(SourceLocation(),
-                                                        Context,
+      RD->addAttr(MaxFieldAlignmentAttr::CreateImplicit(Context,
                                                         Alignment * 8));
   }
 }
 
 void Sema::AddMsStructLayoutForRecord(RecordDecl *RD) {
-  if (!MSStructPragmaOn)
-    return;
-  RD->addAttr(::new (Context) MsStructAttr(SourceLocation(), Context));
+  if (MSStructPragmaOn)
+    RD->addAttr(MsStructAttr::CreateImplicit(Context));
+
+  // FIXME: We should merge AddAlignmentAttributesForRecord with
+  // AddMsStructLayoutForRecord into AddPragmaAttributesForRecord, which takes
+  // all active pragmas and applies them as attributes to class definitions.
+  if (VtorDispModeStack.back() != getLangOpts().VtorDispMode)
+    RD->addAttr(
+        MSVtorDispAttr::CreateImplicit(Context, VtorDispModeStack.back()));
 }
 
 void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
@@ -247,8 +252,8 @@
       // If a name was specified then failure indicates the name
       // wasn't found. Otherwise failure indicates the stack was
       // empty.
-      Diag(PragmaLoc, diag::warn_pragma_pack_pop_failed)
-        << (Name ? "no record matching name" : "stack empty");
+      Diag(PragmaLoc, diag::warn_pragma_pop_failed)
+          << "pack" << (Name ? "no record matching name" : "stack empty");
 
       // FIXME: Warn about popping named records as MSVC does.
     } else {
@@ -288,6 +293,38 @@
   Consumer.HandleDetectMismatch(Name, Value);
 }
 
+void Sema::ActOnPragmaMSPointersToMembers(
+    LangOptions::PragmaMSPointersToMembersKind RepresentationMethod,
+    SourceLocation PragmaLoc) {
+  MSPointerToMemberRepresentationMethod = RepresentationMethod;
+  ImplicitMSInheritanceAttrLoc = PragmaLoc;
+}
+
+void Sema::ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind,
+                                 SourceLocation PragmaLoc,
+                                 MSVtorDispAttr::Mode Mode) {
+  switch (Kind) {
+  case PVDK_Set:
+    VtorDispModeStack.back() = Mode;
+    break;
+  case PVDK_Push:
+    VtorDispModeStack.push_back(Mode);
+    break;
+  case PVDK_Reset:
+    VtorDispModeStack.clear();
+    VtorDispModeStack.push_back(MSVtorDispAttr::Mode(LangOpts.VtorDispMode));
+    break;
+  case PVDK_Pop:
+    VtorDispModeStack.pop_back();
+    if (VtorDispModeStack.empty()) {
+      Diag(PragmaLoc, diag::warn_pragma_pop_failed) << "vtordisp"
+                                                    << "stack empty";
+      VtorDispModeStack.push_back(MSVtorDispAttr::Mode(LangOpts.VtorDispMode));
+    }
+    break;
+  }
+}
+
 void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
                              SourceLocation PragmaLoc) {
 
@@ -312,7 +349,7 @@
   if (VD->isUsed())
     Diag(PragmaLoc, diag::warn_used_but_marked_unused) << Name;
 
-  VD->addAttr(::new (Context) UnusedAttr(IdTok.getLocation(), Context));
+  VD->addAttr(UnusedAttr::CreateImplicit(Context, IdTok.getLocation()));
 }
 
 void Sema::AddCFAuditedAttribute(Decl *D) {
@@ -324,11 +361,11 @@
       D->hasAttr<CFUnknownTransferAttr>())
     return;
 
-  D->addAttr(::new (Context) CFAuditedTransferAttr(Loc, Context));
+  D->addAttr(CFAuditedTransferAttr::CreateImplicit(Context, Loc));
 }
 
 typedef std::vector<std::pair<unsigned, SourceLocation> > VisStack;
-enum { NoVisibility = (unsigned) -1 };
+enum : unsigned { NoVisibility = ~0U };
 
 void Sema::AddPushedVisibilityAttribute(Decl *D) {
   if (!VisContext)
@@ -346,7 +383,7 @@
     = (VisibilityAttr::VisibilityType) rawType;
   SourceLocation loc = Stack->back().second;
 
-  D->addAttr(::new (Context) VisibilityAttr(loc, Context, type));
+  D->addAttr(VisibilityAttr::CreateImplicit(Context, type, loc));
 }
 
 /// FreeVisContext - Deallocate and null out VisContext.
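
For illustration only: the pragma forms the new ActOnPragmaMSVtorDisp handler corresponds to (a Microsoft extension, meaningful for the Microsoft C++ ABI). Records defined while the active mode differs from the command-line default now get an implicit MSVtorDispAttr, and popping an empty stack reuses the shared warn_pragma_pop_failed diagnostic.

    #pragma vtordisp(push, 2)   // PVDK_Push: force vtordisp fields in this region
    struct A {
      virtual void f();
    };
    #pragma vtordisp(pop)       // PVDK_Pop: restore the previous mode
    #pragma vtordisp(1)         // PVDK_Set: change the current mode in place
    #pragma vtordisp()          // PVDK_Reset: back to the command-line default
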
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index 554a114..ce47cff 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -363,7 +363,7 @@
   explicit NestedNameSpecifierValidatorCCC(Sema &SRef)
       : SRef(SRef) {}
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     return SRef.isAcceptableNestedNameSpecifier(candidate.getCorrectionDecl());
   }
 
@@ -484,7 +484,7 @@
   
   // FIXME: Deal with ambiguities cleanly.
 
-  if (Found.empty() && !ErrorRecoveryLookup && !getLangOpts().MicrosoftMode) {
+  if (Found.empty() && !ErrorRecoveryLookup && !getLangOpts().MSVCCompat) {
     // We haven't found anything, and we're not recovering from a
     // different kind of error, so look for typos.
     DeclarationName Name = Found.getLookupName();
@@ -646,7 +646,7 @@
   // public:
   //   void foo() { D::foo2(); }
   // };
-  if (getLangOpts().MicrosoftMode) {
+  if (getLangOpts().MSVCCompat) {
     DeclContext *DC = LookupCtx ? LookupCtx : CurContext;
     if (DC->isDependentContext() && DC->isFunctionOrMethod()) {
       SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
@@ -654,20 +654,23 @@
     }
   }
 
-  unsigned DiagID;
-  if (!Found.empty())
-    DiagID = diag::err_expected_class_or_namespace;
-  else if (SS.isSet()) {
-    Diag(IdentifierLoc, diag::err_no_member) 
-      << &Identifier << LookupCtx << SS.getRange();
-    return true;
-  } else
-    DiagID = diag::err_undeclared_var_use;
-
-  if (SS.isSet())
-    Diag(IdentifierLoc, DiagID) << &Identifier << SS.getRange();
+  if (!Found.empty()) {
+    if (TypeDecl *TD = Found.getAsSingle<TypeDecl>())
+      Diag(IdentifierLoc, diag::err_expected_class_or_namespace)
+          << QualType(TD->getTypeForDecl(), 0) << getLangOpts().CPlusPlus;
+    else {
+      Diag(IdentifierLoc, diag::err_expected_class_or_namespace)
+          << &Identifier << getLangOpts().CPlusPlus;
+      if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
+        Diag(ND->getLocation(),
+             diag::note_expected_class_or_namespace_declared_here)
+          << &Identifier;
+    }
+  } else if (SS.isSet())
+    Diag(IdentifierLoc, diag::err_no_member) << &Identifier << LookupCtx
+                                             << SS.getRange();
   else
-    Diag(IdentifierLoc, DiagID) << &Identifier;
+    Diag(IdentifierLoc, diag::err_undeclared_var_use) << &Identifier;
 
   return true;
 }
@@ -698,7 +701,7 @@
 
   QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
   if (!T->isDependentType() && !T->getAs<TagType>()) {
-    Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class) 
+    Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class_or_namespace) 
       << T << getLangOpts().CPlusPlus;
     return true;
   }
@@ -749,7 +752,8 @@
   TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
   translateTemplateArguments(TemplateArgsIn, TemplateArgs);
 
-  if (DependentTemplateName *DTN = Template.get().getAsDependentTemplateName()){
+  DependentTemplateName *DTN = Template.get().getAsDependentTemplateName();
+  if (DTN && DTN->isIdentifier()) {
     // Handle a dependent template specialization for which we cannot resolve
     // the template name.
     assert(DTN->getQualifier() == SS.getScopeRep());
@@ -775,20 +779,20 @@
               CCLoc);
     return false;
   }
-  
-  
-  if (Template.get().getAsOverloadedTemplate() ||
-      isa<FunctionTemplateDecl>(Template.get().getAsTemplateDecl())) {
+
+  TemplateDecl *TD = Template.get().getAsTemplateDecl();
+  if (Template.get().getAsOverloadedTemplate() || DTN ||
+      isa<FunctionTemplateDecl>(TD) || isa<VarTemplateDecl>(TD)) {
     SourceRange R(TemplateNameLoc, RAngleLoc);
     if (SS.getRange().isValid())
       R.setBegin(SS.getRange().getBegin());
-      
+
     Diag(CCLoc, diag::err_non_type_template_in_nested_name_specifier)
-      << Template.get() << R;
+      << (TD && isa<VarTemplateDecl>(TD)) << Template.get() << R;
     NoteAllFoundTemplates(Template.get());
     return true;
   }
-                                
+
   // We were able to resolve the template name to an actual template. 
   // Build an appropriate nested-name-specifier.
   QualType T = CheckTemplateIdType(Template.get(), TemplateNameLoc, 
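
For illustration only: the situation described by the comment visible at the top of this file's hunks, which the -fms-compatibility path keeps supporting. The nested-name-specifier cannot be resolved at definition time because the name comes from a dependent base, so it is kept as written and resolved at instantiation.

    class C {
    public:
      static void foo2() {}
    };
    template <class T> class A {
    public:
      typedef C D;
    };
    template <class T> class B : public A<T> {
    public:
      void foo() { D::foo2(); }   // 'D' only becomes visible through A<T> at
                                  // instantiation time; accepted under
                                  // -fms-compatibility, rejected by strict C++
    };
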
diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp
index ba00b71..5d49225 100644
--- a/lib/Sema/SemaCast.cpp
+++ b/lib/Sema/SemaCast.cpp
@@ -21,6 +21,7 @@
 #include "clang/AST/ExprObjC.h"
 #include "clang/AST/RecordLayout.h"
 #include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
 #include "clang/Sema/Initialization.h"
 #include "llvm/ADT/SmallVector.h"
 #include <set>
@@ -1346,7 +1347,8 @@
   QualType DestClass(DestMemPtr->getClass(), 0);
   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                   /*DetectVirtual=*/true);
-  if (!Self.IsDerivedFrom(SrcClass, DestClass, Paths)) {
+  if (Self.RequireCompleteType(OpRange.getBegin(), SrcClass, 0) ||
+      !Self.IsDerivedFrom(SrcClass, DestClass, Paths)) {
     return TC_NotApplicable;
   }
 
@@ -1431,6 +1433,10 @@
       msg = 0;
       return TC_Failed;
     }
+  } else if (DestType->isMemberPointerType()) {
+    if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+      Self.RequireCompleteType(OpRange.getBegin(), DestType, 0);
+    }
   }
 
   InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
@@ -1778,6 +1784,13 @@
       return TC_Failed;
     }
 
+    if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+      // We need to determine the inheritance model that the class will use,
+      // if we haven't done so yet.
+      Self.RequireCompleteType(OpRange.getBegin(), SrcType, 0);
+      Self.RequireCompleteType(OpRange.getBegin(), DestType, 0);
+    }
+
     // Don't allow casting between member pointers of different sizes.
     if (Self.Context.getTypeSize(DestMemPtr) !=
         Self.Context.getTypeSize(SrcMemPtr)) {
@@ -2074,6 +2087,8 @@
 
   if (Self.getLangOpts().ObjCAutoRefCount && tcr == TC_Success)
     checkObjCARCConversion(CCK);
+  else if (Self.getLangOpts().ObjC1 && tcr == TC_Success)
+    Self.CheckTollFreeBridgeCast(DestType, SrcExpr.get());
 
   if (tcr != TC_Success && msg != 0) {
     if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
@@ -2168,6 +2183,21 @@
 
   assert(!SrcType->isPlaceholderType());
 
+  // OpenCL v1 s6.5: Casting a pointer to address space A to a pointer to
+  // address space B is illegal.
+  if (Self.getLangOpts().OpenCL && DestType->isPointerType() &&
+      SrcType->isPointerType()) {
+    if (DestType->getPointeeType().getAddressSpace() !=
+        SrcType->getPointeeType().getAddressSpace()) {
+      Self.Diag(OpRange.getBegin(),
+                diag::err_typecheck_incompatible_address_space)
+          << SrcType << DestType << Sema::AA_Casting
+          << SrcExpr.get()->getSourceRange();
+      SrcExpr = ExprError();
+      return;
+    }
+  }
+
   if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
                                diag::err_typecheck_cast_to_incomplete)) {
     SrcExpr = ExprError();
@@ -2319,6 +2349,9 @@
       return;
     }
   }
+  else if (Self.getLangOpts().ObjC1)
+    Self.CheckTollFreeBridgeCast(DestType, SrcExpr.get());
+  
   DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
   DiagnoseBadFunctionCast(Self, SrcExpr, DestType);
   Kind = Self.PrepareScalarCast(SrcExpr, DestType);
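
A hedged sketch (invented names) of why the member-pointer cast paths above now call RequireCompleteType under the Microsoft C++ ABI: the representation, and hence the size, of a pointer-to-member depends on the class's inheritance model, so the model has to be settled before the "different sizes" check can run.

    struct Single { int a; };
    struct Other  { int b; };

    int Other::*convert(int Single::*p) {
      // Completing Single and Other lets the MS ABI pick an inheritance model
      // (single/multiple/virtual), which fixes each member pointer's size.
      return reinterpret_cast<int Other::*>(p);
    }
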
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 0b95c48..27cc8a3 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -32,9 +32,9 @@
 #include "clang/Sema/Lookup.h"
 #include "clang/Sema/ScopeInfo.h"
 #include "clang/Sema/Sema.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallBitVector.h"
 #include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/ConvertUTF.h"
 #include "llvm/Support/raw_ostream.h"
 #include <limits>
@@ -142,6 +142,7 @@
     break;
   case Builtin::BI__builtin_stdarg_start:
   case Builtin::BI__builtin_va_start:
+  case Builtin::BI__va_start:
     if (SemaBuiltinVAStart(TheCall))
       return ExprError();
     break;
@@ -302,11 +303,18 @@
   if (BuiltinID >= Builtin::FirstTSBuiltin) {
     switch (Context.getTargetInfo().getTriple().getArch()) {
       case llvm::Triple::arm:
+      case llvm::Triple::armeb:
       case llvm::Triple::thumb:
+      case llvm::Triple::thumbeb:
         if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
           return ExprError();
         break;
+      case llvm::Triple::arm64:
+        if (CheckARM64BuiltinFunctionCall(BuiltinID, TheCall))
+          return ExprError();
+        break;
       case llvm::Triple::aarch64:
+      case llvm::Triple::aarch64_be:
         if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
           return ExprError();
         break;
@@ -317,6 +325,11 @@
         if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
           return ExprError();
         break;
+      case llvm::Triple::x86:
+      case llvm::Triple::x86_64:
+        if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
+          return ExprError();
+        break;
       default:
         break;
     }
@@ -326,9 +339,9 @@
 }
 
 // Get the valid immediate range for the specified NEON type code.
-static unsigned RFT(unsigned t, bool shift = false) {
+static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
   NeonTypeFlags Type(t);
-  int IsQuad = Type.isQuad();
+  int IsQuad = ForceQuad ? true : Type.isQuad();
   switch (Type.getEltType()) {
   case NeonTypeFlags::Int8:
   case NeonTypeFlags::Poly8:
@@ -341,6 +354,8 @@
   case NeonTypeFlags::Int64:
   case NeonTypeFlags::Poly64:
     return shift ? 63 : (1 << IsQuad) - 1;
+  case NeonTypeFlags::Poly128:
+    return shift ? 127 : (1 << IsQuad) - 1;
   case NeonTypeFlags::Float16:
     assert(!shift && "cannot shift float types!");
     return (4 << IsQuad) - 1;
@@ -358,7 +373,7 @@
 /// the vector type specified by the NeonTypeFlags.  This is used to check
 /// the pointer arguments for Neon load/store intrinsics.
 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
-                               bool IsAArch64) {
+                               bool IsPolyUnsigned, bool IsInt64Long) {
   switch (Flags.getEltType()) {
   case NeonTypeFlags::Int8:
     return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
@@ -367,13 +382,19 @@
   case NeonTypeFlags::Int32:
     return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
   case NeonTypeFlags::Int64:
-    return Flags.isUnsigned() ? Context.UnsignedLongLongTy : Context.LongLongTy;
+    if (IsInt64Long)
+      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
+    else
+      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
+                                : Context.LongLongTy;
   case NeonTypeFlags::Poly8:
-    return IsAArch64 ? Context.UnsignedCharTy : Context.SignedCharTy;
+    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
   case NeonTypeFlags::Poly16:
-    return IsAArch64 ? Context.UnsignedShortTy : Context.ShortTy;
+    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
   case NeonTypeFlags::Poly64:
-    return Context.UnsignedLongLongTy;
+    return Context.UnsignedLongTy;
+  case NeonTypeFlags::Poly128:
+    break;
   case NeonTypeFlags::Float16:
     return Context.HalfTy;
   case NeonTypeFlags::Float32:
@@ -384,24 +405,21 @@
   llvm_unreachable("Invalid NeonTypeFlag!");
 }
 
-bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
-                                           CallExpr *TheCall) {
-
+bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   llvm::APSInt Result;
-
   uint64_t mask = 0;
   unsigned TV = 0;
   int PtrArgNum = -1;
   bool HasConstPtr = false;
   switch (BuiltinID) {
-#define GET_NEON_AARCH64_OVERLOAD_CHECK
+#define GET_NEON_OVERLOAD_CHECK
 #include "clang/Basic/arm_neon.inc"
-#undef GET_NEON_AARCH64_OVERLOAD_CHECK
+#undef GET_NEON_OVERLOAD_CHECK
   }
 
   // For NEON intrinsics which are overloaded on vector element type, validate
   // the immediate which specifies which variant to emit.
-  unsigned ImmArg = TheCall->getNumArgs() - 1;
+  unsigned ImmArg = TheCall->getNumArgs()-1;
   if (mask) {
     if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
       return true;
@@ -409,7 +427,7 @@
     TV = Result.getLimitedValue(64);
     if ((TV > 63) || (mask & (1ULL << TV)) == 0)
       return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
-             << TheCall->getArg(ImmArg)->getSourceRange();
+        << TheCall->getArg(ImmArg)->getSourceRange();
   }
 
   if (PtrArgNum >= 0) {
@@ -419,7 +437,14 @@
       Arg = ICE->getSubExpr();
     ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
     QualType RHSTy = RHS.get()->getType();
-    QualType EltTy = getNeonEltType(NeonTypeFlags(TV), Context, true);
+
+    llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
+    bool IsPolyUnsigned =
+        Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::arm64;
+    bool IsInt64Long =
+        Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
+    QualType EltTy =
+        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
     if (HasConstPtr)
       EltTy = EltTy.withConst();
     QualType LHSTy = Context.getPointerType(EltTy);
@@ -438,9 +463,9 @@
   switch (BuiltinID) {
   default:
     return false;
-#define GET_NEON_AARCH64_IMMEDIATE_CHECK
+#define GET_NEON_IMMEDIATE_CHECK
 #include "clang/Basic/arm_neon.inc"
-#undef GET_NEON_AARCH64_IMMEDIATE_CHECK
+#undef GET_NEON_IMMEDIATE_CHECK
   }
   ;
 
@@ -462,11 +487,23 @@
   return false;
 }
 
-bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
+                                           CallExpr *TheCall) {
+  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+    return true;
+
+  return false;
+}
+
+bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
+                                        unsigned MaxWidth) {
   assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
-          BuiltinID == ARM::BI__builtin_arm_strex) &&
+          BuiltinID == ARM::BI__builtin_arm_strex ||
+          BuiltinID == ARM64::BI__builtin_arm_ldrex ||
+          BuiltinID == ARM64::BI__builtin_arm_strex) &&
          "unexpected ARM builtin");
-  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex;
+  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
+                 BuiltinID == ARM64::BI__builtin_arm_ldrex;
 
   DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
 
@@ -527,7 +564,8 @@
   }
 
   // But ARM doesn't have instructions to deal with 128-bit versions.
-  if (Context.getTypeSize(ValType) > 64) {
+  if (Context.getTypeSize(ValType) > MaxWidth) {
+    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
     Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size)
       << PointerArg->getType() << PointerArg->getSourceRange();
     return true;
@@ -573,53 +611,13 @@
 
   if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
       BuiltinID == ARM::BI__builtin_arm_strex) {
-    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall);
+    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
   }
 
-  uint64_t mask = 0;
-  unsigned TV = 0;
-  int PtrArgNum = -1;
-  bool HasConstPtr = false;
-  switch (BuiltinID) {
-#define GET_NEON_OVERLOAD_CHECK
-#include "clang/Basic/arm_neon.inc"
-#undef GET_NEON_OVERLOAD_CHECK
-  }
-  
-  // For NEON intrinsics which are overloaded on vector element type, validate
-  // the immediate which specifies which variant to emit.
-  unsigned ImmArg = TheCall->getNumArgs()-1;
-  if (mask) {
-    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
-      return true;
-    
-    TV = Result.getLimitedValue(64);
-    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
-      return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
-        << TheCall->getArg(ImmArg)->getSourceRange();
-  }
+  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+    return true;
 
-  if (PtrArgNum >= 0) {
-    // Check that pointer arguments have the specified type.
-    Expr *Arg = TheCall->getArg(PtrArgNum);
-    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
-      Arg = ICE->getSubExpr();
-    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
-    QualType RHSTy = RHS.get()->getType();
-    QualType EltTy = getNeonEltType(NeonTypeFlags(TV), Context, false);
-    if (HasConstPtr)
-      EltTy = EltTy.withConst();
-    QualType LHSTy = Context.getPointerType(EltTy);
-    AssignConvertType ConvTy;
-    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
-    if (RHS.isInvalid())
-      return true;
-    if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy,
-                                 RHS.get(), AA_Assigning))
-      return true;
-  }
-  
-  // For NEON intrinsics which take an immediate value as part of the 
+  // For NEON intrinsics which take an immediate value as part of the
   // instruction, range check them here.
   unsigned i = 0, l = 0, u = 0;
   switch (BuiltinID) {
@@ -630,9 +628,6 @@
   case ARM::BI__builtin_arm_vcvtr_d: i = 1; u = 1; break;
   case ARM::BI__builtin_arm_dmb:
   case ARM::BI__builtin_arm_dsb: l = 0; u = 15; break;
-#define GET_NEON_IMMEDIATE_CHECK
-#include "clang/Basic/arm_neon.inc"
-#undef GET_NEON_IMMEDIATE_CHECK
   };
 
   // We can't check the value of a dependent argument.
@@ -654,6 +649,21 @@
   return false;
 }
 
+bool Sema::CheckARM64BuiltinFunctionCall(unsigned BuiltinID,
+                                         CallExpr *TheCall) {
+  llvm::APSInt Result;
+
+  if (BuiltinID == ARM64::BI__builtin_arm_ldrex ||
+      BuiltinID == ARM64::BI__builtin_arm_strex) {
+    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
+  }
+
+  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+    return true;
+
+  return false;
+}
+
 bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   unsigned i = 0, l = 0, u = 0;
   switch (BuiltinID) {
@@ -686,6 +696,14 @@
   return false;
 }
 
+bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+  switch (BuiltinID) {
+  case X86::BI_mm_prefetch:
+    return SemaBuiltinMMPrefetch(TheCall);
+  }
+  return false;
+}
+
 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
 /// Returns true when the format fits the function and the FormatStringInfo has
@@ -709,14 +727,69 @@
   return true;
 }
 
+/// Checks if the given expression evaluates to null.
+///
+/// \brief Returns true if the value evaluates to null.
+static bool CheckNonNullExpr(Sema &S,
+                             const Expr *Expr) {
+  // As a special case, transparent unions initialized with zero are
+  // considered null for the purposes of the nonnull attribute.
+  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
+    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
+      if (const CompoundLiteralExpr *CLE =
+          dyn_cast<CompoundLiteralExpr>(Expr))
+        if (const InitListExpr *ILE =
+            dyn_cast<InitListExpr>(CLE->getInitializer()))
+          Expr = ILE->getInit(0);
+  }
+
+  bool Result;
+  return (!Expr->isValueDependent() &&
+          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
+          !Result);
+}
+
+static void CheckNonNullArgument(Sema &S,
+                                 const Expr *ArgExpr,
+                                 SourceLocation CallSiteLoc) {
+  if (CheckNonNullExpr(S, ArgExpr))
+    S.Diag(CallSiteLoc, diag::warn_null_arg) << ArgExpr->getSourceRange();
+}
+
+static void CheckNonNullArguments(Sema &S,
+                                  const NamedDecl *FDecl,
+                                  const Expr * const *ExprArgs,
+                                  SourceLocation CallSiteLoc) {
+  // Check the attributes attached to the method/function itself.
+  for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
+    for (NonNullAttr::args_iterator i = NonNull->args_begin(),
+         e = NonNull->args_end();
+         i != e; ++i) {
+      CheckNonNullArgument(S, ExprArgs[*i], CallSiteLoc);
+    }
+  }
+
+  // Check the attributes on the parameters.
+  ArrayRef<ParmVarDecl*> parms;
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
+    parms = FD->parameters();
+  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(FDecl))
+    parms = MD->parameters();
+
+  unsigned argIndex = 0;
+  for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
+       I != E; ++I, ++argIndex) {
+    const ParmVarDecl *PVD = *I;
+    if (PVD->hasAttr<NonNullAttr>())
+      CheckNonNullArgument(S, ExprArgs[argIndex], CallSiteLoc);
+  }
+}
+
 /// Handles the checks for format strings, non-POD arguments to vararg
 /// functions, and NULL arguments passed to non-NULL parameters.
-void Sema::checkCall(NamedDecl *FDecl,
-                     ArrayRef<const Expr *> Args,
-                     unsigned NumProtoArgs,
-                     bool IsMemberFunction,
-                     SourceLocation Loc,
-                     SourceRange Range,
+void Sema::checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args,
+                     unsigned NumParams, bool IsMemberFunction,
+                     SourceLocation Loc, SourceRange Range,
                      VariadicCallType CallType) {
   // FIXME: We should check as much as we can in the template definition.
   if (CurContext->isDependentContext())
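
For illustration only (invented names): the two attribute placements the rewritten non-null checking walks, first the function-level nonnull attribute, then the per-parameter form.

    __attribute__((nonnull(1)))
    void copy_into(char *dst, const char *src);

    void log_line(const char *msg __attribute__((nonnull)));

    void use() {
      copy_into(nullptr, "hi");   // diagnosed via the function-level attribute
      log_line(nullptr);          // diagnosed via the parameter attribute
    }
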
@@ -725,14 +798,11 @@
   // Printf and scanf checking.
   llvm::SmallBitVector CheckedVarArgs;
   if (FDecl) {
-    for (specific_attr_iterator<FormatAttr>
-             I = FDecl->specific_attr_begin<FormatAttr>(),
-             E = FDecl->specific_attr_end<FormatAttr>();
-         I != E; ++I) {
+    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
       // Only create vector if there are format attributes.
       CheckedVarArgs.resize(Args.size());
 
-      CheckFormatArguments(*I, Args, IsMemberFunction, CallType, Loc, Range,
+      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                            CheckedVarArgs);
     }
   }
@@ -740,7 +810,7 @@
   // Refuse POD arguments that weren't caught by the format string
   // checks above.
   if (CallType != VariadicDoesNotApply) {
-    for (unsigned ArgIdx = NumProtoArgs; ArgIdx < Args.size(); ++ArgIdx) {
+    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
       // Args[ArgIdx] can be null in malformed code.
       if (const Expr *Arg = Args[ArgIdx]) {
         if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
@@ -750,18 +820,11 @@
   }
 
   if (FDecl) {
-    for (specific_attr_iterator<NonNullAttr>
-           I = FDecl->specific_attr_begin<NonNullAttr>(),
-           E = FDecl->specific_attr_end<NonNullAttr>(); I != E; ++I)
-      CheckNonNullArguments(*I, Args.data(), Loc);
+    CheckNonNullArguments(*this, FDecl, Args.data(), Loc);
 
     // Type safety checking.
-    for (specific_attr_iterator<ArgumentWithTypeTagAttr>
-           i = FDecl->specific_attr_begin<ArgumentWithTypeTagAttr>(),
-           e = FDecl->specific_attr_end<ArgumentWithTypeTagAttr>();
-         i != e; ++i) {
-      CheckArgumentWithTypeTag(*i, Args.data());
-    }
+    for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
+      CheckArgumentWithTypeTag(I, Args.data());
   }
 }
 
@@ -773,7 +836,7 @@
                                 SourceLocation Loc) {
   VariadicCallType CallType =
     Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
-  checkCall(FDecl, Args, Proto->getNumArgs(),
+  checkCall(FDecl, Args, Proto->getNumParams(),
             /*IsMemberFunction=*/true, Loc, SourceRange(), CallType);
 }
 
@@ -787,7 +850,7 @@
                           IsMemberOperatorCall;
   VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                   TheCall->getCallee());
-  unsigned NumProtoArgs = Proto ? Proto->getNumArgs() : 0;
+  unsigned NumParams = Proto ? Proto->getNumParams() : 0;
   Expr** Args = TheCall->getArgs();
   unsigned NumArgs = TheCall->getNumArgs();
   if (IsMemberOperatorCall) {
@@ -797,8 +860,7 @@
     ++Args;
     --NumArgs;
   }
-  checkCall(FDecl, llvm::makeArrayRef<const Expr *>(Args, NumArgs),
-            NumProtoArgs,
+  checkCall(FDecl, llvm::makeArrayRef<const Expr *>(Args, NumArgs), NumParams,
             IsMemberFunction, TheCall->getRParenLoc(),
             TheCall->getCallee()->getSourceRange(), CallType);
 
@@ -808,6 +870,8 @@
   if (!FnInfo)
     return false;
 
+  CheckAbsoluteValueFunction(TheCall, FDecl, FnInfo);
+
   unsigned CMId = FDecl->getMemoryFunctionKind();
   if (CMId == 0)
     return false;
@@ -853,15 +917,13 @@
   } else { // Ty->isFunctionPointerType()
     CallType = VariadicFunction;
   }
-  unsigned NumProtoArgs = Proto ? Proto->getNumArgs() : 0;
+  unsigned NumParams = Proto ? Proto->getNumParams() : 0;
 
-  checkCall(NDecl,
-            llvm::makeArrayRef<const Expr *>(TheCall->getArgs(),
-                                             TheCall->getNumArgs()),
-            NumProtoArgs, /*IsMemberFunction=*/false,
-            TheCall->getRParenLoc(),
+  checkCall(NDecl, llvm::makeArrayRef<const Expr *>(TheCall->getArgs(),
+                                                    TheCall->getNumArgs()),
+            NumParams, /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
             TheCall->getCallee()->getSourceRange(), CallType);
-  
+
   return false;
 }
 
@@ -870,18 +932,43 @@
 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
   VariadicCallType CallType = getVariadicCallType(/*FDecl=*/0, Proto,
                                                   TheCall->getCallee());
-  unsigned NumProtoArgs = Proto ? Proto->getNumArgs() : 0;
+  unsigned NumParams = Proto ? Proto->getNumParams() : 0;
 
-  checkCall(/*FDecl=*/0,
-            llvm::makeArrayRef<const Expr *>(TheCall->getArgs(),
-                                             TheCall->getNumArgs()),
-            NumProtoArgs, /*IsMemberFunction=*/false,
-            TheCall->getRParenLoc(),
+  checkCall(/*FDecl=*/0, llvm::makeArrayRef<const Expr *>(
+                             TheCall->getArgs(), TheCall->getNumArgs()),
+            NumParams, /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
             TheCall->getCallee()->getSourceRange(), CallType);
 
   return false;
 }
 
+static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
+  if (Ordering < AtomicExpr::AO_ABI_memory_order_relaxed ||
+      Ordering > AtomicExpr::AO_ABI_memory_order_seq_cst)
+    return false;
+
+  switch (Op) {
+  case AtomicExpr::AO__c11_atomic_init:
+    llvm_unreachable("There is no ordering argument for an init");
+
+  case AtomicExpr::AO__c11_atomic_load:
+  case AtomicExpr::AO__atomic_load_n:
+  case AtomicExpr::AO__atomic_load:
+    return Ordering != AtomicExpr::AO_ABI_memory_order_release &&
+           Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel;
+
+  case AtomicExpr::AO__c11_atomic_store:
+  case AtomicExpr::AO__atomic_store:
+  case AtomicExpr::AO__atomic_store_n:
+    return Ordering != AtomicExpr::AO_ABI_memory_order_consume &&
+           Ordering != AtomicExpr::AO_ABI_memory_order_acquire &&
+           Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel;
+
+  default:
+    return true;
+  }
+}
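// Illustrative sketch (not part of this change; names are made up): the kind
// of call the isValidOrderingForOp check above is meant to flag when the
// ordering argument folds to a constant.
//
//   _Atomic(int) Counter;
//   int bad_load(void) {
//     // warned: a release ordering is not valid for an atomic load
//     return __c11_atomic_load(&Counter, __ATOMIC_RELEASE);
//   }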
+
 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                          AtomicExpr::AtomicOp Op) {
   CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
@@ -1170,7 +1257,16 @@
     SubExprs.push_back(TheCall->getArg(3)); // Weak
     break;
   }
-  
+
+  if (SubExprs.size() >= 2 && Form != Init) {
+    llvm::APSInt Result(32);
+    if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
+        !isValidOrderingForOp(Result.getSExtValue(), Op))
+      Diag(SubExprs[1]->getLocStart(),
+           diag::warn_atomic_op_has_invalid_memory_order)
+          << SubExprs[1]->getSourceRange();
+  }
+
   AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
                                             SubExprs, ResultType, Op,
                                             TheCall->getRParenLoc());
@@ -1901,6 +1997,26 @@
   return false;
 }
 
+/// SemaBuiltinMMPrefetch - Handle _mm_prefetch.
+/// This is declared to take (const char*, int).
+bool Sema::SemaBuiltinMMPrefetch(CallExpr *TheCall) {
+  Expr *Arg = TheCall->getArg(1);
+
+  // We can't check the value of a dependent argument.
+  if (Arg->isTypeDependent() || Arg->isValueDependent())
+    return false;
+
+  llvm::APSInt Result;
+  if (SemaBuiltinConstantArg(TheCall, 1, Result))
+    return true;
+
+  if (Result.getLimitedValue() > 3)
+    return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+        << "0" << "3" << Arg->getSourceRange();
+
+  return false;
+}
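// Illustrative sketch (not part of this change; names are made up): when
// _mm_prefetch resolves to the builtin declared as (const char*, int), the
// hint argument must be a constant in the range 0..3.
//
//   void warm(const char *P) {
//     _mm_prefetch(P, 3);  // OK, hint is within [0, 3]
//     _mm_prefetch(P, 4);  // rejected: argument should be a value from 0 to 3
//   }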
+
 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
 /// TheCall is a constant expression.
 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
@@ -2081,10 +2197,7 @@
         if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
           if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
             int PVIndex = PV->getFunctionScopeIndex() + 1;
-            for (specific_attr_iterator<FormatAttr>
-                 i = ND->specific_attr_begin<FormatAttr>(),
-                 e = ND->specific_attr_end<FormatAttr>(); i != e ; ++i) {
-              FormatAttr *PVFormat = *i;
+            for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
               // adjust for implicit parameter
               if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
                 if (MD->isInstance())
@@ -2133,27 +2246,6 @@
 
     return SLCT_NotALiteral;
   }
-      
-  case Stmt::ObjCMessageExprClass: {
-    const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(E);
-    if (const ObjCMethodDecl *MDecl = ME->getMethodDecl()) {
-      if (const NamedDecl *ND = dyn_cast<NamedDecl>(MDecl)) {
-        if (const FormatArgAttr *FA = ND->getAttr<FormatArgAttr>()) {
-          unsigned ArgIndex = FA->getFormatIdx();
-          if (ArgIndex <= ME->getNumArgs()) {
-            const Expr *Arg = ME->getArg(ArgIndex-1);
-            return checkFormatStringExpr(S, Arg, Args,
-                                         HasVAListArg, format_idx,
-                                         firstDataArg, Type, CallType,
-                                         InFunctionCall, CheckedVarArgs);
-          }
-        }
-      }
-    }
-
-    return SLCT_NotALiteral;
-  }
-      
   case Stmt::ObjCStringLiteralClass:
   case Stmt::StringLiteralClass: {
     const StringLiteral *StrE = NULL;
@@ -2177,32 +2269,6 @@
   }
 }
 
-void
-Sema::CheckNonNullArguments(const NonNullAttr *NonNull,
-                            const Expr * const *ExprArgs,
-                            SourceLocation CallSiteLoc) {
-  for (NonNullAttr::args_iterator i = NonNull->args_begin(),
-                                  e = NonNull->args_end();
-       i != e; ++i) {
-    const Expr *ArgExpr = ExprArgs[*i];
-
-    // As a special case, transparent unions initialized with zero are
-    // considered null for the purposes of the nonnull attribute.
-    if (const RecordType *UT = ArgExpr->getType()->getAsUnionType()) {
-      if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
-        if (const CompoundLiteralExpr *CLE =
-            dyn_cast<CompoundLiteralExpr>(ArgExpr))
-          if (const InitListExpr *ILE =
-              dyn_cast<InitListExpr>(CLE->getInitializer()))
-            ArgExpr = ILE->getInit(0);
-    }
-
-    bool Result;
-    if (ArgExpr->EvaluateAsBooleanCondition(Result, Context) && !Result)
-      Diag(CallSiteLoc, diag::warn_null_arg) << ArgExpr->getSourceRange();
-  }
-}
-
 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
   return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
   .Case("scanf", FST_Scanf)
@@ -2331,30 +2397,31 @@
   void DoneProcessing();
 
   void HandleIncompleteSpecifier(const char *startSpecifier,
-                                 unsigned specifierLen);
+                                 unsigned specifierLen) override;
 
   void HandleInvalidLengthModifier(
-      const analyze_format_string::FormatSpecifier &FS,
-      const analyze_format_string::ConversionSpecifier &CS,
-      const char *startSpecifier, unsigned specifierLen, unsigned DiagID);
+                           const analyze_format_string::FormatSpecifier &FS,
+                           const analyze_format_string::ConversionSpecifier &CS,
+                           const char *startSpecifier, unsigned specifierLen,
+                           unsigned DiagID);
 
   void HandleNonStandardLengthModifier(
-      const analyze_format_string::FormatSpecifier &FS,
-      const char *startSpecifier, unsigned specifierLen);
+                    const analyze_format_string::FormatSpecifier &FS,
+                    const char *startSpecifier, unsigned specifierLen);
 
   void HandleNonStandardConversionSpecifier(
-      const analyze_format_string::ConversionSpecifier &CS,
-      const char *startSpecifier, unsigned specifierLen);
+                    const analyze_format_string::ConversionSpecifier &CS,
+                    const char *startSpecifier, unsigned specifierLen);
 
-  virtual void HandlePosition(const char *startPos, unsigned posLen);
+  void HandlePosition(const char *startPos, unsigned posLen) override;
 
-  virtual void HandleInvalidPosition(const char *startSpecifier,
-                                     unsigned specifierLen,
-                                     analyze_format_string::PositionContext p);
+  void HandleInvalidPosition(const char *startSpecifier,
+                             unsigned specifierLen,
+                             analyze_format_string::PositionContext p) override;
 
-  virtual void HandleZeroPosition(const char *startPos, unsigned posLen);
+  void HandleZeroPosition(const char *startPos, unsigned posLen) override;
 
-  void HandleNullChar(const char *nullCharacter);
+  void HandleNullChar(const char *nullCharacter) override;
 
   template <typename Range>
   static void EmitFormatDiagnostic(Sema &S, bool inFunctionCall,
@@ -2390,9 +2457,6 @@
   void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                             bool IsStringLocation, Range StringRange,
                             ArrayRef<FixItHint> Fixit = None);
-
-  void CheckPositionalAndNonpositionalArgs(
-      const analyze_format_string::FormatSpecifier *FS);
 };
 }
 
@@ -2723,15 +2787,15 @@
       ObjCContext(isObjC)
   {}
 
-  
+
   bool HandleInvalidPrintfConversionSpecifier(
                                       const analyze_printf::PrintfSpecifier &FS,
                                       const char *startSpecifier,
-                                      unsigned specifierLen);
-  
+                                      unsigned specifierLen) override;
+
   bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                              const char *startSpecifier,
-                             unsigned specifierLen);
+                             unsigned specifierLen) override;
   bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                        const char *StartSpecifier,
                        unsigned SpecifierLen,
@@ -2751,7 +2815,7 @@
                          const analyze_printf::OptionalFlag &flag,
                          const char *startSpecifier, unsigned specifierLen);
   bool checkForCStrMembers(const analyze_printf::ArgType &AT,
-                           const Expr *E, const CharSourceRange &CSR);
+                           const Expr *E);
 
 };  
 }
@@ -2885,11 +2949,12 @@
   if (!RT)
     return Results;
   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
-  if (!RD)
+  if (!RD || !RD->getDefinition())
     return Results;
 
   LookupResult R(S, &S.PP.getIdentifierTable().get(Name), SourceLocation(),
                  Sema::LookupMemberName);
+  R.suppressDiagnostics();
 
   // We just need to include all members of the right kind turned up by the
   // filter, at this point.
@@ -2902,12 +2967,26 @@
   return Results;
 }
 
+/// Check if we could call '.c_str()' on an object.
+///
+/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
+/// allow the call, or if it would be ambiguous).
+bool Sema::hasCStrMethod(const Expr *E) {
+  typedef llvm::SmallPtrSet<CXXMethodDecl*, 1> MethodSet;
+  MethodSet Results =
+      CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
+  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
+       MI != ME; ++MI)
+    if ((*MI)->getMinRequiredArguments() == 0)
+      return true;
+  return false;
+}
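// Illustrative sketch (not part of this change; names are made up):
// checkForCStrMembers below uses this kind of lookup to suggest appending
// '.c_str()' when an object with a suitable c_str() member is passed where a
// C string is expected.
//
//   #include <cstdio>
//   #include <string>
//   void logName(const std::string &Name) {
//     std::printf("%s\n", Name);          // warned; fix-it suggests Name.c_str()
//     std::printf("%s\n", Name.c_str());  // OK
//   }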
+
 // Check if a (w)string was passed when a (w)char* was needed, and offer a
 // better diagnostic if so. AT is assumed to be valid.
 // Returns true when a c_str() conversion method is found.
 bool CheckPrintfHandler::checkForCStrMembers(
-    const analyze_printf::ArgType &AT, const Expr *E,
-    const CharSourceRange &CSR) {
+    const analyze_printf::ArgType &AT, const Expr *E) {
   typedef llvm::SmallPtrSet<CXXMethodDecl*, 1> MethodSet;
 
   MethodSet Results =
@@ -2916,8 +2995,8 @@
   for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
        MI != ME; ++MI) {
     const CXXMethodDecl *Method = *MI;
-    if (Method->getNumParams() == 0 &&
-          AT.matchesType(S.Context, Method->getResultType())) {
+    if (Method->getMinRequiredArguments() == 0 &&
+        AT.matchesType(S.Context, Method->getReturnType())) {
       // FIXME: Suggest parens if the expression needs them.
       SourceLocation EndLoc =
           S.getPreprocessor().getLocForEndOfToken(E->getLocEnd());
@@ -3303,7 +3382,7 @@
           << CSR
           << E->getSourceRange(),
         E->getLocStart(), /*IsStringLocation*/false, CSR);
-      checkForCStrMembers(AT, E, CSR);
+      checkForCStrMembers(AT, E);
       break;
 
     case Sema::VAK_Invalid:
@@ -3355,14 +3434,14 @@
   
   bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                             const char *startSpecifier,
-                            unsigned specifierLen);
+                            unsigned specifierLen) override;
   
   bool HandleInvalidScanfConversionSpecifier(
           const analyze_scanf::ScanfSpecifier &FS,
           const char *startSpecifier,
-          unsigned specifierLen);
+          unsigned specifierLen) override;
 
-  void HandleIncompleteScanList(const char *start, const char *end);
+  void HandleIncompleteScanList(const char *start, const char *end) override;
 };
 }
 
@@ -3467,8 +3546,9 @@
   const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);
   if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) {
     ScanfSpecifier fixedFS = FS;
-    bool success = fixedFS.fixType(Ex->getType(), S.getLangOpts(),
-                                   S.Context);
+    bool success = fixedFS.fixType(Ex->getType(),
+                                   Ex->IgnoreImpCasts()->getType(),
+                                   S.getLangOpts(), S.Context);
 
     if (success) {
       // Get the fix string from the fixed format specifier.
@@ -3520,9 +3600,25 @@
   // Str - The format string.  NOTE: this is NOT null-terminated!
   StringRef StrRef = FExpr->getString();
   const char *Str = StrRef.data();
-  unsigned StrLen = StrRef.size();
+  // Account for cases where the string literal is truncated in a declaration.
+  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
+  assert(T && "String literal not of constant array type!");
+  size_t TypeSize = T->getSize().getZExtValue();
+  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
   const unsigned numDataArgs = Args.size() - firstDataArg;
-  
+
+  // Emit a warning if the string literal is truncated and does not contain an
+  // embedded null character.
+  if (TypeSize <= StrRef.size() &&
+      StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) {
+    CheckFormatHandler::EmitFormatDiagnostic(
+        *this, inFunctionCall, Args[format_idx],
+        PDiag(diag::warn_printf_format_string_not_null_terminated),
+        FExpr->getLocStart(),
+        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
+    return;
+  }
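// Illustrative sketch (not part of this change; names are made up): a format
// string whose declared array type truncates away the terminating null, which
// the check above now flags.
//
//   #include <stdio.h>
//   const char Fmt[3] = "%s!";  // the '\0' does not fit in char[3]
//   void greet(const char *Name) {
//     printf(Fmt, Name);  // warned: format string is not null-terminated
//   }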
+
   // CHECK: empty format string?
   if (StrLen == 0 && numDataArgs > 0) {
     CheckFormatHandler::EmitFormatDiagnostic(
@@ -3554,8 +3650,365 @@
   } // TODO: handle other formats
 }
 
+//===--- CHECK: Warn on use of wrong absolute value function. -------------===//
+
+// Returns the related absolute value function that is larger, or 0 if one
+// does not exist.
+static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
+  switch (AbsFunction) {
+  default:
+    return 0;
+
+  case Builtin::BI__builtin_abs:
+    return Builtin::BI__builtin_labs;
+  case Builtin::BI__builtin_labs:
+    return Builtin::BI__builtin_llabs;
+  case Builtin::BI__builtin_llabs:
+    return 0;
+
+  case Builtin::BI__builtin_fabsf:
+    return Builtin::BI__builtin_fabs;
+  case Builtin::BI__builtin_fabs:
+    return Builtin::BI__builtin_fabsl;
+  case Builtin::BI__builtin_fabsl:
+    return 0;
+
+  case Builtin::BI__builtin_cabsf:
+    return Builtin::BI__builtin_cabs;
+  case Builtin::BI__builtin_cabs:
+    return Builtin::BI__builtin_cabsl;
+  case Builtin::BI__builtin_cabsl:
+    return 0;
+
+  case Builtin::BIabs:
+    return Builtin::BIlabs;
+  case Builtin::BIlabs:
+    return Builtin::BIllabs;
+  case Builtin::BIllabs:
+    return 0;
+
+  case Builtin::BIfabsf:
+    return Builtin::BIfabs;
+  case Builtin::BIfabs:
+    return Builtin::BIfabsl;
+  case Builtin::BIfabsl:
+    return 0;
+
+  case Builtin::BIcabsf:
+   return Builtin::BIcabs;
+  case Builtin::BIcabs:
+    return Builtin::BIcabsl;
+  case Builtin::BIcabsl:
+    return 0;
+  }
+}
+
+// Returns the argument type of the absolute value function.
+static QualType getAbsoluteValueArgumentType(ASTContext &Context,
+                                             unsigned AbsType) {
+  if (AbsType == 0)
+    return QualType();
+
+  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
+  QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
+  if (Error != ASTContext::GE_None)
+    return QualType();
+
+  const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
+  if (!FT)
+    return QualType();
+
+  if (FT->getNumParams() != 1)
+    return QualType();
+
+  return FT->getParamType(0);
+}
+
+// Returns the best absolute value function, or zero, based on type and
+// current absolute value function.
+static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
+                                   unsigned AbsFunctionKind) {
+  unsigned BestKind = 0;
+  uint64_t ArgSize = Context.getTypeSize(ArgType);
+  for (unsigned Kind = AbsFunctionKind; Kind != 0;
+       Kind = getLargerAbsoluteValueFunction(Kind)) {
+    QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
+    if (Context.getTypeSize(ParamType) >= ArgSize) {
+      if (BestKind == 0)
+        BestKind = Kind;
+      else if (Context.hasSameType(ParamType, ArgType)) {
+        BestKind = Kind;
+        break;
+      }
+    }
+  }
+  return BestKind;
+}
+
+enum AbsoluteValueKind {
+  AVK_Integer,
+  AVK_Floating,
+  AVK_Complex
+};
+
+static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
+  if (T->isIntegralOrEnumerationType())
+    return AVK_Integer;
+  if (T->isRealFloatingType())
+    return AVK_Floating;
+  if (T->isAnyComplexType())
+    return AVK_Complex;
+
+  llvm_unreachable("Type not integer, floating, or complex");
+}
+
+// Changes the absolute value function to a different type.  Preserves whether
+// the function is a builtin.
+static unsigned changeAbsFunction(unsigned AbsKind,
+                                  AbsoluteValueKind ValueKind) {
+  switch (ValueKind) {
+  case AVK_Integer:
+    switch (AbsKind) {
+    default:
+      return 0;
+    case Builtin::BI__builtin_fabsf:
+    case Builtin::BI__builtin_fabs:
+    case Builtin::BI__builtin_fabsl:
+    case Builtin::BI__builtin_cabsf:
+    case Builtin::BI__builtin_cabs:
+    case Builtin::BI__builtin_cabsl:
+      return Builtin::BI__builtin_abs;
+    case Builtin::BIfabsf:
+    case Builtin::BIfabs:
+    case Builtin::BIfabsl:
+    case Builtin::BIcabsf:
+    case Builtin::BIcabs:
+    case Builtin::BIcabsl:
+      return Builtin::BIabs;
+    }
+  case AVK_Floating:
+    switch (AbsKind) {
+    default:
+      return 0;
+    case Builtin::BI__builtin_abs:
+    case Builtin::BI__builtin_labs:
+    case Builtin::BI__builtin_llabs:
+    case Builtin::BI__builtin_cabsf:
+    case Builtin::BI__builtin_cabs:
+    case Builtin::BI__builtin_cabsl:
+      return Builtin::BI__builtin_fabsf;
+    case Builtin::BIabs:
+    case Builtin::BIlabs:
+    case Builtin::BIllabs:
+    case Builtin::BIcabsf:
+    case Builtin::BIcabs:
+    case Builtin::BIcabsl:
+      return Builtin::BIfabsf;
+    }
+  case AVK_Complex:
+    switch (AbsKind) {
+    default:
+      return 0;
+    case Builtin::BI__builtin_abs:
+    case Builtin::BI__builtin_labs:
+    case Builtin::BI__builtin_llabs:
+    case Builtin::BI__builtin_fabsf:
+    case Builtin::BI__builtin_fabs:
+    case Builtin::BI__builtin_fabsl:
+      return Builtin::BI__builtin_cabsf;
+    case Builtin::BIabs:
+    case Builtin::BIlabs:
+    case Builtin::BIllabs:
+    case Builtin::BIfabsf:
+    case Builtin::BIfabs:
+    case Builtin::BIfabsl:
+      return Builtin::BIcabsf;
+    }
+  }
+  llvm_unreachable("Unable to convert function");
+}
+
+static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
+  const IdentifierInfo *FnInfo = FDecl->getIdentifier();
+  if (!FnInfo)
+    return 0;
+
+  switch (FDecl->getBuiltinID()) {
+  default:
+    return 0;
+  case Builtin::BI__builtin_abs:
+  case Builtin::BI__builtin_fabs:
+  case Builtin::BI__builtin_fabsf:
+  case Builtin::BI__builtin_fabsl:
+  case Builtin::BI__builtin_labs:
+  case Builtin::BI__builtin_llabs:
+  case Builtin::BI__builtin_cabs:
+  case Builtin::BI__builtin_cabsf:
+  case Builtin::BI__builtin_cabsl:
+  case Builtin::BIabs:
+  case Builtin::BIlabs:
+  case Builtin::BIllabs:
+  case Builtin::BIfabs:
+  case Builtin::BIfabsf:
+  case Builtin::BIfabsl:
+  case Builtin::BIcabs:
+  case Builtin::BIcabsf:
+  case Builtin::BIcabsl:
+    return FDecl->getBuiltinID();
+  }
+  llvm_unreachable("Unknown Builtin type");
+}
+
+// If the replacement is valid, emit a note with the replacement function.
+// Additionally, suggest including the proper header if not already included.
+static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
+                            unsigned AbsKind) {
+  std::string AbsName = S.Context.BuiltinInfo.GetName(AbsKind);
+
+  // Look up absolute value function in TU scope.
+  DeclarationName DN(&S.Context.Idents.get(AbsName));
+  LookupResult R(S, DN, Loc, Sema::LookupAnyName);
+  R.suppressDiagnostics();
+  S.LookupName(R, S.TUScope);
+
+  // Skip notes if multiple results found in lookup.
+  if (!R.empty() && !R.isSingleResult())
+    return;
+
+  FunctionDecl *FD = 0;
+  bool FoundFunction = R.isSingleResult();
+  // When one result is found, see if it is the correct function.
+  if (R.isSingleResult()) {
+    FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
+    if (!FD || FD->getBuiltinID() != AbsKind)
+      return;
+  }
+
+  // Look for local name conflict, prepend "::" as necessary.
+  R.clear();
+  S.LookupName(R, S.getCurScope());
+
+  if (!FoundFunction) {
+    if (!R.empty()) {
+      AbsName = "::" + AbsName;
+    }
+  } else { // FoundFunction
+    if (R.isSingleResult()) {
+      if (R.getFoundDecl() != FD) {
+        AbsName = "::" + AbsName;
+      }
+    } else if (!R.empty()) {
+      AbsName = "::" + AbsName;
+    }
+  }
+
+  S.Diag(Loc, diag::note_replace_abs_function)
+      << AbsName << FixItHint::CreateReplacement(Range, AbsName);
+
+  if (!FoundFunction) {
+    S.Diag(Loc, diag::note_please_include_header)
+        << S.Context.BuiltinInfo.getHeaderName(AbsKind)
+        << S.Context.BuiltinInfo.GetName(AbsKind);
+  }
+}
+
+// Warn when using the wrong abs() function.
+void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
+                                      const FunctionDecl *FDecl,
+                                      IdentifierInfo *FnInfo) {
+  if (Call->getNumArgs() != 1)
+    return;
+
+  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
+  if (AbsKind == 0)
+    return;
+
+  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
+  QualType ParamType = Call->getArg(0)->getType();
+
+  // Unsigned types cannot be negative.  Suggest dropping the absolute value
+  // function.
+  if (ArgType->isUnsignedIntegerType()) {
+    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
+    Diag(Call->getExprLoc(), diag::note_remove_abs)
+        << FDecl
+        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
+    return;
+  }
+
+  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
+  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
+
+  // The argument and parameter are the same kind.  Check if they are the right
+  // size.
+  if (ArgValueKind == ParamValueKind) {
+    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
+      return;
+
+    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
+    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
+        << FDecl << ArgType << ParamType;
+
+    if (NewAbsKind == 0)
+      return;
+
+    emitReplacement(*this, Call->getExprLoc(),
+                    Call->getCallee()->getSourceRange(), NewAbsKind);
+    return;
+  }
+
+  // ArgValueKind != ParamValueKind
+  // The wrong type of absolute value function was used.  Attempt to find the
+  // proper one.
+  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
+  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
+  if (NewAbsKind == 0)
+    return;
+
+  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
+      << FDecl << ParamValueKind << ArgValueKind;
+
+  emitReplacement(*this, Call->getExprLoc(),
+                  Call->getCallee()->getSourceRange(), NewAbsKind);
+  return;
+}
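// Illustrative sketch (not part of this change; names are made up, and a
// 64-bit 'long' is assumed): calls that CheckAbsoluteValueFunction diagnoses,
// with the replacements its notes suggest.
//
//   #include <stdlib.h>
//   int checks(long L, unsigned U, double D) {
//     int A = abs(L);  // warned: 'abs' may truncate 'long'; note suggests 'labs'
//     int B = abs(U);  // warned: absolute value of unsigned type; note: remove the call
//     int C = abs(D);  // warned: integer abs used on a float; note suggests 'fabs'
//     return A + B + C;
//   }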
+
 //===--- CHECK: Standard memory functions ---------------------------------===//
 
+/// \brief Takes the expression passed to the size_t parameter of functions
+/// such as memcmp, strncat, etc., and warns if it's a comparison.
+///
+/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
+static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
+                                           IdentifierInfo *FnName,
+                                           SourceLocation FnLoc,
+                                           SourceLocation RParenLoc) {
+  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
+  if (!Size)
+    return false;
+
+  // if E is binop and op is >, <, >=, <=, ==, !=, &&, ||:
+  if (!Size->isComparisonOp() && !Size->isEqualityOp() && !Size->isLogicalOp())
+    return false;
+
+  Preprocessor &PP = S.getPreprocessor();
+  SourceRange SizeRange = Size->getSourceRange();
+  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
+      << SizeRange << FnName;
+  S.Diag(FnLoc, diag::warn_memsize_comparison_paren_note)
+      << FnName
+      << FixItHint::CreateInsertion(
+             PP.getLocForEndOfToken(Size->getLHS()->getLocEnd()),
+             ")")
+      << FixItHint::CreateRemoval(RParenLoc);
+  S.Diag(SizeRange.getBegin(), diag::warn_memsize_comparison_cast_note)
+      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
+      << FixItHint::CreateInsertion(
+             PP.getLocForEndOfToken(SizeRange.getEnd()), ")");
+
+  return true;
+}
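// Illustrative sketch (not part of this change; names are made up): the
// misplaced-parenthesis typo this check catches, mirroring the example in the
// comment above.
//
//   #include <string.h>
//   struct Big { int Data[8]; } a, b;
//   int cmp(void) {
//     if (memcmp(&a, &b, sizeof(a) > 0))   // warned: size argument is a comparison
//       return 1;
//     return memcmp(&a, &b, sizeof(a)) > 0;  // one of the suggested rewrites
//   }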
+
 /// \brief Determine whether the given type is a dynamic class type (e.g.,
 /// whether it has a vtable).
 static bool isDynamicClassType(QualType T) {
@@ -3611,6 +4064,10 @@
   unsigned LenArg = (BId == Builtin::BIstrndup ? 1 : 2);
   const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
 
+  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
+                                     Call->getLocStart(), Call->getRParenLoc()))
+    return;
+
   // We have special checking when the length is a sizeof expression.
   QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
   const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
@@ -3794,6 +4251,10 @@
   const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
   const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
   const Expr *CompareWithSrc = NULL;
+
+  if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
+                                     Call->getLocStart(), Call->getRParenLoc()))
+    return;
   
   // Look for 'strlcpy(dst, x, sizeof(x))'
   if (const Expr *Ex = getSizeOfExprArg(SizeArg))
@@ -3801,8 +4262,8 @@
   else {
     // Look for 'strlcpy(dst, x, strlen(x))'
     if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
-      if (SizeCall->isBuiltinCall() == Builtin::BIstrlen
-          && SizeCall->getNumArgs() == 1)
+      if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
+          SizeCall->getNumArgs() == 1)
         CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
     }
   }
@@ -3876,6 +4337,10 @@
   const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
   const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
 
+  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getLocStart(),
+                                     CE->getRParenLoc()))
+    return;
+
   // Identify common expressions, which are wrongly used as the size argument
   // to strncat and may lead to buffer overflows.
   unsigned PatternType = 0;
@@ -3954,9 +4419,9 @@
 
 /// CheckReturnStackAddr - Check if a return statement returns the address
 ///   of a stack variable.
-void
-Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
-                           SourceLocation ReturnLoc) {
+static void
+CheckReturnStackAddr(Sema &S, Expr *RetValExp, QualType lhsType,
+                     SourceLocation ReturnLoc) {
 
   Expr *stackE = 0;
   SmallVector<DeclRefExpr *, 8> refVars;
@@ -3964,7 +4429,7 @@
   // Perform checking for returned stack addresses, local blocks,
   // label addresses or references to temporaries.
   if (lhsType->isPointerType() ||
-      (!getLangOpts().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
+      (!S.getLangOpts().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
     stackE = EvalAddr(RetValExp, refVars, /*ParentDecl=*/0);
   } else if (lhsType->isReferenceType()) {
     stackE = EvalVal(RetValExp, refVars, /*ParentDecl=*/0);
@@ -3988,16 +4453,16 @@
   }
 
   if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(stackE)) { //address of local var.
-    Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_stack_ref
+    S.Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_stack_ref
                                              : diag::warn_ret_stack_addr)
      << DR->getDecl()->getDeclName() << diagRange;
   } else if (isa<BlockExpr>(stackE)) { // local block.
-    Diag(diagLoc, diag::err_ret_local_block) << diagRange;
+    S.Diag(diagLoc, diag::err_ret_local_block) << diagRange;
   } else if (isa<AddrLabelExpr>(stackE)) { // address of label.
-    Diag(diagLoc, diag::warn_ret_addr_label) << diagRange;
+    S.Diag(diagLoc, diag::warn_ret_addr_label) << diagRange;
   } else { // local temporary.
-    Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_local_temp_ref
-                                             : diag::warn_ret_local_temp_addr)
+    S.Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_local_temp_ref
+                                               : diag::warn_ret_local_temp_addr)
      << diagRange;
   }
 
@@ -4010,8 +4475,8 @@
     // show the range of the expression.
     SourceRange range = (i < e-1) ? refVars[i+1]->getSourceRange()
                                   : stackE->getSourceRange();
-    Diag(VD->getLocation(), diag::note_ref_var_local_bind)
-      << VD->getDeclName() << range;
+    S.Diag(VD->getLocation(), diag::note_ref_var_local_bind)
+        << VD->getDeclName() << range;
   }
 }
 
@@ -4061,6 +4526,10 @@
   case Stmt::DeclRefExprClass: {
     DeclRefExpr *DR = cast<DeclRefExpr>(E);
 
+    // If we leave the immediate function, the lifetime isn't about to end.
+    if (DR->refersToEnclosingLocal())
+      return 0;
+
     if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
       // If this is a reference variable, follow through to the expression that
       // it points to.
@@ -4110,20 +4579,21 @@
     ConditionalOperator *C = cast<ConditionalOperator>(E);
 
     // Handle the GNU extension for missing LHS.
-    if (Expr *lhsExpr = C->getLHS()) {
-    // In C++, we can have a throw-expression, which has 'void' type.
-      if (!lhsExpr->getType()->isVoidType())
-        if (Expr* LHS = EvalAddr(lhsExpr, refVars, ParentDecl))
+    // FIXME: That isn't a ConditionalOperator, so doesn't get here.
+    if (Expr *LHSExpr = C->getLHS()) {
+      // In C++, we can have a throw-expression, which has 'void' type.
+      if (!LHSExpr->getType()->isVoidType())
+        if (Expr *LHS = EvalAddr(LHSExpr, refVars, ParentDecl))
           return LHS;
     }
 
     // In C++, we can have a throw-expression, which has 'void' type.
     if (C->getRHS()->getType()->isVoidType())
-      return NULL;
+      return 0;
 
     return EvalAddr(C->getRHS(), refVars, ParentDecl);
   }
-  
+
   case Stmt::BlockExprClass:
     if (cast<BlockExpr>(E)->getBlockDecl()->hasCaptures())
       return E; // local block.
@@ -4216,6 +4686,10 @@
     // local storage within the function, and if so, return the expression.
     DeclRefExpr *DR = cast<DeclRefExpr>(E);
 
+    // If we leave the immediate function, the lifetime isn't about to end.
+    if (DR->refersToEnclosingLocal())
+      return 0;
+
     if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl())) {
       // Check if it refers to itself, e.g. "int& i = i;".
       if (V == ParentDecl)
@@ -4263,9 +4737,16 @@
     ConditionalOperator *C = cast<ConditionalOperator>(E);
 
     // Handle the GNU extension for missing LHS.
-    if (Expr *lhsExpr = C->getLHS())
-      if (Expr *LHS = EvalVal(lhsExpr, refVars, ParentDecl))
-        return LHS;
+    if (Expr *LHSExpr = C->getLHS()) {
+      // In C++, we can have a throw-expression, which has 'void' type.
+      if (!LHSExpr->getType()->isVoidType())
+        if (Expr *LHS = EvalVal(LHSExpr, refVars, ParentDecl))
+          return LHS;
+    }
+
+    // In C++, we can have a throw-expression, which has 'void' type.
+    if (C->getRHS()->getType()->isVoidType())
+      return 0;
 
     return EvalVal(C->getRHS(), refVars, ParentDecl);
   }
@@ -4306,6 +4787,38 @@
 } while (true);
 }
 
+void
+Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
+                         SourceLocation ReturnLoc,
+                         bool isObjCMethod,
+                         const AttrVec *Attrs,
+                         const FunctionDecl *FD) {
+  CheckReturnStackAddr(*this, RetValExp, lhsType, ReturnLoc);
+
+  // Check if the return value is null but should not be.
+  if (Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs) &&
+      CheckNonNullExpr(*this, RetValExp))
+    Diag(ReturnLoc, diag::warn_null_ret)
+      << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
+
+  // C++11 [basic.stc.dynamic.allocation]p4:
+  //   If an allocation function declared with a non-throwing
+  //   exception-specification fails to allocate storage, it shall return
+  //   a null pointer. Any other allocation function that fails to allocate
+  //   storage shall indicate failure only by throwing an exception [...]
+  if (FD) {
+    OverloadedOperatorKind Op = FD->getOverloadedOperator();
+    if (Op == OO_New || Op == OO_Array_New) {
+      const FunctionProtoType *Proto
+        = FD->getType()->castAs<FunctionProtoType>();
+      if (!Proto->isNothrow(Context, /*ResultIfDependent*/true) &&
+          CheckNonNullExpr(*this, RetValExp))
+        Diag(ReturnLoc, diag::warn_operator_new_returns_null)
+          << FD << getLangOpts().CPlusPlus11;
+    }
+  }
+}
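// Illustrative sketch (not part of this change; C++ shown, names are made up):
// the two cases the added CheckReturnValExpr logic warns about.
//
//   #include <cstddef>
//   // A potentially-throwing allocation function must not return null.
//   void *operator new(std::size_t) {
//     return nullptr;  // warned: 'operator new' should not return a null pointer
//   }
//   __attribute__((returns_nonnull)) int *firstElement(int *P) {
//     return nullptr;  // warned: null returned from a returns_nonnull function
//   }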
+
 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
 
 /// Check for comparisons of floating point operands using != and ==.
@@ -4338,11 +4851,11 @@
 
   // Check for comparisons with builtin types.
   if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
-    if (CL->isBuiltinCall())
+    if (CL->getBuiltinCallee())
       return;
 
   if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
-    if (CR->isBuiltinCall())
+    if (CR->getBuiltinCallee())
       return;
 
   // Emit the diagnostic.
@@ -4920,9 +5433,11 @@
   else
     OS << Value;
 
-  S.Diag(E->getOperatorLoc(), diag::warn_out_of_range_compare)
-      << OS.str() << OtherT << IsTrue
-      << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
+                        S.PDiag(diag::warn_out_of_range_compare)
+                          << OS.str() << OtherT << IsTrue
+                          << E->getLHS()->getSourceRange()
+                          << E->getRHS()->getSourceRange());
 }
 
 /// Analyze the operands of the given comparison.  Implements the
@@ -5231,39 +5746,21 @@
   if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
     if (isa<StringLiteral>(E))
       // Warn on string literal to bool.  Checks for string literals in logical
-      // expressions, for instances, assert(0 && "error here"), is prevented
-      // by a check in AnalyzeImplicitConversions().
+      // AND expressions, for instance, assert(0 && "error here"), are
+      // prevented by a check in AnalyzeImplicitConversions().
       return DiagnoseImpCast(S, E, T, CC,
                              diag::warn_impcast_string_literal_to_bool);
-    if (Source->isFunctionType()) {
-      // Warn on function to bool. Checks free functions and static member
-      // functions. Weakly imported functions are excluded from the check,
-      // since it's common to test their value to check whether the linker
-      // found a definition for them.
-      ValueDecl *D = 0;
-      if (DeclRefExpr* R = dyn_cast<DeclRefExpr>(E)) {
-        D = R->getDecl();
-      } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
-        D = M->getMemberDecl();
-      }
-
-      if (D && !D->isWeak()) {
-        if (FunctionDecl* F = dyn_cast<FunctionDecl>(D)) {
-          S.Diag(E->getExprLoc(), diag::warn_impcast_function_to_bool)
-            << F << E->getSourceRange() << SourceRange(CC);
-          S.Diag(E->getExprLoc(), diag::note_function_to_bool_silence)
-            << FixItHint::CreateInsertion(E->getExprLoc(), "&");
-          QualType ReturnType;
-          UnresolvedSet<4> NonTemplateOverloads;
-          S.tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
-          if (!ReturnType.isNull() 
-              && ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
-            S.Diag(E->getExprLoc(), diag::note_function_to_bool_call)
-              << FixItHint::CreateInsertion(
-                 S.getPreprocessor().getLocForEndOfToken(E->getLocEnd()), "()");
-          return;
-        }
-      }
+    if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
+        isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
+      // This covers the literal expressions that evaluate to Objective-C
+      // objects.
+      return DiagnoseImpCast(S, E, T, CC,
+                             diag::warn_impcast_objective_c_literal_to_bool);
+    }
+    if (Source->isPointerType() || Source->canDecayToPointerType()) {
+      // Warn on pointer to bool conversion that is always true.
+      S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
+                                     SourceRange(CC));
     }
   }
 
@@ -5283,6 +5780,8 @@
     Source = cast<VectorType>(Source)->getElementType().getTypePtr();
     Target = cast<VectorType>(Target)->getElementType().getTypePtr();
   }
+  if (auto VecTy = dyn_cast<VectorType>(Target))
+    Target = VecTy->getElementType().getTypePtr();
 
   // Strip complex types.
   if (isa<ComplexType>(Source)) {
@@ -5583,15 +6082,16 @@
   // Now just recurse over the expression's children.
   CC = E->getExprLoc();
   BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
-  bool IsLogicalOperator = BO && BO->isLogicalOp();
+  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
   for (Stmt::child_range I = E->children(); I; ++I) {
     Expr *ChildExpr = dyn_cast_or_null<Expr>(*I);
     if (!ChildExpr)
       continue;
 
-    if (IsLogicalOperator &&
+    if (IsLogicalAndOperator &&
         isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
-      // Ignore checking string literals that are in logical operators.
+      // Ignore checking string literals that are in logical AND operators.
+      // This is a common pattern for asserts.
       continue;
     AnalyzeImplicitConversions(S, ChildExpr, CC);
   }
@@ -5599,6 +6099,125 @@
 
 } // end anonymous namespace
 
+enum {
+  AddressOf,
+  FunctionPointer,
+  ArrayPointer
+};
+
+/// \brief Diagnose pointers that are always non-null.
+/// \param E the expression containing the pointer
+/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
+/// compared to a null pointer
+/// \param IsEqual True when the comparison is equal to a null pointer
+/// \param Range Extra SourceRange to highlight in the diagnostic
+void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
+                                        Expr::NullPointerConstantKind NullKind,
+                                        bool IsEqual, SourceRange Range) {
+
+  // Don't warn inside macros.
+  if (E->getExprLoc().isMacroID())
+      return;
+  E = E->IgnoreImpCasts();
+
+  const bool IsCompare = NullKind != Expr::NPCK_NotNull;
+
+  bool IsAddressOf = false;
+
+  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+    if (UO->getOpcode() != UO_AddrOf)
+      return;
+    IsAddressOf = true;
+    E = UO->getSubExpr();
+  }
+
+  // Expect to find a single Decl.  Skip anything more complicated.
+  ValueDecl *D = 0;
+  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
+    D = R->getDecl();
+  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
+    D = M->getMemberDecl();
+  }
+
+  // Weak Decls can be null.
+  if (!D || D->isWeak())
+    return;
+
+  QualType T = D->getType();
+  const bool IsArray = T->isArrayType();
+  const bool IsFunction = T->isFunctionType();
+
+  if (IsAddressOf) {
+    // Address of function is used to silence the function warning.
+    if (IsFunction)
+      return;
+    // Address of reference can be null.
+    if (T->isReferenceType())
+      return;
+  }
+
+  // Found nothing.
+  if (!IsAddressOf && !IsFunction && !IsArray)
+    return;
+
+  // Pretty print the expression for the diagnostic.
+  std::string Str;
+  llvm::raw_string_ostream S(Str);
+  E->printPretty(S, 0, getPrintingPolicy());
+
+  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
+                              : diag::warn_impcast_pointer_to_bool;
+  unsigned DiagType;
+  if (IsAddressOf)
+    DiagType = AddressOf;
+  else if (IsFunction)
+    DiagType = FunctionPointer;
+  else if (IsArray)
+    DiagType = ArrayPointer;
+  else
+    llvm_unreachable("Could not determine diagnostic.");
+  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
+                                << Range << IsEqual;
+
+  if (!IsFunction)
+    return;
+
+  // Suggest '&' to silence the function warning.
+  Diag(E->getExprLoc(), diag::note_function_warning_silence)
+      << FixItHint::CreateInsertion(E->getLocStart(), "&");
+
+  // Check to see if '()' fixit should be emitted.
+  QualType ReturnType;
+  UnresolvedSet<4> NonTemplateOverloads;
+  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
+  if (ReturnType.isNull())
+    return;
+
+  if (IsCompare) {
+    // There are two cases here.  If there is a null pointer constant, only
+    // suggest the fix-it for a pointer return type.  If the null is a plain 0,
+    // also suggest it when the return type is a pointer or an integer type.
+    if (!ReturnType->isPointerType()) {
+      if (NullKind == Expr::NPCK_ZeroExpression ||
+          NullKind == Expr::NPCK_ZeroLiteral) {
+        if (!ReturnType->isIntegerType())
+          return;
+      } else {
+        return;
+      }
+    }
+  } else { // !IsCompare
+    // For function to bool, only suggest if the function pointer has bool
+    // return type.
+    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
+      return;
+  }
+  Diag(E->getExprLoc(), diag::note_function_to_function_call)
+      << FixItHint::CreateInsertion(
+             getPreprocessor().getLocForEndOfToken(E->getLocEnd()), "()");
+}
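// Illustrative sketch (not part of this change; names are made up):
// expressions that DiagnoseAlwaysNonNullPointer flags, either as a bool
// conversion or, per the \param doc above, as a comparison against null.
//
//   bool ready();
//   int Table[16];
//   void use(int X) {
//     if (ready) {}          // warned: function is never null; notes suggest '&ready' or 'ready()'
//     if (Table == 0) {}     // warned: array is never null, so the comparison is always false
//     if (&X == nullptr) {}  // warned: the address of local 'X' is never null
//   }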
+
+
 /// Diagnoses "dangerous" implicit conversions within the given
 /// expression (which is a full expression).  Implements -Wconversion
 /// and -Wsign-compare.
@@ -6189,11 +6808,23 @@
 
     // MSVC destroys objects passed by value in the callee.  Therefore a
     // function definition which takes such a parameter must be able to call the
-    // object's destructor.
-    if (getLangOpts().CPlusPlus &&
-        Context.getTargetInfo().getCXXABI().isArgumentDestroyedByCallee()) {
-      if (const RecordType *RT = Param->getType()->getAs<RecordType>())
-        FinalizeVarWithDestructor(Param, RT);
+    // object's destructor.  However, we don't perform any direct access check
+    // on the dtor.
+    if (getLangOpts().CPlusPlus && Context.getTargetInfo()
+                                       .getCXXABI()
+                                       .areArgsDestroyedLeftToRightInCallee()) {
+      if (!Param->isInvalidDecl()) {
+        if (const RecordType *RT = Param->getType()->getAs<RecordType>()) {
+          CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+          if (!ClassDecl->isInvalidDecl() &&
+              !ClassDecl->hasIrrelevantDestructor() &&
+              !ClassDecl->isDependentContext()) {
+            CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
+            MarkFunctionReferenced(Param->getLocation(), Destructor);
+            DiagnoseUseOfDecl(Destructor, Param->getLocation());
+          }
+        }
+      }
     }
   }
 
@@ -6784,7 +7415,7 @@
                               Expr *LHS, Expr *RHS) {
   QualType LHSType;
   // PropertyRef on LHS type need be directly obtained from
-  // its declaration as it has a PsuedoType.
+  // its declaration as it has a PseudoType.
   ObjCPropertyRefExpr *PRE
     = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
   if (PRE && !PRE->isImplicitProperty()) {
@@ -7057,21 +7688,16 @@
                              RecordDecl *RD1,
                              RecordDecl *RD2) {
   llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
-  for (RecordDecl::field_iterator Field2 = RD2->field_begin(),
-                                  Field2End = RD2->field_end();
-       Field2 != Field2End; ++Field2) {
-    UnmatchedFields.insert(*Field2);
-  }
+  for (auto *Field2 : RD2->fields())
+    UnmatchedFields.insert(Field2);
 
-  for (RecordDecl::field_iterator Field1 = RD1->field_begin(),
-                                  Field1End = RD1->field_end();
-       Field1 != Field1End; ++Field1) {
+  for (auto *Field1 : RD1->fields()) {
     llvm::SmallPtrSet<FieldDecl *, 8>::iterator
         I = UnmatchedFields.begin(),
         E = UnmatchedFields.end();
 
     for ( ; I != E; ++I) {
-      if (isLayoutCompatible(C, *Field1, *I)) {
+      if (isLayoutCompatible(C, Field1, *I)) {
         bool Result = UnmatchedFields.erase(*I);
         (void) Result;
         assert(Result);
@@ -7236,10 +7862,7 @@
     return false;
 
   if (VD) {
-    for (specific_attr_iterator<TypeTagForDatatypeAttr>
-             I = VD->specific_attr_begin<TypeTagForDatatypeAttr>(),
-             E = VD->specific_attr_end<TypeTagForDatatypeAttr>();
-         I != E; ++I) {
+    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
       if (I->getArgumentKind() != ArgumentKind) {
         FoundWrongKind = true;
         return false;
@@ -7372,8 +7995,9 @@
 
   if (mismatch)
     Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
-        << ArgumentType << ArgumentKind->getName()
+        << ArgumentType << ArgumentKind
         << TypeInfo.LayoutCompatible << RequiredType
         << ArgumentExpr->getSourceRange()
         << TypeTagExpr->getSourceRange();
 }
+
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index 7a1b36b..d44c04b 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -660,13 +660,10 @@
     return C.getObjCInterfaceType(Iface);
   
   QualType T;
-  if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND))
+  if (const FunctionDecl *Function = ND->getAsFunction())
     T = Function->getCallResultType();
   else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
     T = Method->getSendResultType();
-  else if (const FunctionTemplateDecl *FunTmpl =
-               dyn_cast<FunctionTemplateDecl>(ND))
-    T = FunTmpl->getTemplatedDecl()->getCallResultType();
   else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
     T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
   else if (const ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
@@ -700,7 +697,7 @@
     }
 
     if (const FunctionType *Function = T->getAs<FunctionType>()) {
-      T = Function->getResultType();
+      T = Function->getReturnType();
       continue;
     }
 
@@ -1256,9 +1253,9 @@
   public:
     CodeCompletionDeclConsumer(ResultBuilder &Results, DeclContext *CurContext)
       : Results(Results), CurContext(CurContext) { }
-    
-    virtual void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
-                           bool InBaseClass) {
+
+    void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
+                   bool InBaseClass) override {
       bool Accessible = true;
       if (Ctx)
         Accessible = Results.getSema().IsSimplyAccessible(ND, Ctx);
@@ -1784,10 +1781,10 @@
     // know the function is void or not.
     bool isVoid = false;
     if (FunctionDecl *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
-      isVoid = Function->getResultType()->isVoidType();
+      isVoid = Function->getReturnType()->isVoidType();
     else if (ObjCMethodDecl *Method
                                  = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
-      isVoid = Method->getResultType()->isVoidType();
+      isVoid = Method->getReturnType()->isVoidType();
     else if (SemaRef.getCurBlock() && 
              !SemaRef.getCurBlock()->ReturnType.isNull())
       isVoid = SemaRef.getCurBlock()->ReturnType->isVoidType();
@@ -2066,14 +2063,11 @@
     return;
 
   // Determine the type of the declaration (if it has a type).
-  QualType T;  
-  if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND))
-    T = Function->getResultType();
+  QualType T;
+  if (const FunctionDecl *Function = ND->getAsFunction())
+    T = Function->getReturnType();
   else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
-    T = Method->getResultType();
-  else if (const FunctionTemplateDecl *FunTmpl =
-               dyn_cast<FunctionTemplateDecl>(ND))
-    T = FunTmpl->getTemplatedDecl()->getResultType();
+    T = Method->getReturnType();
   else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
     T = Context.getTypeDeclType(cast<TypeDecl>(Enumerator->getDeclContext()));
   else if (isa<UnresolvedUsingValueDecl>(ND)) {
@@ -2206,26 +2200,26 @@
   // We have the function prototype behind the block pointer type, as it was
   // written in the source.
   std::string Result;
-  QualType ResultType = Block.getTypePtr()->getResultType();
+  QualType ResultType = Block.getTypePtr()->getReturnType();
   if (!ResultType->isVoidType() || SuppressBlock)
     ResultType.getAsStringInternal(Result, Policy);
 
   // Format the parameter list.
   std::string Params;
-  if (!BlockProto || Block.getNumArgs() == 0) {
+  if (!BlockProto || Block.getNumParams() == 0) {
     if (BlockProto && BlockProto.getTypePtr()->isVariadic())
       Params = "(...)";
     else
       Params = "(void)";
   } else {
     Params += "(";
-    for (unsigned I = 0, N = Block.getNumArgs(); I != N; ++I) {
+    for (unsigned I = 0, N = Block.getNumParams(); I != N; ++I) {
       if (I)
         Params += ", ";
-      Params += FormatFunctionParameter(Context, Policy, Block.getArg(I),
-                                        /*SuppressName=*/false, 
+      Params += FormatFunctionParameter(Context, Policy, Block.getParam(I),
+                                        /*SuppressName=*/false,
                                         /*SuppressBlock=*/true);
-      
+
       if (I == N - 1 && BlockProto.getTypePtr()->isVariadic())
         Params += ", ...";
     }
@@ -2297,7 +2291,7 @@
   if (const FunctionProtoType *Proto 
         = Function->getType()->getAs<FunctionProtoType>())
     if (Proto->isVariadic()) {
-      if (Proto->getNumArgs() == 0)
+      if (Proto->getNumParams() == 0)
         Result.AddPlaceholderChunk("...");
 
       MaybeAddSentinel(Context, Function, Result);
@@ -2644,12 +2638,9 @@
     return Result.TakeString();
   }
 
-  for (Decl::attr_iterator i = ND->attr_begin(); i != ND->attr_end(); ++i) {
-    if (AnnotateAttr *Attr = dyn_cast_or_null<AnnotateAttr>(*i)) {
-      Result.AddAnnotation(Result.getAllocator().CopyString(Attr->getAnnotation()));
-    }
-  }
-  
+  for (const auto *I : ND->specific_attrs<AnnotateAttr>())
+    Result.AddAnnotation(Result.getAllocator().CopyString(I->getAnnotation()));
+
   AddResultTypeChunk(Ctx, Policy, ND, Result);
   
   if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
@@ -2836,9 +2827,8 @@
     // Function without a prototype. Just give the return type and a 
     // highlighted ellipsis.
     const FunctionType *FT = getFunctionType();
-    Result.AddTextChunk(GetCompletionTypeString(FT->getResultType(),
-                                                S.Context, Policy, 
-                                                Result.getAllocator()));
+    Result.AddTextChunk(GetCompletionTypeString(FT->getReturnType(), S.Context,
+                                                Policy, Result.getAllocator()));
     Result.AddChunk(CodeCompletionString::CK_LeftParen);
     Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
     Result.AddChunk(CodeCompletionString::CK_RightParen);
@@ -2849,12 +2839,11 @@
     Result.AddTextChunk(
                     Result.getAllocator().CopyString(FDecl->getNameAsString()));
   else
-    Result.AddTextChunk(
-         Result.getAllocator().CopyString(
-                                  Proto->getResultType().getAsString(Policy)));
-  
+    Result.AddTextChunk(Result.getAllocator().CopyString(
+        Proto->getReturnType().getAsString(Policy)));
+
   Result.AddChunk(CodeCompletionString::CK_LeftParen);
-  unsigned NumParams = FDecl? FDecl->getNumParams() : Proto->getNumArgs();
+  unsigned NumParams = FDecl ? FDecl->getNumParams() : Proto->getNumParams();
   for (unsigned I = 0; I != NumParams; ++I) {
     if (I)
       Result.AddChunk(CodeCompletionString::CK_Comma);
@@ -2866,7 +2855,7 @@
       ArgString = FDecl->getParamDecl(I)->getNameAsString();
       ArgType = FDecl->getParamDecl(I)->getOriginalType();
     } else {
-      ArgType = Proto->getArgType(I);
+      ArgType = Proto->getParamType(I);
     }
     
     ArgType.getAsStringInternal(ArgString, Policy);
@@ -3109,13 +3098,9 @@
   
   // We need to have names for all of the parameters, if we're going to 
   // generate a forwarding call.
-  for (CXXMethodDecl::param_iterator P = Method->param_begin(),
-                                  PEnd = Method->param_end();
-       P != PEnd;
-       ++P) {
-    if (!(*P)->getDeclName())
+  for (auto P : Method->params())
+    if (!P->getDeclName())
       return;
-  }
 
   PrintingPolicy Policy = getCompletionPrintingPolicy(S);
   for (CXXMethodDecl::method_iterator M = Method->begin_overridden_methods(),
@@ -3145,16 +3130,14 @@
                                          Overridden->getNameAsString()));
     Builder.AddChunk(CodeCompletionString::CK_LeftParen);
     bool FirstParam = true;
-    for (CXXMethodDecl::param_iterator P = Method->param_begin(),
-                                    PEnd = Method->param_end();
-         P != PEnd; ++P) {
+    for (auto P : Method->params()) {
       if (FirstParam)
         FirstParam = false;
       else
         Builder.AddChunk(CodeCompletionString::CK_Comma);
 
-      Builder.AddPlaceholderChunk(Results.getAllocator().CopyString( 
-                                        (*P)->getIdentifier()->getName()));
+      Builder.AddPlaceholderChunk(
+          Results.getAllocator().CopyString(P->getIdentifier()->getName()));
     }
     Builder.AddChunk(CodeCompletionString::CK_RightParen);
     Results.AddResult(CodeCompletionResult(Builder.TakeString(),
@@ -3475,32 +3458,26 @@
   Container = getContainerDef(Container);
   
   // Add properties in this container.
-  for (ObjCContainerDecl::prop_iterator P = Container->prop_begin(),
-                                     PEnd = Container->prop_end();
-       P != PEnd;
-       ++P) {
+  for (const auto *P : Container->properties())
     if (AddedProperties.insert(P->getIdentifier()))
-      Results.MaybeAddResult(Result(*P, Results.getBasePriority(*P), 0),
+      Results.MaybeAddResult(Result(P, Results.getBasePriority(P), 0),
                              CurContext);
-  }
   
   // Add nullary methods
   if (AllowNullaryMethods) {
     ASTContext &Context = Container->getASTContext();
     PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
-    for (ObjCContainerDecl::method_iterator M = Container->meth_begin(),
-                                         MEnd = Container->meth_end();
-         M != MEnd; ++M) {
+    for (auto *M : Container->methods()) {
       if (M->getSelector().isUnarySelector())
         if (IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0))
           if (AddedProperties.insert(Name)) {
             CodeCompletionBuilder Builder(Results.getAllocator(),
                                           Results.getCodeCompletionTUInfo());
-            AddResultTypeChunk(Context, Policy, *M, Builder);
+            AddResultTypeChunk(Context, Policy, M, Builder);
             Builder.AddTypedTextChunk(
                             Results.getAllocator().CopyString(Name->getName()));
             
-            Results.MaybeAddResult(Result(Builder.TakeString(), *M,
+            Results.MaybeAddResult(Result(Builder.TakeString(), M,
                                   CCP_MemberDeclaration + CCD_MethodAsProperty),
                                           CurContext);
           }
@@ -3510,27 +3487,20 @@
   
   // Add properties in referenced protocols.
   if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
-    for (ObjCProtocolDecl::protocol_iterator P = Protocol->protocol_begin(),
-                                          PEnd = Protocol->protocol_end();
-         P != PEnd; ++P)
-      AddObjCProperties(*P, AllowCategories, AllowNullaryMethods, CurContext, 
+    for (auto *P : Protocol->protocols())
+      AddObjCProperties(P, AllowCategories, AllowNullaryMethods, CurContext, 
                         AddedProperties, Results);
   } else if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)){
     if (AllowCategories) {
       // Look through categories.
-      for (ObjCInterfaceDecl::known_categories_iterator
-             Cat = IFace->known_categories_begin(),
-             CatEnd = IFace->known_categories_end();
-           Cat != CatEnd; ++Cat)
-        AddObjCProperties(*Cat, AllowCategories, AllowNullaryMethods,
-                          CurContext, AddedProperties, Results);
+      for (auto *Cat : IFace->known_categories())
+        AddObjCProperties(Cat, AllowCategories, AllowNullaryMethods, CurContext,
+                          AddedProperties, Results);
     }
-    
+
     // Look through protocols.
-    for (ObjCInterfaceDecl::all_protocol_iterator
-         I = IFace->all_referenced_protocol_begin(),
-         E = IFace->all_referenced_protocol_end(); I != E; ++I)
-      AddObjCProperties(*I, AllowCategories, AllowNullaryMethods, CurContext,
+    for (auto *I : IFace->all_referenced_protocols())
+      AddObjCProperties(I, AllowCategories, AllowNullaryMethods, CurContext,
                         AddedProperties, Results);
     
     // Look in the superclass.
@@ -3541,10 +3511,8 @@
   } else if (const ObjCCategoryDecl *Category
                                     = dyn_cast<ObjCCategoryDecl>(Container)) {
     // Look through protocols.
-    for (ObjCCategoryDecl::protocol_iterator P = Category->protocol_begin(),
-                                          PEnd = Category->protocol_end(); 
-         P != PEnd; ++P)
-      AddObjCProperties(*P, AllowCategories, AllowNullaryMethods, CurContext,
+    for (auto *P : Category->protocols())
+      AddObjCProperties(P, AllowCategories, AllowNullaryMethods, CurContext,
                         AddedProperties, Results);
   }
 }
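The property and method walks above all move from explicit prop_iterator/method_iterator/protocol_iterator pairs to the range accessors (properties(), methods(), protocols(), known_categories()). A minimal sketch of the same pattern, assuming Clang ~3.5 headers; countProperties is a hypothetical helper:

#include "clang/AST/DeclObjC.h"

// Hypothetical helper: counts properties on a container and, for a protocol,
// on the protocols it inherits, using the range accessors adopted above.
static unsigned countProperties(clang::ObjCContainerDecl *Container) {
  unsigned Count = 0;
  for (const auto *P : Container->properties()) {
    (void)P;
    ++Count;
  }
  if (auto *Proto = llvm::dyn_cast<clang::ObjCProtocolDecl>(Container))
    for (auto *Inherited : Proto->protocols())
      Count += countProperties(Inherited);
  return Count;
}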
@@ -3636,10 +3604,8 @@
                       AddedProperties, Results);
     
     // Add properties from the protocols in a qualified interface.
-    for (ObjCObjectPointerType::qual_iterator I = ObjCPtr->qual_begin(),
-                                              E = ObjCPtr->qual_end();
-         I != E; ++I)
-      AddObjCProperties(*I, true, /*AllowNullaryMethods=*/true, CurContext, 
+    for (auto *I : ObjCPtr->quals())
+      AddObjCProperties(I, true, /*AllowNullaryMethods=*/true, CurContext, 
                         AddedProperties, Results);
   } else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
              (!IsArrow && BaseType->isObjCObjectType())) {
@@ -3809,13 +3775,11 @@
                         CodeCompleter->getCodeCompletionTUInfo(),
                         CodeCompletionContext::CCC_Expression);
   Results.EnterNewScope();
-  for (EnumDecl::enumerator_iterator E = Enum->enumerator_begin(),
-                                  EEnd = Enum->enumerator_end();
-       E != EEnd; ++E) {
-    if (EnumeratorsSeen.count(*E))
+  for (auto *E : Enum->enumerators()) {
+    if (EnumeratorsSeen.count(E))
       continue;
     
-    CodeCompletionResult R(*E, CCP_EnumInCase, Qualifier);
+    CodeCompletionResult R(E, CCP_EnumInCase, Qualifier);
     Results.AddResult(R, CurContext, 0, false);
   }
   Results.ExitScope();
@@ -3833,22 +3797,6 @@
                             Results.data(),Results.size());
 }
 
-namespace {
-  struct IsBetterOverloadCandidate {
-    Sema &S;
-    SourceLocation Loc;
-    
-  public:
-    explicit IsBetterOverloadCandidate(Sema &S, SourceLocation Loc)
-      : S(S), Loc(Loc) { }
-    
-    bool 
-    operator()(const OverloadCandidate &X, const OverloadCandidate &Y) const {
-      return isBetterOverloadCandidate(S, X, Y, Loc);
-    }
-  };
-}
-
 static bool anyNullArguments(ArrayRef<Expr *> Args) {
   if (Args.size() && !Args.data())
     return true;
@@ -3910,9 +3858,12 @@
   
   if (!CandidateSet.empty()) {
     // Sort the overload candidate set by placing the best overloads first.
-    std::stable_sort(CandidateSet.begin(), CandidateSet.end(),
-                     IsBetterOverloadCandidate(*this, Loc));
-  
+    std::stable_sort(
+        CandidateSet.begin(), CandidateSet.end(),
+        [&](const OverloadCandidate &X, const OverloadCandidate &Y) {
+          return isBetterOverloadCandidate(*this, X, Y, Loc);
+        });
+
     // Add the remaining viable overload candidates as code-completion results.
     for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
                                      CandEnd = CandidateSet.end();
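The IsBetterOverloadCandidate functor removed above existed only to carry Sema and a SourceLocation into the comparison; the replacement captures them in a lambda right at the std::stable_sort call. A self-contained illustration of the same pattern (Candidate and rank are invented for the example, not Clang types):

#include <algorithm>
#include <vector>

struct Candidate { int Viability; };

// Stand-in for the real comparison: ranks a candidate in some context.
static int rank(const Candidate &C, int Context) {
  return C.Viability + Context;
}

// The capturing lambda replaces a hand-written comparator struct.
static void sortCandidates(std::vector<Candidate> &Set, int Context) {
  std::stable_sort(Set.begin(), Set.end(),
                   [&](const Candidate &X, const Candidate &Y) {
                     return rank(X, Context) > rank(Y, Context);
                   });
}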
@@ -3925,12 +3876,13 @@
     for (unsigned I = 0, N = Results.size(); I != N; ++I) {
       if (const FunctionType *FType = Results[I].getFunctionType())
         if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FType))
-          if (Args.size() < Proto->getNumArgs()) {
+          if (Args.size() < Proto->getNumParams()) {
             if (ParamType.isNull())
-              ParamType = Proto->getArgType(Args.size());
+              ParamType = Proto->getParamType(Args.size());
             else if (!Context.hasSameUnqualifiedType(
-                                            ParamType.getNonReferenceType(),
-                       Proto->getArgType(Args.size()).getNonReferenceType())) {
+                          ParamType.getNonReferenceType(),
+                          Proto->getParamType(Args.size())
+                              .getNonReferenceType())) {
               ParamType = QualType();
               break;
             }
@@ -3951,8 +3903,8 @@
     
     if (const FunctionProtoType *Proto
                                   = FunctionType->getAs<FunctionProtoType>()) {
-      if (Args.size() < Proto->getNumArgs())
-        ParamType = Proto->getArgType(Args.size());
+      if (Args.size() < Proto->getNumParams())
+        ParamType = Proto->getParamType(Args.size());
     }
   }
 
@@ -3982,10 +3934,10 @@
     if (BlockScopeInfo *BSI = getCurBlock())
       ResultType = BSI->ReturnType;
   } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(CurContext))
-    ResultType = Function->getResultType();
+    ResultType = Function->getReturnType();
   else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(CurContext))
-    ResultType = Method->getResultType();
-  
+    ResultType = Method->getReturnType();
+
   if (ResultType.isNull())
     CodeCompleteOrdinaryName(S, PCC_Expression);
   else
@@ -4080,7 +4032,7 @@
   
   // The "template" keyword can follow "::" in the grammar, but only
   // put it into the grammar if the nested-name-specifier is dependent.
-  NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
+  NestedNameSpecifier *NNS = SS.getScopeRep();
   if (!Results.empty() && NNS->isDependent())
     Results.AddResult("template");
 
@@ -4274,21 +4226,19 @@
                                 Results.getCodeCompletionTUInfo());
   bool SawLastInitializer = Initializers.empty();
   CXXRecordDecl *ClassDecl = Constructor->getParent();
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
-                                       BaseEnd = ClassDecl->bases_end();
-       Base != BaseEnd; ++Base) {
-    if (!InitializedBases.insert(Context.getCanonicalType(Base->getType()))) {
+  for (const auto &Base : ClassDecl->bases()) {
+    if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))) {
       SawLastInitializer
         = !Initializers.empty() && 
           Initializers.back()->isBaseInitializer() &&
-          Context.hasSameUnqualifiedType(Base->getType(),
+          Context.hasSameUnqualifiedType(Base.getType(),
                QualType(Initializers.back()->getBaseClass(), 0));
       continue;
     }
     
     Builder.AddTypedTextChunk(
                Results.getAllocator().CopyString(
-                          Base->getType().getAsString(Policy)));
+                          Base.getType().getAsString(Policy)));
     Builder.AddChunk(CodeCompletionString::CK_LeftParen);
     Builder.AddPlaceholderChunk("args");
     Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -4299,21 +4249,19 @@
   }
   
   // Add completions for virtual base classes.
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
-                                       BaseEnd = ClassDecl->vbases_end();
-       Base != BaseEnd; ++Base) {
-    if (!InitializedBases.insert(Context.getCanonicalType(Base->getType()))) {
+  for (const auto &Base : ClassDecl->vbases()) {
+    if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))) {
       SawLastInitializer
         = !Initializers.empty() && 
           Initializers.back()->isBaseInitializer() &&
-          Context.hasSameUnqualifiedType(Base->getType(),
+          Context.hasSameUnqualifiedType(Base.getType(),
                QualType(Initializers.back()->getBaseClass(), 0));
       continue;
     }
     
     Builder.AddTypedTextChunk(
                Builder.getAllocator().CopyString(
-                          Base->getType().getAsString(Policy)));
+                          Base.getType().getAsString(Policy)));
     Builder.AddChunk(CodeCompletionString::CK_LeftParen);
     Builder.AddPlaceholderChunk("args");
     Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -4324,14 +4272,12 @@
   }
   
   // Add completions for members.
-  for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
-                                  FieldEnd = ClassDecl->field_end();
-       Field != FieldEnd; ++Field) {
+  for (auto *Field : ClassDecl->fields()) {
     if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))) {
       SawLastInitializer
         = !Initializers.empty() && 
           Initializers.back()->isAnyMemberInitializer() &&
-          Initializers.back()->getAnyMember() == *Field;
+          Initializers.back()->getAnyMember() == Field;
       continue;
     }
     
@@ -4348,7 +4294,7 @@
                                                      : CCP_MemberDeclaration,
                                            CXCursor_MemberRef,
                                            CXAvailability_Available,
-                                           *Field));
+                                           Field));
     SawLastInitializer = false;
   }
   Results.ExitScope();
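The member-initializer completion above now walks bases(), vbases() and fields() directly; note that the base ranges yield CXXBaseSpecifier references (hence Base.getType()), while fields() yields FieldDecl pointers. A minimal sketch, assuming Clang ~3.5 headers; dumpRecordShape is a hypothetical helper:

#include "clang/AST/DeclCXX.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: lists direct bases, virtual bases and fields of a class.
static void dumpRecordShape(const clang::CXXRecordDecl *RD) {
  for (const auto &Base : RD->bases())
    llvm::outs() << "base:  " << Base.getType().getAsString() << "\n";
  for (const auto &VBase : RD->vbases())
    llvm::outs() << "vbase: " << VBase.getType().getAsString() << "\n";
  for (const auto *Field : RD->fields())
    llvm::outs() << "field: " << Field->getNameAsString() << "\n";
}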
@@ -4389,9 +4335,8 @@
   
   // Look for other capturable variables.
   for (; S && !isNamespaceScope(S); S = S->getParent()) {
-    for (Scope::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
-         D != DEnd; ++D) {
-      VarDecl *Var = dyn_cast<VarDecl>(*D);
+    for (const auto *D : S->decls()) {
+      const auto *Var = dyn_cast<VarDecl>(D);
       if (!Var ||
           !Var->hasLocalStorage() ||
           Var->hasAttr<BlocksAttr>())
@@ -4759,7 +4704,7 @@
     CodeCompletionBuilder Setter(Results.getAllocator(),
                                  Results.getCodeCompletionTUInfo());
     Setter.AddTypedTextChunk("setter");
-    Setter.AddTextChunk(" = ");
+    Setter.AddTextChunk("=");
     Setter.AddPlaceholderChunk("method");
     Results.AddResult(CodeCompletionResult(Setter.TakeString()));
   }
@@ -4767,7 +4712,7 @@
     CodeCompletionBuilder Getter(Results.getAllocator(),
                                  Results.getCodeCompletionTUInfo());
     Getter.AddTypedTextChunk("getter");
-    Getter.AddTextChunk(" = ");
+    Getter.AddTextChunk("=");
     Getter.AddPlaceholderChunk("method");
     Results.AddResult(CodeCompletionResult(Getter.TakeString()));
   }
@@ -4856,22 +4801,20 @@
   Container = getContainerDef(Container);
   ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container);
   bool isRootClass = IFace && !IFace->getSuperClass();
-  for (ObjCContainerDecl::method_iterator M = Container->meth_begin(),
-                                       MEnd = Container->meth_end();
-       M != MEnd; ++M) {
+  for (auto *M : Container->methods()) {
     // The instance methods on the root class can be messaged via the
     // metaclass.
     if (M->isInstanceMethod() == WantInstanceMethods ||
         (isRootClass && !WantInstanceMethods)) {
       // Check whether the selector identifiers we've been given are a 
       // subset of the identifiers for this particular method.
-      if (!isAcceptableObjCMethod(*M, WantKind, SelIdents, AllowSameLength))
+      if (!isAcceptableObjCMethod(M, WantKind, SelIdents, AllowSameLength))
         continue;
 
       if (!Selectors.insert(M->getSelector()))
         continue;
       
-      Result R = Result(*M, Results.getBasePriority(*M), 0);
+      Result R = Result(M, Results.getBasePriority(M), 0);
       R.StartParameter = SelIdents.size();
       R.AllParametersAreInformative = (WantKind != MK_Any);
       if (!InOriginalClass)
@@ -4897,19 +4840,12 @@
     return;
   
   // Add methods in protocols.
-  for (ObjCInterfaceDecl::protocol_iterator I = IFace->protocol_begin(),
-                                            E = IFace->protocol_end();
-       I != E; ++I)
-    AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
+  for (auto *I : IFace->protocols())
+    AddObjCMethods(I, WantInstanceMethods, WantKind, SelIdents,
                    CurContext, Selectors, AllowSameLength, Results, false);
   
   // Add methods in categories.
-  for (ObjCInterfaceDecl::known_categories_iterator
-         Cat = IFace->known_categories_begin(),
-         CatEnd = IFace->known_categories_end();
-       Cat != CatEnd; ++Cat) {
-    ObjCCategoryDecl *CatDecl = *Cat;
-    
+  for (auto *CatDecl : IFace->known_categories()) {
     AddObjCMethods(CatDecl, WantInstanceMethods, WantKind, SelIdents,
                    CurContext, Selectors, AllowSameLength,
                    Results, InOriginalClass);
@@ -5181,10 +5117,7 @@
 
     // Check in categories or class extensions.
     if (!SuperMethod) {
-      for (ObjCInterfaceDecl::known_categories_iterator
-             Cat = Class->known_categories_begin(),
-             CatEnd = Class->known_categories_end();
-           Cat != CatEnd; ++Cat) {
+      for (const auto *Cat : Class->known_categories()) {
         if ((SuperMethod = Cat->getMethod(CurMethod->getSelector(),
                                                CurMethod->isInstanceMethod())))
           break;
@@ -5536,7 +5469,7 @@
   // If we're messaging an expression with type "id" or "Class", check
   // whether we know something special about the receiver that allows
   // us to assume a more-specific receiver type.
-  if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType())
+  if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType()) {
     if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr)) {
       if (ReceiverType->isObjCClassType())
         return CodeCompleteObjCClassMessage(S, 
@@ -5547,6 +5480,13 @@
       ReceiverType = Context.getObjCObjectPointerType(
                                           Context.getObjCInterfaceType(IFace));
     }
+  } else if (RecExpr && getLangOpts().CPlusPlus) {
+    ExprResult Conv = PerformContextuallyConvertToObjCPointer(RecExpr);
+    if (Conv.isUsable()) {
+      RecExpr = Conv.take();
+      ReceiverType = RecExpr->getType();
+    }
+  }
 
   // Build the set of methods we can see.
   ResultBuilder Results(*this, CodeCompleter->getAllocator(),
@@ -5587,10 +5527,8 @@
   else if (const ObjCObjectPointerType *QualID
              = ReceiverType->getAsObjCQualifiedIdType()) {
     // Search protocols for instance methods.
-    for (ObjCObjectPointerType::qual_iterator I = QualID->qual_begin(),
-                                              E = QualID->qual_end(); 
-         I != E; ++I)
-      AddObjCMethods(*I, true, MK_Any, SelIdents, CurContext,
+    for (auto *I : QualID->quals())
+      AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
                      Selectors, AtArgumentExpression, Results);
   }
   // Handle messages to a pointer to interface type.
@@ -5602,10 +5540,8 @@
                    Results);
     
     // Search protocols for instance methods.
-    for (ObjCObjectPointerType::qual_iterator I = IFacePtr->qual_begin(),
-         E = IFacePtr->qual_end(); 
-         I != E; ++I)
-      AddObjCMethods(*I, true, MK_Any, SelIdents, CurContext,
+    for (auto *I : IFacePtr->quals())
+      AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
                      Selectors, AtArgumentExpression, Results);
   }
   // Handle messages to "id".
@@ -5750,11 +5686,9 @@
                                ResultBuilder &Results) {
   typedef CodeCompletionResult Result;
   
-  for (DeclContext::decl_iterator D = Ctx->decls_begin(), 
-                               DEnd = Ctx->decls_end();
-       D != DEnd; ++D) {
+  for (const auto *D : Ctx->decls()) {
     // Record any protocols we find.
-    if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*D))
+    if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(D))
       if (!OnlyForwardDeclarations || !Proto->hasDefinition())
         Results.AddResult(Result(Proto, Results.getBasePriority(Proto), 0),
                           CurContext, 0, false);
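This hunk and several below switch DeclContext walks to the decls() range and let 'const auto *' pick the pointee type up from dyn_cast. A minimal sketch of the idiom, assuming Clang ~3.5 headers; countProtocolDecls is a hypothetical helper:

#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclObjC.h"

// Hypothetical helper: counts Objective-C protocol declarations in a context.
static unsigned countProtocolDecls(const clang::DeclContext *Ctx) {
  unsigned Count = 0;
  for (const auto *D : Ctx->decls())
    if (llvm::isa<clang::ObjCProtocolDecl>(D))
      ++Count;
  return Count;
}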
@@ -5818,11 +5752,9 @@
                                 ResultBuilder &Results) {
   typedef CodeCompletionResult Result;
   
-  for (DeclContext::decl_iterator D = Ctx->decls_begin(), 
-                               DEnd = Ctx->decls_end();
-       D != DEnd; ++D) {
+  for (const auto *D : Ctx->decls()) {
     // Record any interfaces we find.
-    if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*D))
+    if (const auto *Class = dyn_cast<ObjCInterfaceDecl>(D))
       if ((!OnlyForwardDeclarations || !Class->hasDefinition()) &&
           (!OnlyUnimplemented || !Class->getImplementation()))
         Results.AddResult(Result(Class, Results.getBasePriority(Class), 0),
@@ -5909,21 +5841,15 @@
   NamedDecl *CurClass
     = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
   if (ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass)){
-    for (ObjCInterfaceDecl::visible_categories_iterator
-           Cat = Class->visible_categories_begin(),
-           CatEnd = Class->visible_categories_end();
-         Cat != CatEnd; ++Cat) {
+    for (const auto *Cat : Class->visible_categories())
       CategoryNames.insert(Cat->getIdentifier());
-    }
   }
 
   // Add all of the categories we know about.
   Results.EnterNewScope();
   TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
-  for (DeclContext::decl_iterator D = TU->decls_begin(), 
-                               DEnd = TU->decls_end();
-       D != DEnd; ++D) 
-    if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(*D))
+  for (const auto *D : TU->decls()) 
+    if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
       if (CategoryNames.insert(Category->getIdentifier()))
         Results.AddResult(Result(Category, Results.getBasePriority(Category),0),
                           CurContext, 0, false);
@@ -5959,13 +5885,10 @@
   Results.EnterNewScope();
   bool IgnoreImplemented = true;
   while (Class) {
-    for (ObjCInterfaceDecl::visible_categories_iterator
-           Cat = Class->visible_categories_begin(),
-           CatEnd = Class->visible_categories_end();
-         Cat != CatEnd; ++Cat) {
+    for (const auto *Cat : Class->visible_categories()) {
       if ((!IgnoreImplemented || !Cat->getImplementation()) &&
           CategoryNames.insert(Cat->getIdentifier()))
-        Results.AddResult(Result(*Cat, Results.getBasePriority(*Cat), 0),
+        Results.AddResult(Result(Cat, Results.getBasePriority(Cat), 0),
                           CurContext, 0, false);
     }
     
@@ -5994,10 +5917,8 @@
 
   // Ignore any properties that have already been implemented.
   Container = getContainerDef(Container);
-  for (DeclContext::decl_iterator D = Container->decls_begin(),
-                               DEnd = Container->decls_end();
-       D != DEnd; ++D)
-    if (ObjCPropertyImplDecl *PropertyImpl = dyn_cast<ObjCPropertyImplDecl>(*D))
+  for (const auto *D : Container->decls())
+    if (const auto *PropertyImpl = dyn_cast<ObjCPropertyImplDecl>(D))
       Results.Ignore(PropertyImpl->getPropertyDecl());
   
   // Add any properties that we find.
@@ -6144,11 +6065,8 @@
                                KnownMethods, InOriginalClass);
 
     // Add methods from any class extensions and categories.
-    for (ObjCInterfaceDecl::visible_categories_iterator
-           Cat = IFace->visible_categories_begin(),
-           CatEnd = IFace->visible_categories_end();
-         Cat != CatEnd; ++Cat) {
-      FindImplementableMethods(Context, *Cat, WantInstanceMethods, ReturnType,
+    for (auto *Cat : IFace->visible_categories()) {
+      FindImplementableMethods(Context, Cat, WantInstanceMethods, ReturnType,
                                KnownMethods, false);      
     }
 
@@ -6196,16 +6114,14 @@
   // Add methods in this container. This operation occurs last because
   // we want the methods from this container to override any methods
   // we've previously seen with the same selector.
-  for (ObjCContainerDecl::method_iterator M = Container->meth_begin(),
-                                       MEnd = Container->meth_end();
-       M != MEnd; ++M) {
+  for (auto *M : Container->methods()) {
     if (M->isInstanceMethod() == WantInstanceMethods) {
       if (!ReturnType.isNull() &&
-          !Context.hasSameUnqualifiedType(ReturnType, M->getResultType()))
+          !Context.hasSameUnqualifiedType(ReturnType, M->getReturnType()))
         continue;
 
       KnownMethods[M->getSelector()] =
-          KnownMethodsMap::mapped_type(*M, InOriginalClass);
+          KnownMethodsMap::mapped_type(M, InOriginalClass);
     }
   }
 }
@@ -6926,10 +6842,9 @@
     // If the result type was not already provided, add it to the
     // pattern as (type).
     if (ReturnType.isNull())
-      AddObjCPassingTypeChunk(Method->getResultType(),
-                              Method->getObjCDeclQualifier(),
-                              Context, Policy,
-                              Builder); 
+      AddObjCPassingTypeChunk(Method->getReturnType(),
+                              Method->getObjCDeclQualifier(), Context, Policy,
+                              Builder);
 
     Selector Sel = Method->getSelector();
 
@@ -6973,7 +6888,7 @@
       Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
       Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
       Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
-      if (!Method->getResultType()->isVoidType()) {
+      if (!Method->getReturnType()->isVoidType()) {
         // If the result type is not void, add a return clause.
         Builder.AddTextChunk("return");
         Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7011,23 +6926,14 @@
       if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(SearchDecl))
         IFace = Category->getClassInterface();
     
-    if (IFace) {
-      for (ObjCInterfaceDecl::visible_categories_iterator
-             Cat = IFace->visible_categories_begin(),
-             CatEnd = IFace->visible_categories_end();
-           Cat != CatEnd; ++Cat) {
-        Containers.push_back(*Cat);
-      }
-    }
+    if (IFace)
+      for (auto *Cat : IFace->visible_categories())
+        Containers.push_back(Cat);
     
-    for (unsigned I = 0, N = Containers.size(); I != N; ++I) {
-      for (ObjCContainerDecl::prop_iterator P = Containers[I]->prop_begin(),
-                                         PEnd = Containers[I]->prop_end(); 
-           P != PEnd; ++P) {
-        AddObjCKeyValueCompletions(*P, IsInstanceMethod, ReturnType, Context, 
+    for (unsigned I = 0, N = Containers.size(); I != N; ++I)
+      for (auto *P : Containers[I]->properties())
+        AddObjCKeyValueCompletions(P, IsInstanceMethod, ReturnType, Context, 
                                    KnownSelectors, Results);
-      }
-    }
   }
   
   Results.ExitScope();
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 328ce70..43d855c 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -62,24 +62,29 @@
 
 class TypeNameValidatorCCC : public CorrectionCandidateCallback {
  public:
-  TypeNameValidatorCCC(bool AllowInvalid, bool WantClass=false)
-      : AllowInvalidDecl(AllowInvalid), WantClassName(WantClass) {
+  TypeNameValidatorCCC(bool AllowInvalid, bool WantClass=false,
+                       bool AllowTemplates=false)
+      : AllowInvalidDecl(AllowInvalid), WantClassName(WantClass),
+        AllowClassTemplates(AllowTemplates) {
     WantExpressionKeywords = false;
     WantCXXNamedCasts = false;
     WantRemainingKeywords = false;
   }
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
-    if (NamedDecl *ND = candidate.getCorrectionDecl())
-      return (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) &&
-          (AllowInvalidDecl || !ND->isInvalidDecl());
-    else
-      return !WantClassName && candidate.isKeyword();
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
+    if (NamedDecl *ND = candidate.getCorrectionDecl()) {
+      bool IsType = isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
+      bool AllowedTemplate = AllowClassTemplates && isa<ClassTemplateDecl>(ND);
+      return (IsType || AllowedTemplate) &&
+             (AllowInvalidDecl || !ND->isInvalidDecl());
+    }
+    return !WantClassName && candidate.isKeyword();
   }
 
  private:
   bool AllowInvalidDecl;
   bool WantClassName;
+  bool AllowClassTemplates;
 };
 
 }
@@ -381,9 +386,8 @@
     const Type *Ty = SS->getScopeRep()->getAsType();
 
     CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
-    for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
-          BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base)
-      if (Context.hasSameUnqualifiedType(QualType(Ty, 1), Base->getType()))
+    for (const auto &Base : RD->bases())
+      if (Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType()))
         return true;
     return S->isFunctionPrototypeScope();
   } 
@@ -394,13 +398,14 @@
                                    SourceLocation IILoc,
                                    Scope *S,
                                    CXXScopeSpec *SS,
-                                   ParsedType &SuggestedType) {
+                                   ParsedType &SuggestedType,
+                                   bool AllowClassTemplates) {
   // We don't have anything to suggest (yet).
   SuggestedType = ParsedType();
   
   // There may have been a typo in the name of the type. Look up typo
   // results, in case we have something that we can suggest.
-  TypeNameValidatorCCC Validator(false);
+  TypeNameValidatorCCC Validator(false, false, AllowClassTemplates);
   if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(II, IILoc),
                                              LookupOrdinaryName, S, SS,
                                              Validator)) {
@@ -467,11 +472,11 @@
       << II << DC << SS->getRange();
   else if (isDependentScopeSpecifier(*SS)) {
     unsigned DiagID = diag::err_typename_missing;
-    if (getLangOpts().MicrosoftMode && isMicrosoftMissingTypename(SS, S))
+    if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S))
       DiagID = diag::warn_typename_missing;
 
     Diag(SS->getRange().getBegin(), DiagID)
-      << (NestedNameSpecifier *)SS->getScopeRep() << II->getName()
+      << SS->getScopeRep() << II->getName()
       << SourceRange(SS->getRange().getBegin(), IILoc)
       << FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
     SuggestedType = ActOnTypenameType(S, SourceLocation(),
@@ -845,7 +850,8 @@
   // seems likely a type is wanted instead of the non-type that was found.
   bool NextIsOp = NextToken.is(tok::amp) || NextToken.is(tok::star);
   if ((NextToken.is(tok::identifier) ||
-       (NextIsOp && FirstDecl->isFunctionOrFunctionTemplate())) &&
+       (NextIsOp &&
+        FirstDecl->getUnderlyingDecl()->isFunctionOrFunctionTemplate())) &&
       isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
     TypeDecl *Type = Result.getAsSingle<TypeDecl>();
     DiagnoseUseOfDecl(Type, NameLoc);
@@ -962,12 +968,9 @@
 
 
 void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
-  FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
-  if (FunctionTemplateDecl *TFD = dyn_cast_or_null<FunctionTemplateDecl>(D)) {
-    // We assume that the caller has already called
-    // ActOnReenterTemplateScope
-    FD = TFD->getTemplatedDecl();
-  }
+  // We assume that the caller has already called
+  // ActOnReenterTemplateScope so getTemplatedDecl() works.
+  FunctionDecl *FD = D->getAsFunction();
   if (!FD)
     return;
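Decl::getAsFunction(), used above and again in MergeFunctionDecl further down, looks through a FunctionTemplateDecl to its templated FunctionDecl and returns null for anything else, which is what makes the hand-written dyn_cast chain redundant. A minimal sketch, assuming Clang ~3.5 headers; numParamsOf is a hypothetical helper:

#include "clang/AST/Decl.h"

// Hypothetical helper: works for a FunctionDecl or a FunctionTemplateDecl,
// and safely returns 0 for any other kind of declaration.
static unsigned numParamsOf(clang::Decl *D) {
  if (clang::FunctionDecl *FD = D->getAsFunction())
    return FD->getNumParams();
  return 0;
}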
 
@@ -1086,9 +1089,8 @@
 }
 
 bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S,
-                         bool ExplicitInstantiationOrSpecialization) {
-  return IdResolver.isDeclInScope(D, Ctx, S,
-                                  ExplicitInstantiationOrSpecialization);
+                         bool AllowInlineNamespace) {
+  return IdResolver.isDeclInScope(D, Ctx, S, AllowInlineNamespace);
 }
 
 Scope *Sema::getScopeForDeclContext(Scope *S, DeclContext *DC) {
@@ -1108,21 +1110,19 @@
 
 /// Filters out lookup results that don't fall within the given scope
 /// as determined by isDeclInScope.
-void Sema::FilterLookupForScope(LookupResult &R,
-                                DeclContext *Ctx, Scope *S,
+void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                                 bool ConsiderLinkage,
-                                bool ExplicitInstantiationOrSpecialization) {
+                                bool AllowInlineNamespace) {
   LookupResult::Filter F = R.makeFilter();
   while (F.hasNext()) {
     NamedDecl *D = F.next();
 
-    if (isDeclInScope(D, Ctx, S, ExplicitInstantiationOrSpecialization))
+    if (isDeclInScope(D, Ctx, S, AllowInlineNamespace))
       continue;
 
-    if (ConsiderLinkage &&
-        isOutOfScopePreviousDeclaration(D, Ctx, Context))
+    if (ConsiderLinkage && isOutOfScopePreviousDeclaration(D, Ctx, Context))
       continue;
-    
+
     F.erase();
   }
 
@@ -1173,8 +1173,8 @@
 //
 // When we see foo we don't know if after the typedef we will get 'A' or '*A'
 // for example. If 'A', foo will have external linkage. If we have '*A',
-// foo will have no linkage. Since we can't know untill we get to the end
-// of the typedef, this function finds out if D might have non external linkage.
+// foo will have no linkage. Since we can't know until we get to the end
+// of the typedef, this function finds out if D might have non-external linkage.
 // Callers should verify at the end of the TU if D has external linkage or
 // not.
 bool Sema::mightHaveNonExternalLinkage(const DeclaratorDecl *D) {
@@ -1204,7 +1204,8 @@
   if (D->isInvalidDecl() || D->isUsed() || D->hasAttr<UnusedAttr>())
     return false;
 
-  // Ignore class templates.
+  // Ignore all entities declared within templates, and out-of-line definitions
+  // of members of class templates.
   if (D->getDeclContext()->isDependentContext() ||
       D->getLexicalDeclContext()->isDependentContext())
     return false;
@@ -1271,7 +1272,8 @@
   if (D->isInvalidDecl())
     return false;
 
-  if (D->isReferenced() || D->isUsed() || D->hasAttr<UnusedAttr>())
+  if (D->isReferenced() || D->isUsed() || D->hasAttr<UnusedAttr>() ||
+      D->hasAttr<ObjCPreciseLifetimeAttr>())
     return false;
 
   if (isa<LabelDecl>(D))
@@ -1374,9 +1376,7 @@
   assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) &&
          "Scope shouldn't contain decls!");
 
-  for (Scope::decl_iterator I = S->decl_begin(), E = S->decl_end();
-       I != E; ++I) {
-    Decl *TmpD = (*I);
+  for (auto *TmpD : S->decls()) {
     assert(TmpD && "This decl didn't get pushed??");
 
     assert(isa<NamedDecl>(TmpD) && "Decl isn't NamedDecl?");
@@ -1395,16 +1395,6 @@
     // Remove this name from our lexical scope.
     IdResolver.RemoveDecl(D);
   }
-  DiagnoseUnusedBackingIvarInAccessor(S);
-}
-
-void Sema::ActOnStartFunctionDeclarator() {
-  ++InFunctionDeclarator;
-}
-
-void Sema::ActOnEndFunctionDeclarator() {
-  assert(InFunctionDeclarator);
-  --InFunctionDeclarator;
 }
 
 /// \brief Look for an Objective-C class in the translation unit.
@@ -1549,6 +1539,7 @@
     LinkageSpecDecl *CLinkageDecl =
         LinkageSpecDecl::Create(Context, Parent, Loc, Loc,
                                 LinkageSpecDecl::lang_c, false);
+    CLinkageDecl->setImplicit();
     Parent->addDecl(CLinkageDecl);
     Parent = CLinkageDecl;
   }
@@ -1565,12 +1556,10 @@
   // FunctionDecl.
   if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
     SmallVector<ParmVarDecl*, 16> Params;
-    for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+    for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
       ParmVarDecl *parm =
-        ParmVarDecl::Create(Context, New, SourceLocation(),
-                            SourceLocation(), 0,
-                            FT->getArgType(i), /*TInfo=*/0,
-                            SC_None, 0);
+          ParmVarDecl::Create(Context, New, SourceLocation(), SourceLocation(),
+                              0, FT->getParamType(i), /*TInfo=*/0, SC_None, 0);
       parm->setScopeInfo(0, i);
       Params.push_back(parm);
     }
@@ -1797,46 +1786,19 @@
 
 /// DeclHasAttr - returns true if decl Declaration already has the target
 /// attribute.
-static bool
-DeclHasAttr(const Decl *D, const Attr *A) {
-  // There can be multiple AvailabilityAttr in a Decl. Make sure we copy
-  // all of them. It is mergeAvailabilityAttr in SemaDeclAttr.cpp that is
-  // responsible for making sure they are consistent.
-  const AvailabilityAttr *AA = dyn_cast<AvailabilityAttr>(A);
-  if (AA)
-    return false;
-
-  // The following thread safety attributes can also be duplicated.
-  switch (A->getKind()) {
-    case attr::ExclusiveLocksRequired:
-    case attr::SharedLocksRequired:
-    case attr::LocksExcluded:
-    case attr::ExclusiveLockFunction:
-    case attr::SharedLockFunction:
-    case attr::UnlockFunction:
-    case attr::ExclusiveTrylockFunction:
-    case attr::SharedTrylockFunction:
-    case attr::GuardedBy:
-    case attr::PtGuardedBy:
-    case attr::AcquiredBefore:
-    case attr::AcquiredAfter:
-      return false;
-    default:
-      ;
-  }
-
+static bool DeclHasAttr(const Decl *D, const Attr *A) {
   const OwnershipAttr *OA = dyn_cast<OwnershipAttr>(A);
   const AnnotateAttr *Ann = dyn_cast<AnnotateAttr>(A);
-  for (Decl::attr_iterator i = D->attr_begin(), e = D->attr_end(); i != e; ++i)
-    if ((*i)->getKind() == A->getKind()) {
+  for (const auto *i : D->attrs())
+    if (i->getKind() == A->getKind()) {
       if (Ann) {
-        if (Ann->getAnnotation() == cast<AnnotateAttr>(*i)->getAnnotation())
+        if (Ann->getAnnotation() == cast<AnnotateAttr>(i)->getAnnotation())
           return true;
         continue;
       }
       // FIXME: Don't hardcode this check
-      if (OA && isa<OwnershipAttr>(*i))
-        return OA->getOwnKind() == cast<OwnershipAttr>(*i)->getOwnKind();
+      if (OA && isa<OwnershipAttr>(i))
+        return OA->getOwnKind() == cast<OwnershipAttr>(i)->getOwnKind();
       return true;
     }
 
@@ -1861,9 +1823,7 @@
   AlignedAttr *OldAlignasAttr = 0;
   AlignedAttr *OldStrictestAlignAttr = 0;
   unsigned OldAlign = 0;
-  for (specific_attr_iterator<AlignedAttr>
-         I = Old->specific_attr_begin<AlignedAttr>(),
-         E = Old->specific_attr_end<AlignedAttr>(); I != E; ++I) {
+  for (auto *I : Old->specific_attrs<AlignedAttr>()) {
     // FIXME: We have no way of representing inherited dependent alignments
     // in a case like:
     //   template<int A, int B> struct alignas(A) X;
@@ -1874,26 +1834,24 @@
       return false;
 
     if (I->isAlignas())
-      OldAlignasAttr = *I;
+      OldAlignasAttr = I;
 
     unsigned Align = I->getAlignment(S.Context);
     if (Align > OldAlign) {
       OldAlign = Align;
-      OldStrictestAlignAttr = *I;
+      OldStrictestAlignAttr = I;
     }
   }
 
   // Look for alignas attributes on New.
   AlignedAttr *NewAlignasAttr = 0;
   unsigned NewAlign = 0;
-  for (specific_attr_iterator<AlignedAttr>
-         I = New->specific_attr_begin<AlignedAttr>(),
-         E = New->specific_attr_end<AlignedAttr>(); I != E; ++I) {
+  for (auto *I : New->specific_attrs<AlignedAttr>()) {
     if (I->isAlignmentDependent())
       return false;
 
     if (I->isAlignas())
-      NewAlignasAttr = *I;
+      NewAlignasAttr = I;
 
     unsigned Align = I->getAlignment(S.Context);
     if (Align > NewAlign)
@@ -1939,9 +1897,9 @@
     //   specifier, any other declaration of that object shall also
     //   have no alignment specifier.
     S.Diag(New->getLocation(), diag::err_alignas_missing_on_definition)
-      << OldAlignasAttr->isC11();
+      << OldAlignasAttr;
     S.Diag(OldAlignasAttr->getLocation(), diag::note_alignas_on_declaration)
-      << OldAlignasAttr->isC11();
+      << OldAlignasAttr;
   }
 
   bool AnyAdded = false;
@@ -1995,11 +1953,15 @@
   else if (SectionAttr *SA = dyn_cast<SectionAttr>(Attr))
     NewAttr = S.mergeSectionAttr(D, SA->getRange(), SA->getName(),
                                  AttrSpellingListIndex);
+  else if (MSInheritanceAttr *IA = dyn_cast<MSInheritanceAttr>(Attr))
+    NewAttr = S.mergeMSInheritanceAttr(D, IA->getRange(), IA->getBestCase(),
+                                       AttrSpellingListIndex,
+                                       IA->getSemanticSpelling());
   else if (isa<AlignedAttr>(Attr))
     // AlignedAttrs are handled separately, because we need to handle all
     // such attributes on a declaration at the same time.
     NewAttr = 0;
-  else if (!DeclHasAttr(D, Attr))
+  else if (Attr->duplicatesAllowed() || !DeclHasAttr(D, Attr))
     NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
 
   if (NewAttr) {
@@ -2029,12 +1991,9 @@
 }
 
 static bool hasAttribute(const Decl *D, attr::Kind Kind) {
-  for (Decl::attr_iterator I = D->attr_begin(), E = D->attr_end();
-       I != E; ++I) {
-    Attr *Attribute = *I;
+  for (const auto *Attribute : D->attrs())
     if (Attribute->getKind() == Kind)
       return true;
-  }
   return false;
 }
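DeclHasAttr, hasAttribute and the alignment-merging code above all move from attr_iterator/specific_attr_iterator pairs to the attrs() and specific_attrs<T>() ranges. A minimal sketch of the filtered form, assuming Clang ~3.5 headers; hasAnnotation is a hypothetical helper:

#include "clang/AST/Attr.h"
#include "clang/AST/DeclBase.h"
#include "llvm/ADT/StringRef.h"

// Hypothetical helper: true if the declaration carries annotate("Text").
static bool hasAnnotation(const clang::Decl *D, llvm::StringRef Text) {
  for (const auto *A : D->specific_attrs<clang::AnnotateAttr>())
    if (A->getAnnotation() == Text)
      return true;
  return false;
}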
 
@@ -2097,9 +2056,9 @@
         //   specifier, any other declaration of that object shall also
         //   have no alignment specifier.
         S.Diag(Def->getLocation(), diag::err_alignas_missing_on_definition)
-          << AA->isC11();
+          << AA;
         S.Diag(NewAttribute->getLocation(), diag::note_alignas_on_declaration)
-          << AA->isC11();
+          << AA;
         NewAttributes.erase(NewAttributes.begin() + I);
         --E;
         continue;
@@ -2138,15 +2097,12 @@
   // we process them.
   if (!foundAny) New->setAttrs(AttrVec());
 
-  for (specific_attr_iterator<InheritableAttr>
-         i = Old->specific_attr_begin<InheritableAttr>(),
-         e = Old->specific_attr_end<InheritableAttr>(); 
-       i != e; ++i) {
+  for (auto *I : Old->specific_attrs<InheritableAttr>()) {
     bool Override = false;
     // Ignore deprecated/unavailable/availability attributes if requested.
-    if (isa<DeprecatedAttr>(*i) ||
-        isa<UnavailableAttr>(*i) ||
-        isa<AvailabilityAttr>(*i)) {
+    if (isa<DeprecatedAttr>(I) ||
+        isa<UnavailableAttr>(I) ||
+        isa<AvailabilityAttr>(I)) {
       switch (AMK) {
       case AMK_None:
         continue;
@@ -2161,10 +2117,10 @@
     }
 
     // Already handled.
-    if (isa<UsedAttr>(*i))
+    if (isa<UsedAttr>(I))
       continue;
 
-    if (mergeDeclAttribute(*this, New, *i, Override))
+    if (mergeDeclAttribute(*this, New, I, Override))
       foundAny = true;
   }
 
@@ -2183,9 +2139,9 @@
   //   The first declaration of a function shall specify the
   //   carries_dependency attribute for its declarator-id if any declaration
   //   of the function specifies the carries_dependency attribute.
-  if (newDecl->hasAttr<CarriesDependencyAttr>() &&
-      !oldDecl->hasAttr<CarriesDependencyAttr>()) {
-    S.Diag(newDecl->getAttr<CarriesDependencyAttr>()->getLocation(),
+  const CarriesDependencyAttr *CDA = newDecl->getAttr<CarriesDependencyAttr>();
+  if (CDA && !oldDecl->hasAttr<CarriesDependencyAttr>()) {
+    S.Diag(CDA->getLocation(),
            diag::err_carries_dependency_missing_on_first_decl) << 1/*Param*/;
     // Find the first declaration of the parameter.
     // FIXME: Should we build redeclaration chains for function parameters?
@@ -2206,12 +2162,10 @@
   // done before we process them.
   if (!foundAny) newDecl->setAttrs(AttrVec());
 
-  for (specific_attr_iterator<InheritableParamAttr>
-       i = oldDecl->specific_attr_begin<InheritableParamAttr>(),
-       e = oldDecl->specific_attr_end<InheritableParamAttr>(); i != e; ++i) {
-    if (!DeclHasAttr(newDecl, *i)) {
+  for (const auto *I : oldDecl->specific_attrs<InheritableParamAttr>()) {
+    if (!DeclHasAttr(newDecl, I)) {
       InheritableAttr *newAttr =
-        cast<InheritableParamAttr>((*i)->clone(S.Context));
+        cast<InheritableParamAttr>(I->clone(S.Context));
       newAttr->setInherited(true);
       newDecl->addAttr(newAttr);
       foundAny = true;
@@ -2298,15 +2252,10 @@
 /// merged with.
 ///
 /// Returns true if there was an error, false otherwise.
-bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S,
-                             bool MergeTypeWithOld) {
+bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
+                             Scope *S, bool MergeTypeWithOld) {
   // Verify the old decl was also a function.
-  FunctionDecl *Old = 0;
-  if (FunctionTemplateDecl *OldFunctionTemplate
-        = dyn_cast<FunctionTemplateDecl>(OldD))
-    Old = OldFunctionTemplate->getTemplatedDecl();
-  else
-    Old = dyn_cast<FunctionDecl>(OldD);
+  FunctionDecl *Old = OldD->getAsFunction();
   if (!Old) {
     if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(OldD)) {
       if (New->getFriendObjectKind()) {
@@ -2318,18 +2267,34 @@
         return true;
       }
 
-      Diag(New->getLocation(), diag::err_using_decl_conflict_reverse);
-      Diag(Shadow->getTargetDecl()->getLocation(),
-           diag::note_using_decl_target);
-      Diag(Shadow->getUsingDecl()->getLocation(),
-           diag::note_using_decl) << 0;
+      // C++11 [namespace.udecl]p14:
+      //   If a function declaration in namespace scope or block scope has the
+      //   same name and the same parameter-type-list as a function introduced
+      //   by a using-declaration, and the declarations do not declare the same
+      //   function, the program is ill-formed.
+
+      // Check whether the two declarations might declare the same function.
+      Old = dyn_cast<FunctionDecl>(Shadow->getTargetDecl());
+      if (Old &&
+          !Old->getDeclContext()->getRedeclContext()->Equals(
+              New->getDeclContext()->getRedeclContext()) &&
+          !(Old->isExternC() && New->isExternC()))
+        Old = 0;
+
+      if (!Old) {
+        Diag(New->getLocation(), diag::err_using_decl_conflict_reverse);
+        Diag(Shadow->getTargetDecl()->getLocation(),
+             diag::note_using_decl_target);
+        Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl) << 0;
+        return true;
+      }
+      OldD = Old;
+    } else {
+      Diag(New->getLocation(), diag::err_redefinition_different_kind)
+        << New->getDeclName();
+      Diag(OldD->getLocation(), diag::note_previous_definition);
       return true;
     }
-
-    Diag(New->getLocation(), diag::err_redefinition_different_kind)
-      << New->getDeclName();
-    Diag(OldD->getLocation(), diag::note_previous_definition);
-    return true;
   }
 
   // If the old declaration is invalid, just give up here.
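The rewritten branch above implements the quoted C++11 [namespace.udecl]p14 rule: the conflict diagnostic is now reserved for declarations that do not declare the same function. A source-level illustration of the case the new code accepts (names are invented for the example):

extern "C" void c_api(int);

namespace wrapper {
  using ::c_api;
  // Same name and parameter-type-list, but it declares the *same* function
  // (C language linkage), so this is not the ill-formed case from p14.
  extern "C" void c_api(int);
}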
@@ -2339,11 +2304,14 @@
   // Determine whether the previous declaration was a definition,
   // implicit declaration, or a declaration.
   diag::kind PrevDiag;
+  SourceLocation OldLocation = Old->getLocation();
   if (Old->isThisDeclarationADefinition())
     PrevDiag = diag::note_previous_definition;
-  else if (Old->isImplicit())
+  else if (Old->isImplicit()) {
     PrevDiag = diag::note_previous_implicit_declaration;
-  else
+    if (OldLocation.isInvalid())
+      OldLocation = New->getLocation();
+  } else
     PrevDiag = diag::note_previous_declaration;
 
   // Don't complain about this if we're in GNU89 mode and the old function
@@ -2357,10 +2325,10 @@
       !canRedefineFunction(Old, getLangOpts())) {
     if (getLangOpts().MicrosoftExt) {
       Diag(New->getLocation(), diag::warn_static_non_static) << New;
-      Diag(Old->getLocation(), PrevDiag);
+      Diag(OldLocation, PrevDiag);
     } else {
       Diag(New->getLocation(), diag::err_static_non_static) << New;
-      Diag(Old->getLocation(), PrevDiag);
+      Diag(OldLocation, PrevDiag);
       return true;
     }
   }
@@ -2426,7 +2394,7 @@
       Diag(New->getLocation(), diag::err_regparm_mismatch)
         << NewType->getRegParmType()
         << OldType->getRegParmType();
-      Diag(Old->getLocation(), diag::note_previous_declaration);      
+      Diag(OldLocation, diag::note_previous_declaration);
       return true;
     }
 
@@ -2438,7 +2406,7 @@
   if (OldTypeInfo.getProducesResult() != NewTypeInfo.getProducesResult()) {
     if (NewTypeInfo.getProducesResult()) {
       Diag(New->getLocation(), diag::err_returns_retained_mismatch);
-      Diag(Old->getLocation(), diag::note_previous_declaration);      
+      Diag(OldLocation, diag::note_previous_declaration);
       return true;
     }
     
@@ -2482,12 +2450,14 @@
     //   Redeclarations or specializations of a function or function template
     //   with a declared return type that uses a placeholder type shall also
     //   use that placeholder, not a deduced type.
-    QualType OldDeclaredReturnType = (Old->getTypeSourceInfo()
-      ? Old->getTypeSourceInfo()->getType()->castAs<FunctionType>()
-      : OldType)->getResultType();
-    QualType NewDeclaredReturnType = (New->getTypeSourceInfo()
-      ? New->getTypeSourceInfo()->getType()->castAs<FunctionType>()
-      : NewType)->getResultType();
+    QualType OldDeclaredReturnType =
+        (Old->getTypeSourceInfo()
+             ? Old->getTypeSourceInfo()->getType()->castAs<FunctionType>()
+             : OldType)->getReturnType();
+    QualType NewDeclaredReturnType =
+        (New->getTypeSourceInfo()
+             ? New->getTypeSourceInfo()->getType()->castAs<FunctionType>()
+             : NewType)->getReturnType();
     QualType ResQT;
     if (!Context.hasSameType(OldDeclaredReturnType, NewDeclaredReturnType) &&
         !((NewQType->isDependentType() || OldQType->isDependentType()) &&
@@ -2501,19 +2471,19 @@
                diag::err_member_def_does_not_match_ret_type) << New;
         else
           Diag(New->getLocation(), diag::err_ovl_diff_return_type);
-        Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+        Diag(OldLocation, PrevDiag) << Old << Old->getType();
         return true;
       }
       else
         NewQType = ResQT;
     }
 
-    QualType OldReturnType = OldType->getResultType();
-    QualType NewReturnType = cast<FunctionType>(NewQType)->getResultType();
+    QualType OldReturnType = OldType->getReturnType();
+    QualType NewReturnType = cast<FunctionType>(NewQType)->getReturnType();
     if (OldReturnType != NewReturnType) {
       // If this function has a deduced return type and has already been
       // defined, copy the deduced value from the old declaration.
-      AutoType *OldAT = Old->getResultType()->getContainedAutoType();
+      AutoType *OldAT = Old->getReturnType()->getContainedAutoType();
       if (OldAT && OldAT->isDeduced()) {
         New->setType(
             SubstAutoType(New->getType(),
@@ -2533,8 +2503,8 @@
       NewMethod->setTrivial(OldMethod->isTrivial());
 
       // MSVC allows explicit template specialization at class scope:
-      // 2 CXMethodDecls referring to the same function will be injected.
-      // We don't want a redeclartion error.
+      // 2 CXXMethodDecls referring to the same function will be injected.
+      // We don't want a redeclaration error.
       bool IsClassScopeExplicitSpecialization =
                               OldMethod->isFunctionTemplateSpecialization() &&
                               NewMethod->isFunctionTemplateSpecialization();
@@ -2547,7 +2517,7 @@
         //       is a static member function declaration.
         if (OldMethod->isStatic() != NewMethod->isStatic()) {
           Diag(New->getLocation(), diag::err_ovl_static_nonstatic_member);
-          Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+          Diag(OldLocation, PrevDiag) << Old << Old->getType();
           return true;
         }
 
@@ -2571,7 +2541,7 @@
           Diag(New->getLocation(), diag::err_member_redeclared_in_instantiation)
             << New << New->getType();
         }
-        Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+        Diag(OldLocation, PrevDiag) << Old << Old->getType();
 
       // Complain if this is an explicit declaration of a special
       // member that was initially declared implicitly.
@@ -2599,10 +2569,9 @@
     //   The first declaration of a function shall specify the noreturn
     //   attribute if any declaration of that function specifies the noreturn
     //   attribute.
-    if (New->hasAttr<CXX11NoReturnAttr>() &&
-        !Old->hasAttr<CXX11NoReturnAttr>()) {
-      Diag(New->getAttr<CXX11NoReturnAttr>()->getLocation(),
-           diag::err_noreturn_missing_on_first_decl);
+    const CXX11NoReturnAttr *NRA = New->getAttr<CXX11NoReturnAttr>();
+    if (NRA && !Old->hasAttr<CXX11NoReturnAttr>()) {
+      Diag(NRA->getLocation(), diag::err_noreturn_missing_on_first_decl);
       Diag(Old->getFirstDecl()->getLocation(),
            diag::note_noreturn_missing_first_decl);
     }
@@ -2611,9 +2580,9 @@
     //   The first declaration of a function shall specify the
     //   carries_dependency attribute for its declarator-id if any declaration
     //   of the function specifies the carries_dependency attribute.
-    if (New->hasAttr<CarriesDependencyAttr>() &&
-        !Old->hasAttr<CarriesDependencyAttr>()) {
-      Diag(New->getAttr<CarriesDependencyAttr>()->getLocation(),
+    const CarriesDependencyAttr *CDA = New->getAttr<CarriesDependencyAttr>();
+    if (CDA && !Old->hasAttr<CarriesDependencyAttr>()) {
+      Diag(CDA->getLocation(),
            diag::err_carries_dependency_missing_on_first_decl) << 0/*Function*/;
       Diag(Old->getFirstDecl()->getLocation(),
            diag::note_carries_dependency_missing_first_decl) << 0/*Function*/;
@@ -2645,10 +2614,10 @@
       // Check cautiously as the friend object kind isn't yet complete.
       if (New->getFriendObjectKind() != Decl::FOK_None) {
         Diag(New->getLocation(), diag::ext_retained_language_linkage) << New;
-        Diag(Old->getLocation(), PrevDiag);
+        Diag(OldLocation, PrevDiag);
       } else {
         Diag(New->getLocation(), diag::err_different_language_linkage) << New;
-        Diag(Old->getLocation(), PrevDiag);
+        Diag(OldLocation, PrevDiag);
         return true;
       }
     }
@@ -2679,26 +2648,19 @@
       // The old declaration provided a function prototype, but the
       // new declaration does not. Merge in the prototype.
       assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
-      SmallVector<QualType, 16> ParamTypes(OldProto->arg_type_begin(),
-                                                 OldProto->arg_type_end());
-      NewQType = Context.getFunctionType(NewFuncType->getResultType(),
-                                         ParamTypes,
-                                         OldProto->getExtProtoInfo());
+      SmallVector<QualType, 16> ParamTypes(OldProto->param_types());
+      NewQType =
+          Context.getFunctionType(NewFuncType->getReturnType(), ParamTypes,
+                                  OldProto->getExtProtoInfo());
       New->setType(NewQType);
       New->setHasInheritedPrototype();
 
       // Synthesize a parameter for each argument type.
       SmallVector<ParmVarDecl*, 16> Params;
-      for (FunctionProtoType::arg_type_iterator
-             ParamType = OldProto->arg_type_begin(),
-             ParamEnd = OldProto->arg_type_end();
-           ParamType != ParamEnd; ++ParamType) {
-        ParmVarDecl *Param = ParmVarDecl::Create(Context, New,
-                                                 SourceLocation(),
-                                                 SourceLocation(), 0,
-                                                 *ParamType, /*TInfo=*/0,
-                                                 SC_None,
-                                                 0);
+      for (const auto &ParamType : OldProto->param_types()) {
+        ParmVarDecl *Param = ParmVarDecl::Create(Context, New, SourceLocation(),
+                                                 SourceLocation(), 0, ParamType,
+                                                 /*TInfo=*/0, SC_None, 0);
         Param->setScopeInfo(0, Params.size());
         Param->setImplicit();
         Params.push_back(Param);
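FunctionProtoType::param_types(), used twice in the hunk above, exposes the parameter types as a single iterable range instead of an arg_type_begin()/arg_type_end() pair. A minimal sketch, assuming Clang ~3.5 headers; countPointerParams is a hypothetical helper:

#include "clang/AST/Type.h"

// Hypothetical helper: counts parameters of pointer type in a prototype.
static unsigned countPointerParams(const clang::FunctionProtoType *Proto) {
  unsigned Count = 0;
  for (clang::QualType T : Proto->param_types())
    if (T->isPointerType())
      ++Count;
  return Count;
}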
@@ -2733,21 +2695,21 @@
       = New->getType()->getAs<FunctionProtoType>();
 
     // Determine whether this is the GNU C extension.
-    QualType MergedReturn = Context.mergeTypes(OldProto->getResultType(),
-                                               NewProto->getResultType());
+    QualType MergedReturn = Context.mergeTypes(OldProto->getReturnType(),
+                                               NewProto->getReturnType());
     bool LooseCompatible = !MergedReturn.isNull();
     for (unsigned Idx = 0, End = Old->getNumParams();
          LooseCompatible && Idx != End; ++Idx) {
       ParmVarDecl *OldParm = Old->getParamDecl(Idx);
       ParmVarDecl *NewParm = New->getParamDecl(Idx);
       if (Context.typesAreCompatible(OldParm->getType(),
-                                     NewProto->getArgType(Idx))) {
+                                     NewProto->getParamType(Idx))) {
         ArgTypes.push_back(NewParm->getType());
       } else if (Context.typesAreCompatible(OldParm->getType(),
                                             NewParm->getType(),
                                             /*CompareUnqualified=*/true)) {
-        GNUCompatibleParamWarning Warn
-          = { OldParm, NewParm, NewProto->getArgType(Idx) };
+        GNUCompatibleParamWarning Warn = { OldParm, NewParm,
+                                           NewProto->getParamType(Idx) };
         Warnings.push_back(Warn);
         ArgTypes.push_back(NewParm->getType());
       } else
@@ -2785,7 +2747,7 @@
     // or 'printf', just warn about the incompatible redeclaration.
     if (Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) {
       Diag(New->getLocation(), diag::warn_redecl_library_builtin) << New;
-      Diag(Old->getLocation(), diag::note_previous_builtin_declaration)
+      Diag(OldLocation, diag::note_previous_builtin_declaration)
         << Old << Old->getType();
 
       // If this is a global redeclaration, just forget hereafter
@@ -2806,7 +2768,7 @@
   }
 
   Diag(New->getLocation(), diag::err_conflicting_types) << New->getDeclName();
-  Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+  Diag(OldLocation, PrevDiag) << Old << Old->getType();
   return true;
 }
 
@@ -3003,14 +2965,17 @@
   if (New->isInvalidDecl())
     return;
 
+  VarTemplateDecl *NewTemplate = New->getDescribedVarTemplate();
+
   // Verify the old decl was also a variable or variable template.
   VarDecl *Old = 0;
-  if (Previous.isSingleResult() &&
-      (Old = dyn_cast<VarDecl>(Previous.getFoundDecl()))) {
-    if (New->getDescribedVarTemplate())
-      Old = Old->getDescribedVarTemplate() ? Old : 0;
-    else
-      Old = Old->getDescribedVarTemplate() ? 0 : Old;
+  VarTemplateDecl *OldTemplate = 0;
+  if (Previous.isSingleResult()) {
+    if (NewTemplate) {
+      OldTemplate = dyn_cast<VarTemplateDecl>(Previous.getFoundDecl());
+      Old = OldTemplate ? OldTemplate->getTemplatedDecl() : 0;
+    } else
+      Old = dyn_cast<VarDecl>(Previous.getFoundDecl());
   }
   if (!Old) {
     Diag(New->getLocation(), diag::err_redefinition_different_kind)
@@ -3023,6 +2988,13 @@
   if (!shouldLinkPossiblyHiddenDecl(Old, New))
     return;
 
+  // Ensure the template parameters are compatible.
+  if (NewTemplate &&
+      !TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
+                                      OldTemplate->getTemplateParameters(),
+                                      /*Complain=*/true, TPL_TemplateMatch))
+    return;
+
   // C++ [class.mem]p1:
   //   A member shall not be declared twice in the member-specification [...]
   // 
@@ -3037,9 +3009,9 @@
   mergeDeclAttributes(New, Old);
   // Warn if an already-declared variable is made a weak_import in a subsequent 
   // declaration
-  if (New->getAttr<WeakImportAttr>() &&
+  if (New->hasAttr<WeakImportAttr>() &&
       Old->getStorageClass() == SC_None &&
-      !Old->getAttr<WeakImportAttr>()) {
+      !Old->hasAttr<WeakImportAttr>()) {
     Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
     Diag(Old->getLocation(), diag::note_previous_definition);
     // Remove weak_import attribute on new declaration.
@@ -3148,14 +3120,13 @@
 
   // Keep a chain of previous declarations.
   New->setPreviousDecl(Old);
+  if (NewTemplate)
+    NewTemplate->setPreviousDecl(OldTemplate);
 
   // Inherit access appropriately.
   New->setAccess(Old->getAccess());
-
-  if (VarTemplateDecl *VTD = New->getDescribedVarTemplate()) {
-    if (New->isStaticDataMember() && New->isOutOfLine())
-      VTD->setAccess(New->getAccess());
-  }
+  if (NewTemplate)
+    NewTemplate->setAccess(New->getAccess());
 }
 
 /// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
@@ -3165,7 +3136,7 @@
   return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg());
 }
 
-static void HandleTagNumbering(Sema &S, const TagDecl *Tag) {
+static void HandleTagNumbering(Sema &S, const TagDecl *Tag, Scope *TagScope) {
   if (!S.Context.getLangOpts().CPlusPlus)
     return;
 
@@ -3176,7 +3147,8 @@
       return;
     MangleNumberingContext &MCtx =
         S.Context.getManglingNumberContext(Tag->getParent());
-    S.Context.setManglingNumber(Tag, MCtx.getManglingNumber(Tag));
+    S.Context.setManglingNumber(
+        Tag, MCtx.getManglingNumber(Tag, TagScope->getMSLocalManglingNumber()));
     return;
   }
 
@@ -3185,7 +3157,9 @@
   if (MangleNumberingContext *MCtx =
           S.getCurrentMangleNumberContext(Tag->getDeclContext(),
                                           ManglingContextDecl)) {
-    S.Context.setManglingNumber(Tag, MCtx->getManglingNumber(Tag));
+    S.Context.setManglingNumber(
+        Tag,
+        MCtx->getManglingNumber(Tag, TagScope->getMSLocalManglingNumber()));
   }
 }
 
@@ -3218,7 +3192,7 @@
   }
 
   if (Tag) {
-    HandleTagNumbering(*this, Tag);
+    HandleTagNumbering(*this, Tag, S);
     Tag->setFreeStanding();
     if (Tag->isInvalidDecl())
       return Tag;
@@ -3285,7 +3259,7 @@
         DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
       if (getLangOpts().CPlusPlus ||
           Record->getDeclContext()->isRecord())
-        return BuildAnonymousStructOrUnion(S, DS, AS, Record);
+        return BuildAnonymousStructOrUnion(S, DS, AS, Record, Context.getPrintingPolicy());
 
       DeclaresAnything = false;
     }
@@ -3471,12 +3445,10 @@
   bool Invalid = false;
 
   // Look every FieldDecl and IndirectFieldDecl with a name.
-  for (RecordDecl::decl_iterator D = AnonRecord->decls_begin(),
-                               DEnd = AnonRecord->decls_end();
-       D != DEnd; ++D) {
-    if ((isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D)) &&
-        cast<NamedDecl>(*D)->getDeclName()) {
-      ValueDecl *VD = cast<ValueDecl>(*D);
+  for (auto *D : AnonRecord->decls()) {
+    if ((isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) &&
+        cast<NamedDecl>(D)->getDeclName()) {
+      ValueDecl *VD = cast<ValueDecl>(D);
       if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
                                        VD->getLocation(), diagKind)) {
         // C++ [class.union]p2:
@@ -3492,9 +3464,8 @@
         //   anonymous union is declared.
         unsigned OldChainingSize = Chaining.size();
         if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
-          for (IndirectFieldDecl::chain_iterator PI = IF->chain_begin(),
-               PE = IF->chain_end(); PI != PE; ++PI)
-            Chaining.push_back(*PI);
+          for (auto *PI : IF->chain())
+            Chaining.push_back(PI);
         else
           Chaining.push_back(VD);
 
@@ -3549,13 +3520,45 @@
   llvm_unreachable("unknown storage class specifier");
 }
 
+static SourceLocation findDefaultInitializer(const CXXRecordDecl *Record) {
+  assert(Record->hasInClassInitializer());
+
+  for (const auto *I : Record->decls()) {
+    const auto *FD = dyn_cast<FieldDecl>(I);
+    if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
+      FD = IFD->getAnonField();
+    if (FD && FD->hasInClassInitializer())
+      return FD->getLocation();
+  }
+
+  llvm_unreachable("couldn't find in-class initializer");
+}
+
+static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
+                                      SourceLocation DefaultInitLoc) {
+  if (!Parent->isUnion() || !Parent->hasInClassInitializer())
+    return;
+
+  S.Diag(DefaultInitLoc, diag::err_multiple_mem_union_initialization);
+  S.Diag(findDefaultInitializer(Parent), diag::note_previous_initializer) << 0;
+}
+
+static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
+                                      CXXRecordDecl *AnonUnion) {
+  if (!Parent->isUnion() || !Parent->hasInClassInitializer())
+    return;
+
+  checkDuplicateDefaultInit(S, Parent, findDefaultInitializer(AnonUnion));
+}
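For illustration (hypothetical user code; names are invented), the new checkDuplicateDefaultInit helpers enforce C++11 [class.union]p8 / DR1460 when an anonymous member of a union also carries a default member initializer:

    union U {
      int a = 1;              // U already has an in-class initializer
      struct { int b = 2; };  // anonymous member adds a second one:
                              // err_multiple_mem_union_initialization
    };
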
+
 /// BuildAnonymousStructOrUnion - Handle the declaration of an
 /// anonymous structure or union. Anonymous unions are a C++ feature
 /// (C++ [class.union]) and a C11 feature; anonymous structures
 /// are a C11 feature and GNU C++ extension.
 Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
-                                             AccessSpecifier AS,
-                                             RecordDecl *Record) {
+                                        AccessSpecifier AS,
+                                        RecordDecl *Record,
+                                        const PrintingPolicy &Policy) {
   DeclContext *Owner = Record->getDeclContext();
 
   // Diagnose whether this anonymous struct/union is an extension.
@@ -3585,7 +3588,7 @@
   
         // Recover by adding 'static'.
         DS.SetStorageClassSpec(*this, DeclSpec::SCS_static, SourceLocation(),
-                               PrevSpec, DiagID);
+                               PrevSpec, DiagID, Policy);
       }
       // C++ [class.union]p6:
       //   A storage class is not allowed in a declaration of an
@@ -3599,7 +3602,7 @@
         // Recover by removing the storage specifier.
         DS.SetStorageClassSpec(*this, DeclSpec::SCS_unspecified, 
                                SourceLocation(),
-                               PrevSpec, DiagID);
+                               PrevSpec, DiagID, Context.getPrintingPolicy());
       }
     }
 
@@ -3632,10 +3635,8 @@
     //   The member-specification of an anonymous union shall only
     //   define non-static data members. [Note: nested types and
     //   functions cannot be declared within an anonymous union. ]
-    for (DeclContext::decl_iterator Mem = Record->decls_begin(),
-                                 MemEnd = Record->decls_end();
-         Mem != MemEnd; ++Mem) {
-      if (FieldDecl *FD = dyn_cast<FieldDecl>(*Mem)) {
+    for (auto *Mem : Record->decls()) {
+      if (auto *FD = dyn_cast<FieldDecl>(Mem)) {
         // C++ [class.union]p3:
         //   An anonymous union shall not have private or protected
         //   members (clause 11).
@@ -3653,14 +3654,14 @@
         //   array of such objects.
         if (CheckNontrivialField(FD))
           Invalid = true;
-      } else if ((*Mem)->isImplicit()) {
+      } else if (Mem->isImplicit()) {
         // Any implicit members are fine.
-      } else if (isa<TagDecl>(*Mem) && (*Mem)->getDeclContext() != Record) {
+      } else if (isa<TagDecl>(Mem) && Mem->getDeclContext() != Record) {
         // This is a type that showed up in an
         // elaborated-type-specifier inside the anonymous struct or
         // union, but which actually declares a type outside of the
         // anonymous struct or union. It's okay.
-      } else if (RecordDecl *MemRecord = dyn_cast<RecordDecl>(*Mem)) {
+      } else if (auto *MemRecord = dyn_cast<RecordDecl>(Mem)) {
         if (!MemRecord->isAnonymousStructOrUnion() &&
             MemRecord->getDeclName()) {
           // Visual C++ allows type definition in anonymous struct or union.
@@ -3681,31 +3682,39 @@
                diag::ext_anonymous_record_with_anonymous_type)
             << (int)Record->isUnion();
         }
-      } else if (isa<AccessSpecDecl>(*Mem)) {
+      } else if (isa<AccessSpecDecl>(Mem)) {
         // Any access specifier is fine.
       } else {
         // We have something that isn't a non-static data
         // member. Complain about it.
         unsigned DK = diag::err_anonymous_record_bad_member;
-        if (isa<TypeDecl>(*Mem))
+        if (isa<TypeDecl>(Mem))
           DK = diag::err_anonymous_record_with_type;
-        else if (isa<FunctionDecl>(*Mem))
+        else if (isa<FunctionDecl>(Mem))
           DK = diag::err_anonymous_record_with_function;
-        else if (isa<VarDecl>(*Mem))
+        else if (isa<VarDecl>(Mem))
           DK = diag::err_anonymous_record_with_static;
         
         // Visual C++ allows type definition in anonymous struct or union.
         if (getLangOpts().MicrosoftExt &&
             DK == diag::err_anonymous_record_with_type)
-          Diag((*Mem)->getLocation(), diag::ext_anonymous_record_with_type)
+          Diag(Mem->getLocation(), diag::ext_anonymous_record_with_type)
             << (int)Record->isUnion();
         else {
-          Diag((*Mem)->getLocation(), DK)
+          Diag(Mem->getLocation(), DK)
               << (int)Record->isUnion();
           Invalid = true;
         }
       }
     }
+
+    // C++11 [class.union]p8 (DR1460):
+    //   At most one variant member of a union may have a
+    //   brace-or-equal-initializer.
+    if (cast<CXXRecordDecl>(Record)->hasInClassInitializer() &&
+        Owner->isRecord())
+      checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Owner),
+                                cast<CXXRecordDecl>(Record));
   }
 
   if (!Record->isUnion() && !Owner->isRecord()) {
@@ -3758,11 +3767,14 @@
   }
   Anon->setImplicit();
 
+  // Mark this as an anonymous struct/union type.
+  Record->setAnonymousStructOrUnion(true);
+
   // Add the anonymous struct/union object to the current
   // context. We'll be referencing this object when we refer to one of
   // its members.
   Owner->addDecl(Anon);
-  
+
   // Inject the members of the anonymous struct/union into the owning
   // context and into the identifier resolver chain for name lookup
   // purposes.
@@ -3773,13 +3785,17 @@
                                           Chain, false))
     Invalid = true;
 
-  // Mark this as an anonymous struct/union type. Note that we do not
-  // do this until after we have already checked and injected the
-  // members of this anonymous struct/union type, because otherwise
-  // the members could be injected twice: once by DeclContext when it
-  // builds its lookup table, and once by
-  // InjectAnonymousStructOrUnionMembers.
-  Record->setAnonymousStructOrUnion(true);
+  if (VarDecl *NewVD = dyn_cast<VarDecl>(Anon)) {
+    if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) {
+      Decl *ManglingContextDecl;
+      if (MangleNumberingContext *MCtx =
+              getCurrentMangleNumberContext(NewVD->getDeclContext(),
+                                            ManglingContextDecl)) {
+        Context.setManglingNumber(NewVD, MCtx->getManglingNumber(NewVD, S->getMSLocalManglingNumber()));
+        Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
+      }
+    }
+  }
 
   if (Invalid)
     Anon->setInvalidDecl();
@@ -4117,33 +4133,31 @@
 /// \returns true if we cannot safely recover from this error, false otherwise.
 bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                         DeclarationName Name,
-                                      SourceLocation Loc) {
+                                        SourceLocation Loc) {
   DeclContext *Cur = CurContext;
   while (isa<LinkageSpecDecl>(Cur) || isa<CapturedDecl>(Cur))
     Cur = Cur->getParent();
-  
-  // C++ [dcl.meaning]p1:
-  //   A declarator-id shall not be qualified except for the definition
-  //   of a member function (9.3) or static data member (9.4) outside of
-  //   its class, the definition or explicit instantiation of a function 
-  //   or variable member of a namespace outside of its namespace, or the
-  //   definition of an explicit specialization outside of its namespace,
-  //   or the declaration of a friend function that is a member of 
-  //   another class or namespace (11.3). [...]
-    
-  // The user provided a superfluous scope specifier that refers back to the
-  // class or namespaces in which the entity is already declared.
+
+  // If the user provided a superfluous scope specifier that refers back to the
+  // class in which the entity is already declared, diagnose and ignore it.
   //
   // class X {
   //   void X::f();
   // };
+  //
+  // Note, it was once ill-formed to give redundant qualification in all
+  // contexts, but that rule was removed by DR482.
   if (Cur->Equals(DC)) {
-    Diag(Loc, LangOpts.MicrosoftExt? diag::warn_member_extra_qualification
-                                   : diag::err_member_extra_qualification)
-      << Name << FixItHint::CreateRemoval(SS.getRange());
-    SS.clear();
+    if (Cur->isRecord()) {
+      Diag(Loc, LangOpts.MicrosoftExt ? diag::warn_member_extra_qualification
+                                      : diag::err_member_extra_qualification)
+        << Name << FixItHint::CreateRemoval(SS.getRange());
+      SS.clear();
+    } else {
+      Diag(Loc, diag::warn_namespace_member_extra_qualification) << Name;
+    }
     return false;
-  } 
+  }
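For illustration (hypothetical declarations), the split above keeps redundant qualification of a class member an error while downgrading the namespace case to a warning, per DR482:

    class X {
      void X::f();   // err_member_extra_qualification (warning under MS extensions)
    };
    namespace N {
      void N::f();   // warn_namespace_member_extra_qualification
    }
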
 
   // Check whether the qualifying scope encloses the scope of the original
   // declaration.
@@ -4239,7 +4253,7 @@
       // and return early, to avoid the coming semantic disaster.
       Diag(D.getIdentifierLoc(),
            diag::err_template_qualified_declarator_no_match)
-        << (NestedNameSpecifier*)D.getCXXScopeSpec().getScopeRep()
+        << D.getCXXScopeSpec().getScopeRep()
         << D.getCXXScopeSpec().getRange();
       return 0;
     }
@@ -4670,8 +4684,8 @@
                            LookupResult &Previous, bool &Redeclaration) {
   // Merge the decl with the existing one if appropriate. If the decl is
   // in an outer scope, it isn't the same thing.
-  FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/ false,
-                       /*ExplicitInstantiationOrSpecialization=*/false);
+  FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/false,
+                       /*AllowInlineNamespace*/false);
   filterNonConflictingPreviousDecls(Context, NewTD, Previous);
   if (!Previous.empty()) {
     Redeclaration = true;
@@ -4804,6 +4818,10 @@
 }
 
 static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
+  // Ensure that an auto decl is deduced otherwise the checks below might cache
+  // the wrong linkage.
+  assert(S.ParsingInitForAutoVars.count(&ND) == 0);
+
   // 'weak' only applies to declarations with external linkage.
   if (WeakAttr *Attr = ND.getAttr<WeakAttr>()) {
     if (!ND.isExternallyVisible()) {
@@ -4826,6 +4844,72 @@
       ND.dropAttr<SelectAnyAttr>();
     }
   }
+
+  // dll attributes require external linkage.
+  if (const DLLImportAttr *Attr = ND.getAttr<DLLImportAttr>()) {
+    if (!ND.isExternallyVisible()) {
+      S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
+        << &ND << Attr;
+      ND.setInvalidDecl();
+    }
+  }
+  if (const DLLExportAttr *Attr = ND.getAttr<DLLExportAttr>()) {
+    if (!ND.isExternallyVisible()) {
+      S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
+        << &ND << Attr;
+      ND.setInvalidDecl();
+    }
+  }
+}
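A rough sketch (hypothetical declarations) of what the new dll-linkage check rejects: both attributes require the declaration to be externally visible.

    static __declspec(dllexport) int x;         // err_attribute_dll_not_extern
    namespace { __declspec(dllimport) int y; }  // err_attribute_dll_not_extern
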
+
+static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
+                                           NamedDecl *NewDecl,
+                                           bool IsSpecialization) {
+  if (TemplateDecl *OldTD = dyn_cast<TemplateDecl>(OldDecl))
+    OldDecl = OldTD->getTemplatedDecl();
+  if (TemplateDecl *NewTD = dyn_cast<TemplateDecl>(NewDecl))
+    NewDecl = NewTD->getTemplatedDecl();
+
+  if (!OldDecl || !NewDecl)
+      return;
+
+  const DLLImportAttr *OldImportAttr = OldDecl->getAttr<DLLImportAttr>();
+  const DLLExportAttr *OldExportAttr = OldDecl->getAttr<DLLExportAttr>();
+  const DLLImportAttr *NewImportAttr = NewDecl->getAttr<DLLImportAttr>();
+  const DLLExportAttr *NewExportAttr = NewDecl->getAttr<DLLExportAttr>();
+
+  // dllimport and dllexport are inheritable attributes so we have to exclude
+  // inherited attribute instances.
+  bool HasNewAttr = (NewImportAttr && !NewImportAttr->isInherited()) ||
+                    (NewExportAttr && !NewExportAttr->isInherited());
+
+  // A redeclaration is not allowed to add a dllimport or dllexport attribute,
+  // the only exception being explicit specializations.
+  // Implicitly generated declarations are also excluded for now because there
+  // is no other way to switch these to use dllimport or dllexport.
+  bool AddsAttr = !(OldImportAttr || OldExportAttr) && HasNewAttr;
+  if (AddsAttr && !IsSpecialization && !OldDecl->isImplicit()) {
+    S.Diag(NewDecl->getLocation(), diag::err_attribute_dll_redeclaration)
+      << NewDecl
+      << (NewImportAttr ? (const Attr *)NewImportAttr : NewExportAttr);
+    S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
+    NewDecl->setInvalidDecl();
+    return;
+  }
+
+  // A redeclaration is not allowed to drop a dllimport attribute, the only
+  // exception being inline function definitions.
+  // FIXME: Handle inline functions.
+  // NB: MSVC converts such a declaration to dllexport.
+  if (OldImportAttr && !HasNewAttr) {
+    S.Diag(NewDecl->getLocation(),
+           diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
+      << NewDecl << OldImportAttr;
+    S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
+    S.Diag(OldImportAttr->getLocation(), diag::note_previous_attribute);
+    OldDecl->dropAttr<DLLImportAttr>();
+    NewDecl->dropAttr<DLLImportAttr>();
+  }
 }
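For illustration (hypothetical declarations), the two redeclaration cases handled by checkDLLAttributeRedeclaration:

    void f();
    __declspec(dllimport) void f();  // err_attribute_dll_redeclaration:
                                     // a redeclaration may not add dllimport/dllexport

    __declspec(dllimport) void g();
    void g();                        // warn_redeclaration_without_attribute_prev_attribute_ignored:
                                     // the dllimport attribute is dropped
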
 
 /// Given that we are within the definition of the given function,
@@ -4906,6 +4990,31 @@
   llvm_unreachable("Unexpected context");
 }
 
+static bool hasParsedAttr(Scope *S, const AttributeList *AttrList,
+                          AttributeList::Kind Kind) {
+  for (const AttributeList *L = AttrList; L; L = L->getNext())
+    if (L->getKind() == Kind)
+      return true;
+  return false;
+}
+
+static bool hasParsedAttr(Scope *S, const Declarator &PD,
+                          AttributeList::Kind Kind) {
+  // Check decl attributes on the DeclSpec.
+  if (hasParsedAttr(S, PD.getDeclSpec().getAttributes().getList(), Kind))
+    return true;
+
+  // Walk the declarator structure, checking decl attributes that were in a type
+  // position to the decl itself.
+  for (unsigned I = 0, E = PD.getNumTypeObjects(); I != E; ++I) {
+    if (hasParsedAttr(S, PD.getTypeObject(I).getAttrs(), Kind))
+      return true;
+  }
+
+  // Finally, check attributes on the decl itself.
+  return hasParsedAttr(S, PD.getAttributes(), Kind);
+}
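The hasParsedAttr helpers let the variable path below spot dllimport before the VarDecl exists, so a global written without a storage class gets extern storage, e.g. (hypothetical):

    __declspec(dllimport) int imported;  // no storage class written; treated as 'extern'
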
+
 /// Adjust the \c DeclContext for a function or variable that might be a
 /// function-local external declaration.
 bool Sema::adjustContextForLocalExternDecl(DeclContext *&DC) {
@@ -4942,16 +5051,35 @@
   VarDecl::StorageClass SC =
     StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
 
+  // dllimport globals without explicit storage class are treated as extern. We
+  // have to change the storage class this early to get the right DeclContext.
+  if (SC == SC_None && !DC->isRecord() &&
+      hasParsedAttr(S, D, AttributeList::AT_DLLImport))
+    SC = SC_Extern;
+
   DeclContext *OriginalDC = DC;
   bool IsLocalExternDecl = SC == SC_Extern &&
                            adjustContextForLocalExternDecl(DC);
 
-  if (getLangOpts().OpenCL && !getOpenCLOptions().cl_khr_fp16) {
-    // OpenCL v1.2 s6.1.1.1: reject declaring variables of the half and
-    // half array type (unless the cl_khr_fp16 extension is enabled).
-    if (Context.getBaseElementType(R)->isHalfType()) {
-      Diag(D.getIdentifierLoc(), diag::err_opencl_half_declaration) << R;
-      D.setInvalidType();
+  if (getLangOpts().OpenCL) {
+    // OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
+    QualType NR = R;
+    while (NR->isPointerType()) {
+      if (NR->isFunctionPointerType()) {
+        Diag(D.getIdentifierLoc(), diag::err_opencl_function_pointer_variable);
+        D.setInvalidType();
+        break;
+      }
+      NR = NR->getPointeeType();
+    }
+
+    if (!getOpenCLOptions().cl_khr_fp16) {
+      // OpenCL v1.2 s6.1.1.1: reject declaring variables of the half and
+      // half array type (unless the cl_khr_fp16 extension is enabled).
+      if (Context.getBaseElementType(R)->isHalfType()) {
+        Diag(D.getIdentifierLoc(), diag::err_opencl_half_declaration) << R;
+        D.setInvalidType();
+      }
     }
   }
 
@@ -5033,9 +5161,9 @@
   bool IsVariableTemplateSpecialization = false;
   bool IsPartialSpecialization = false;
   bool IsVariableTemplate = false;
-  VarTemplateDecl *PrevVarTemplate = 0;
   VarDecl *NewVD = 0;
   VarTemplateDecl *NewTemplate = 0;
+  TemplateParameterList *TemplateParams = 0;
   if (!getLangOpts().CPlusPlus) {
     NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
                             D.getIdentifierLoc(), II,
@@ -5097,104 +5225,15 @@
       }
     }
 
-    NamedDecl *PrevDecl = 0;
-    if (Previous.begin() != Previous.end())
-      PrevDecl = (*Previous.begin())->getUnderlyingDecl();
-    PrevVarTemplate = dyn_cast_or_null<VarTemplateDecl>(PrevDecl);
-
     // Match up the template parameter lists with the scope specifier, then
     // determine whether we have a template or a template specialization.
-    TemplateParameterList *TemplateParams =
-        MatchTemplateParametersToScopeSpecifier(
-            D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
-            D.getCXXScopeSpec(), TemplateParamLists,
-            /*never a friend*/ false, IsExplicitSpecialization, Invalid);
-    if (TemplateParams) {
-      if (!TemplateParams->size() &&
-          D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
-        // There is an extraneous 'template<>' for this variable. Complain
-        // about it, but allow the declaration of the variable.
-        Diag(TemplateParams->getTemplateLoc(),
-             diag::err_template_variable_noparams)
-          << II
-          << SourceRange(TemplateParams->getTemplateLoc(),
-                         TemplateParams->getRAngleLoc());
-      } else {
-        // Only C++1y supports variable templates (N3651).
-        Diag(D.getIdentifierLoc(),
-             getLangOpts().CPlusPlus1y
-                 ? diag::warn_cxx11_compat_variable_template
-                 : diag::ext_variable_template);
+    TemplateParams = MatchTemplateParametersToScopeSpecifier(
+        D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
+        D.getCXXScopeSpec(), TemplateParamLists,
+        /*never a friend*/ false, IsExplicitSpecialization, Invalid);
 
-        if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
-          // This is an explicit specialization or a partial specialization.
-          // Check that we can declare a specialization here
-
-          IsVariableTemplateSpecialization = true;
-          IsPartialSpecialization = TemplateParams->size() > 0;
-
-        } else { // if (TemplateParams->size() > 0)
-          // This is a template declaration.
-          IsVariableTemplate = true;
-
-          // Check that we can declare a template here.
-          if (CheckTemplateDeclScope(S, TemplateParams))
-            return 0;
-
-          // If there is a previous declaration with the same name, check
-          // whether this is a valid redeclaration.
-          if (PrevDecl && !isDeclInScope(PrevDecl, DC, S))
-            PrevDecl = PrevVarTemplate = 0;
-
-          if (PrevVarTemplate) {
-            // Ensure that the template parameter lists are compatible.
-            if (!TemplateParameterListsAreEqual(
-                    TemplateParams, PrevVarTemplate->getTemplateParameters(),
-                    /*Complain=*/true, TPL_TemplateMatch))
-              return 0;
-          } else if (PrevDecl && PrevDecl->isTemplateParameter()) {
-            // Maybe we will complain about the shadowed template parameter.
-            DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
-
-            // Just pretend that we didn't see the previous declaration.
-            PrevDecl = 0;
-          } else if (PrevDecl) {
-            // C++ [temp]p5:
-            // ... a template name declared in namespace scope or in class
-            // scope shall be unique in that scope.
-            Diag(D.getIdentifierLoc(), diag::err_redefinition_different_kind)
-                << Name;
-            Diag(PrevDecl->getLocation(), diag::note_previous_definition);
-            return 0;
-          }
-
-          // Check the template parameter list of this declaration, possibly
-          // merging in the template parameter list from the previous variable
-          // template declaration.
-          if (CheckTemplateParameterList(
-                  TemplateParams,
-                  PrevVarTemplate ? PrevVarTemplate->getTemplateParameters()
-                                  : 0,
-                  (D.getCXXScopeSpec().isSet() && DC && DC->isRecord() &&
-                   DC->isDependentContext())
-                      ? TPC_ClassTemplateMember
-                      : TPC_VarTemplate))
-            Invalid = true;
-
-          if (D.getCXXScopeSpec().isSet()) {
-            // If the name of the template was qualified, we must be defining
-            // the template out-of-line.
-            if (!D.getCXXScopeSpec().isInvalid() && !Invalid &&
-                !PrevVarTemplate) {
-              Diag(D.getIdentifierLoc(), diag::err_member_decl_does_not_match)
-                  << Name << DC << /*IsDefinition*/true
-                  << D.getCXXScopeSpec().getRange();
-              Invalid = true;
-            }
-          }
-        }
-      }
-    } else if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+    if (D.getName().getKind() == UnqualifiedId::IK_TemplateId &&
+        !TemplateParams) {
       TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
 
       // We have encountered something that the user meant to be a
@@ -5207,22 +5246,53 @@
           << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc)
           << FixItHint::CreateInsertion(D.getDeclSpec().getLocStart(),
                                         "template<> ");
-      IsVariableTemplateSpecialization = true;
+      IsExplicitSpecialization = true;
+      TemplateParams = TemplateParameterList::Create(Context, SourceLocation(),
+                                                     SourceLocation(), 0, 0,
+                                                     SourceLocation());
+    }
+
+    if (TemplateParams) {
+      if (!TemplateParams->size() &&
+          D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+        // There is an extraneous 'template<>' for this variable. Complain
+        // about it, but allow the declaration of the variable.
+        Diag(TemplateParams->getTemplateLoc(),
+             diag::err_template_variable_noparams)
+          << II
+          << SourceRange(TemplateParams->getTemplateLoc(),
+                         TemplateParams->getRAngleLoc());
+        TemplateParams = 0;
+      } else {
+        // Only C++1y supports variable templates (N3651).
+        Diag(D.getIdentifierLoc(),
+             getLangOpts().CPlusPlus1y
+                 ? diag::warn_cxx11_compat_variable_template
+                 : diag::ext_variable_template);
+
+        if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+          // This is an explicit specialization or a partial specialization.
+          // FIXME: Check that we can declare a specialization here.
+          IsVariableTemplateSpecialization = true;
+          IsPartialSpecialization = TemplateParams->size() > 0;
+        } else { // if (TemplateParams->size() > 0)
+          // This is a template declaration.
+          IsVariableTemplate = true;
+
+          // Check that we can declare a template here.
+          if (CheckTemplateDeclScope(S, TemplateParams))
+            return 0;
+        }
+      }
     }
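For illustration, the declaration shapes this block classifies (hypothetical C++1y user code):

    template <typename T> constexpr T pi = T(3.1415926535897932385L);  // variable template
    template <> constexpr float pi<float> = 3.14159265f;               // explicit specialization
    template <typename T> constexpr T *pi<T *> = nullptr;              // partial specialization
    template <> int notATemplate = 0;  // extraneous 'template<>': err_template_variable_noparams
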
 
     if (IsVariableTemplateSpecialization) {
-      if (!PrevVarTemplate) {
-        Diag(D.getIdentifierLoc(), diag::err_var_spec_no_template)
-            << IsPartialSpecialization;
-        return 0;
-      }
-
       SourceLocation TemplateKWLoc =
           TemplateParamLists.size() > 0
               ? TemplateParamLists[0]->getTemplateLoc()
               : SourceLocation();
       DeclResult Res = ActOnVarTemplateSpecialization(
-          S, PrevVarTemplate, D, TInfo, TemplateKWLoc, TemplateParams, SC,
+          S, D, TInfo, TemplateKWLoc, TemplateParams, SC,
           IsPartialSpecialization);
       if (Res.isInvalid())
         return 0;
@@ -5236,7 +5306,7 @@
     if (IsVariableTemplate) {
       NewTemplate =
           VarTemplateDecl::Create(Context, DC, D.getIdentifierLoc(), Name,
-                                  TemplateParams, NewVD, PrevVarTemplate);
+                                  TemplateParams, NewVD);
       NewVD->setDescribedVarTemplate(NewTemplate);
     }
 
@@ -5253,18 +5323,13 @@
 
     SetNestedNameSpecifier(NewVD, D);
 
-    // FIXME: Do we need D.getCXXScopeSpec().isSet()?
-    if (TemplateParams && TemplateParamLists.size() > 1 &&
-        (!IsVariableTemplateSpecialization || D.getCXXScopeSpec().isSet())) {
+    // If we have any template parameter lists that don't directly belong to
+    // the variable (matching the scope specifier), store them.
+    unsigned VDTemplateParamLists = TemplateParams ? 1 : 0;
+    if (TemplateParamLists.size() > VDTemplateParamLists)
       NewVD->setTemplateParameterListsInfo(
-          Context, TemplateParamLists.size() - 1, TemplateParamLists.data());
-    } else if (IsVariableTemplateSpecialization ||
-               (!TemplateParams && TemplateParamLists.size() > 0 &&
-                (D.getCXXScopeSpec().isSet()))) {
-      NewVD->setTemplateParameterListsInfo(Context,
-                                           TemplateParamLists.size(),
-                                           TemplateParamLists.data());
-    }
+          Context, TemplateParamLists.size() - VDTemplateParamLists,
+          TemplateParamLists.data());
 
     if (D.getDeclSpec().isConstexprSpecified())
       NewVD->setConstexpr(true);
@@ -5346,9 +5411,6 @@
   // Handle attributes prior to checking for duplicates in MergeVarDecl
   ProcessDeclAttributes(S, NewVD, D);
 
-  if (NewVD->hasAttrs())
-    CheckAlignasUnderalignment(NewVD);
-
   if (getLangOpts().CUDA) {
     // CUDA B.2.5: "__shared__ and __constant__ variables have implied static
     // storage [duration]."
@@ -5359,6 +5421,13 @@
     }
   }
 
+  // Ensure that dllimport globals without explicit storage class are treated as
+  // extern. The storage class is set above using parsed attributes. Now we can
+  // check the VarDecl itself.
+  assert(!NewVD->hasAttr<DLLImportAttr>() ||
+         NewVD->getAttr<DLLImportAttr>()->isInherited() ||
+         NewVD->isStaticDataMember() || NewVD->getStorageClass() != SC_None);
+
   // In auto-retain/release, infer strong retention for variables of
   // retainable type.
   if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
@@ -5388,7 +5457,7 @@
     }
 
     NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
-                                                Context, Label));
+                                                Context, Label, 0));
   } else if (!ExtnameUndeclaredIdentifiers.empty()) {
     llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
       ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
@@ -5399,15 +5468,16 @@
   }
 
   // Diagnose shadowed variables before filtering for scope.
-  if (!D.getCXXScopeSpec().isSet())
+  if (D.getCXXScopeSpec().isEmpty())
     CheckShadow(S, NewVD, Previous);
 
   // Don't consider existing declarations that are in a different
   // scope and are out-of-semantic-context declarations (if the new
   // declaration has linkage).
-  FilterLookupForScope(
-      Previous, OriginalDC, S, shouldConsiderLinkage(NewVD),
-      IsExplicitSpecialization || IsVariableTemplateSpecialization);
+  FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewVD),
+                       D.getCXXScopeSpec().isNotEmpty() ||
+                       IsExplicitSpecialization ||
+                       IsVariableTemplateSpecialization);
 
   // Check whether the previous declaration is in the same block scope. This
   // affects whether we merge types with it, per C++11 [dcl.array]p3.
@@ -5420,6 +5490,11 @@
   if (!getLangOpts().CPlusPlus) {
     D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
   } else {
+    // If this is an explicit specialization of a static data member, check it.
+    if (IsExplicitSpecialization && !NewVD->isInvalidDecl() &&
+        CheckMemberSpecialization(NewVD, Previous))
+      NewVD->setInvalidDecl();
+
     // Merge the decl with the existing one if appropriate.
     if (!Previous.empty()) {
       if (Previous.isSingleResult() &&
@@ -5440,24 +5515,37 @@
       NewVD->setInvalidDecl();
     }
 
-    if (!IsVariableTemplateSpecialization) {
-      if (PrevVarTemplate) {
-        LookupResult PrevDecl(*this, GetNameForDeclarator(D),
-                              LookupOrdinaryName, ForRedeclaration);
-        PrevDecl.addDecl(PrevVarTemplate->getTemplatedDecl());
-        D.setRedeclaration(CheckVariableDeclaration(NewVD, PrevDecl));
-      } else
-        D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
-    }
+    if (!IsVariableTemplateSpecialization)
+      D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
 
-    // This is an explicit specialization of a static data member. Check it.
-    if (IsExplicitSpecialization && !NewVD->isInvalidDecl() &&
-        CheckMemberSpecialization(NewVD, Previous))
-      NewVD->setInvalidDecl();
+    if (NewTemplate) {
+      VarTemplateDecl *PrevVarTemplate =
+          NewVD->getPreviousDecl()
+              ? NewVD->getPreviousDecl()->getDescribedVarTemplate()
+              : 0;
+
+      // Check the template parameter list of this declaration, possibly
+      // merging in the template parameter list from the previous variable
+      // template declaration.
+      if (CheckTemplateParameterList(
+              TemplateParams,
+              PrevVarTemplate ? PrevVarTemplate->getTemplateParameters()
+                              : 0,
+              (D.getCXXScopeSpec().isSet() && DC && DC->isRecord() &&
+               DC->isDependentContext())
+                  ? TPC_ClassTemplateMember
+                  : TPC_VarTemplate))
+        NewVD->setInvalidDecl();
+
+      // If we are providing an explicit specialization of a static variable
+      // template, make a note of that.
+      if (PrevVarTemplate &&
+          PrevVarTemplate->getInstantiatedFromMemberTemplate())
+        PrevVarTemplate->setMemberSpecialization();
+    }
   }
 
   ProcessPragmaWeak(S, NewVD);
-  checkAttributesAfterMerging(*this, *NewVD);
 
   // If this is the first declaration of an extern C variable, update
   // the map of such variables.
@@ -5470,16 +5558,21 @@
     if (MangleNumberingContext *MCtx =
             getCurrentMangleNumberContext(NewVD->getDeclContext(),
                                           ManglingContextDecl)) {
-      Context.setManglingNumber(NewVD, MCtx->getManglingNumber(NewVD));
+      Context.setManglingNumber(
+          NewVD, MCtx->getManglingNumber(NewVD, S->getMSLocalManglingNumber()));
+      Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
     }
   }
 
-  // If we are providing an explicit specialization of a static variable
-  // template, make a note of that.
-  if (PrevVarTemplate && PrevVarTemplate->getInstantiatedFromMemberTemplate())
-    PrevVarTemplate->setMemberSpecialization();
+  if (D.isRedeclaration() && !Previous.empty()) {
+    checkDLLAttributeRedeclaration(
+        *this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewVD,
+        IsExplicitSpecialization);
+  }
 
   if (NewTemplate) {
+    if (NewVD->isInvalidDecl())
+      NewTemplate->setInvalidDecl();
     ActOnDocumentableDecl(NewTemplate);
     return NewTemplate;
   }
@@ -5526,11 +5619,9 @@
     if (shadowedVar->isExternC()) {
       // For shadowing external vars, make sure that we point to the global
       // declaration, not a locally scoped extern declaration.
-      for (VarDecl::redecl_iterator
-             I = shadowedVar->redecls_begin(), E = shadowedVar->redecls_end();
-           I != E; ++I)
+      for (auto I : shadowedVar->redecls())
         if (I->isFileVarDecl()) {
-          ShadowedDecl = *I;
+          ShadowedDecl = I;
           break;
         }
     }
@@ -5566,6 +5657,8 @@
   DeclarationName Name = R.getLookupName();
 
   // Emit warning and note.
+  if (getSourceManager().isInSystemMacro(R.getNameLoc()))
+    return;
   Diag(R.getNameLoc(), diag::warn_decl_shadow) << Name << Kind << OldDC;
   Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
 }
@@ -5711,6 +5804,9 @@
   if (T->isUndeducedType())
     return;
 
+  if (NewVD->hasAttrs())
+    CheckAlignasUnderalignment(NewVD);
+
   if (T->isObjCObjectType()) {
     Diag(NewVD->getLocation(), diag::err_statically_allocated_object)
       << FixItHint::CreateInsertion(NewVD->getLocation(), "*");
@@ -5828,7 +5924,6 @@
   if (NewVD->isConstexpr() && !T->isDependentType() &&
       RequireLiteralType(NewVD->getLocation(), T,
                          diag::err_constexpr_var_non_literal)) {
-    // Can't perform this check until the type is deduced.
     NewVD->setInvalidDecl();
     return;
   }
@@ -5947,9 +6042,8 @@
   bool hasNonDeletedOverridenMethods = false;
   bool AddedAny = false;
   if (DC->lookupInBases(&FindOverriddenMethod, &Data, Paths)) {
-    for (CXXBasePaths::decl_iterator I = Paths.found_decls_begin(),
-         E = Paths.found_decls_end(); I != E; ++I) {
-      if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(*I)) {
+    for (auto *I : Paths.found_decls()) {
+      if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(I)) {
         MD->addOverriddenMethod(OldMD->getCanonicalDecl());
         if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
             !CheckOverridingFunctionAttributes(MD, OldMD) &&
@@ -5995,7 +6089,7 @@
       : Context(Context), OriginalFD(TypoFD),
         ExpectedParent(Parent ? Parent->getCanonicalDecl() : 0) {}
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     if (candidate.getEditDistance() == 0)
       return false;
 
@@ -6254,10 +6348,9 @@
   // For record types, this is done by the AbstractClassUsageDiagnoser once
   // the class has been completely parsed.
   if (!DC->isRecord() &&
-      SemaRef.RequireNonAbstractType(D.getIdentifierLoc(),
-                                     R->getAs<FunctionType>()->getResultType(),
-                                     diag::err_abstract_type_in_decl,
-                                     SemaRef.AbstractReturnType))
+      SemaRef.RequireNonAbstractType(
+          D.getIdentifierLoc(), R->getAs<FunctionType>()->getReturnType(),
+          diag::err_abstract_type_in_decl, SemaRef.AbstractReturnType))
     D.setInvalidType();
 
   if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
@@ -6292,15 +6385,6 @@
         SemaRef.AdjustDestructorExceptionSpec(Record, NewDD);
       }
 
-      // The Microsoft ABI requires that we perform the destructor body
-      // checks (i.e. operator delete() lookup) at every declaration, as
-      // any translation unit may need to emit a deleting destructor.
-      if (SemaRef.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
-          !Record->isDependentType() && Record->getDefinition() &&
-          !Record->isBeingDefined()) {
-        SemaRef.CheckDestructor(NewDD);
-      }
-
       IsVirtualOkay = true;
       return NewDD;
 
@@ -6363,26 +6447,11 @@
   }
 }
 
-void Sema::checkVoidParamDecl(ParmVarDecl *Param) {
-  // In C++, the empty parameter-type-list must be spelled "void"; a
-  // typedef of void is not permitted.
-  if (getLangOpts().CPlusPlus &&
-      Param->getType().getUnqualifiedType() != Context.VoidTy) {
-    bool IsTypeAlias = false;
-    if (const TypedefType *TT = Param->getType()->getAs<TypedefType>())
-      IsTypeAlias = isa<TypeAliasDecl>(TT->getDecl());
-    else if (const TemplateSpecializationType *TST =
-               Param->getType()->getAs<TemplateSpecializationType>())
-      IsTypeAlias = TST->isTypeAlias();
-    Diag(Param->getLocation(), diag::err_param_typedef_of_void)
-      << IsTypeAlias;
-  }
-}
-
 enum OpenCLParamType {
   ValidKernelParam,
   PtrPtrKernelParam,
   PtrKernelParam,
+  PrivatePtrKernelParam,
   InvalidKernelParam,
   RecordKernelParam
 };
@@ -6390,7 +6459,10 @@
 static OpenCLParamType getOpenCLKernelParameterType(QualType PT) {
   if (PT->isPointerType()) {
     QualType PointeeType = PT->getPointeeType();
-    return PointeeType->isPointerType() ? PtrPtrKernelParam : PtrKernelParam;
+    if (PointeeType->isPointerType())
+      return PtrPtrKernelParam;
+    return PointeeType.getAddressSpace() == 0 ? PrivatePtrKernelParam
+                                              : PtrKernelParam;
   }
 
   // TODO: Forbid the other integer types (size_t, ptrdiff_t...) when they can
@@ -6435,6 +6507,14 @@
     D.setInvalidType();
     return;
 
+  case PrivatePtrKernelParam:
+    // OpenCL v1.2 s6.9.a:
+    // A kernel function argument cannot be declared as a
+    // pointer to the private address space.
+    S.Diag(Param->getLocation(), diag::err_opencl_private_ptr_kernel_param);
+    D.setInvalidType();
+    return;
+
     // OpenCL v1.2 s6.9.k:
     // Arguments to kernel functions in a program cannot be declared with the
     // built-in scalar types bool, half, size_t, ptrdiff_t, intptr_t, and
@@ -6495,9 +6575,7 @@
     // Add a null marker so we know when we've gone back up a level
     VisitStack.push_back((const Decl *) 0);
 
-    for (RecordDecl::field_iterator I = RD->field_begin(),
-           E = RD->field_end(); I != E; ++I) {
-      const FieldDecl *FD = *I;
+    for (const auto *FD : RD->fields()) {
       QualType QT = FD->getType();
 
       if (ValidTypes.count(QT.getTypePtr()))
@@ -6516,7 +6594,8 @@
       // Arguments to kernel functions that are declared to be a struct or union
       // do not allow OpenCL objects to be passed as elements of the struct or
       // union.
-      if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam) {
+      if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam ||
+          ParamType == PrivatePtrKernelParam) {
         S.Diag(Param->getLocation(),
                diag::err_record_with_pointers_kernel_param)
           << PT->isUnionType()
@@ -6742,14 +6821,14 @@
       }
 
       if (getLangOpts().CPlusPlus1y &&
-          NewFD->getResultType()->isUndeducedType())
+          NewFD->getReturnType()->isUndeducedType())
         Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_auto_fn_virtual);
     }
 
     if (getLangOpts().CPlusPlus1y &&
         (NewFD->isDependentContext() ||
          (isFriend && CurContext->isDependentContext())) &&
-        NewFD->getResultType()->isUndeducedType()) {
+        NewFD->getReturnType()->isUndeducedType()) {
       // If the function template is referenced directly (for instance, as a
       // member of the current instantiation), pretend it has a dependent type.
       // This is not really justified by the standard, but is the only sane
@@ -6758,9 +6837,9 @@
       // a friend yet, so 'isDependentContext' on the FD doesn't work.
       const FunctionProtoType *FPT =
           NewFD->getType()->castAs<FunctionProtoType>();
-      QualType Result = SubstAutoType(FPT->getResultType(),
-                                       Context.DependentTy);
-      NewFD->setType(Context.getFunctionType(Result, FPT->getArgTypes(),
+      QualType Result =
+          SubstAutoType(FPT->getReturnType(), Context.DependentTy);
+      NewFD->setType(Context.getFunctionType(Result, FPT->getParamTypes(),
                                              FPT->getExtProtoInfo()));
     }
 
@@ -6833,6 +6912,8 @@
     }
 
     // If a function is defined as defaulted or deleted, mark it as such now.
+    // FIXME: Does this ever happen? ActOnStartOfFunctionDef forces the function
+    // definition kind to FDK_Definition.
     switch (D.getFunctionDefinitionKind()) {
       case FDK_Declaration:
       case FDK_Definition:
@@ -6878,13 +6959,14 @@
         getLangOpts().CPlusPlus11 && FPT && !FPT->hasExceptionSpec()) {
       FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
       EPI.ExceptionSpecType = EST_BasicNoexcept;
-      NewFD->setType(Context.getFunctionType(FPT->getResultType(),
-                                             FPT->getArgTypes(), EPI));
+      NewFD->setType(Context.getFunctionType(FPT->getReturnType(),
+                                             FPT->getParamTypes(), EPI));
     }
   }
 
   // Filter out previous declarations that don't match the scope.
   FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewFD),
+                       D.getCXXScopeSpec().isNotEmpty() ||
                        isExplicitSpecialization ||
                        isFunctionTemplateSpecialization);
 
@@ -6893,7 +6975,7 @@
     // The parser guarantees this is a string.
     StringLiteral *SE = cast<StringLiteral>(E);
     NewFD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0), Context,
-                                                SE->getString()));
+                                                SE->getString(), 0));
   } else if (!ExtnameUndeclaredIdentifiers.empty()) {
     llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
       ExtnameUndeclaredIdentifiers.find(NewFD->getIdentifier());
@@ -6914,14 +6996,13 @@
     // single void argument.
     // We let through "const void" here because Sema::GetTypeForDeclarator
     // already checks for that case.
-    if (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
-        FTI.ArgInfo[0].Param &&
-        cast<ParmVarDecl>(FTI.ArgInfo[0].Param)->getType()->isVoidType()) {
+    if (FTI.NumParams == 1 && !FTI.isVariadic && FTI.Params[0].Ident == 0 &&
+        FTI.Params[0].Param &&
+        cast<ParmVarDecl>(FTI.Params[0].Param)->getType()->isVoidType()) {
       // Empty arg list, don't push any params.
-      checkVoidParamDecl(cast<ParmVarDecl>(FTI.ArgInfo[0].Param));
-    } else if (FTI.NumArgs > 0 && FTI.ArgInfo[0].Param != 0) {
-      for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
-        ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
+    } else if (FTI.NumParams > 0 && FTI.Params[0].Param != 0) {
+      for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) {
+        ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
         assert(Param->getDeclContext() != NewFD && "Was set before ?");
         Param->setDeclContext(NewFD);
         Params.push_back(Param);
@@ -6942,10 +7023,9 @@
     // @endcode
 
     // Synthesize a parameter for each argument type.
-    for (FunctionProtoType::arg_type_iterator AI = FT->arg_type_begin(),
-         AE = FT->arg_type_end(); AI != AE; ++AI) {
+    for (const auto &AI : FT->param_types()) {
       ParmVarDecl *Param =
-        BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), *AI);
+          BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), AI);
       Param->setScopeInfo(0, Params.size());
       Params.push_back(Param);
     }
@@ -6969,12 +7049,12 @@
   if (D.getDeclSpec().isNoreturnSpecified())
     NewFD->addAttr(
         ::new(Context) C11NoReturnAttr(D.getDeclSpec().getNoreturnSpecLoc(),
-                                       Context));
+                                       Context, 0));
 
   // Functions returning a variably modified type violate C99 6.7.5.2p2
   // because all functions have linkage.
   if (!NewFD->isInvalidDecl() &&
-      NewFD->getResultType()->isVariablyModifiedType()) {
+      NewFD->getReturnType()->isVariablyModifiedType()) {
     Diag(NewFD->getLocation(), diag::err_vm_func_decl);
     NewFD->setInvalidDecl();
   }
@@ -6982,17 +7062,29 @@
   // Handle attributes.
   ProcessDeclAttributes(S, NewFD, D);
 
-  QualType RetType = NewFD->getResultType();
+  QualType RetType = NewFD->getReturnType();
   const CXXRecordDecl *Ret = RetType->isRecordType() ?
       RetType->getAsCXXRecordDecl() : RetType->getPointeeCXXRecordDecl();
   if (!NewFD->isInvalidDecl() && !NewFD->hasAttr<WarnUnusedResultAttr>() &&
       Ret && Ret->hasAttr<WarnUnusedResultAttr>()) {
     const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
-    // Attach the attribute to the new decl. Don't apply the attribute if it
-    // returns an instance of the class (e.g. assignment operators).
-    if (!MD || MD->getParent() != Ret) {
-      NewFD->addAttr(new (Context) WarnUnusedResultAttr(SourceRange(),
-                                                        Context));
+    // Attach WarnUnusedResult to functions returning types with that attribute.
+    // Don't apply the attribute to that type's own non-static member functions
+    // (to avoid warning on things like assignment operators)
+    if (!MD || MD->getParent() != Ret)
+      NewFD->addAttr(WarnUnusedResultAttr::CreateImplicit(Context));
+  }
+
+  if (getLangOpts().OpenCL) {
+    // OpenCL v1.1 s6.5: Using an address space qualifier in a function return
+    // type declaration will generate a compilation error.
+    unsigned AddressSpace = RetType.getAddressSpace();
+    if (AddressSpace == LangAS::opencl_local ||
+        AddressSpace == LangAS::opencl_global ||
+        AddressSpace == LangAS::opencl_constant) {
+      Diag(NewFD->getLocation(),
+           diag::err_opencl_return_value_with_address_space);
+      NewFD->setInvalidDecl();
     }
   }
 
@@ -7141,12 +7233,7 @@
       if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
         CheckMSVCRTEntryPoint(NewFD);
 
-      if (NewFD->isInvalidDecl()) {
-        // If this is a class member, mark the class invalid immediately.
-        // This avoids some consistency errors later.
-        if (CXXMethodDecl* methodDecl = dyn_cast<CXXMethodDecl>(NewFD))
-          methodDecl->getParent()->setInvalidDecl();
-      } else
+      if (!NewFD->isInvalidDecl())
         D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
                                                     isExplicitSpecialization));
     }
@@ -7248,11 +7335,12 @@
         }
       }
 
-    } else if (!D.isFunctionDefinition() && D.getCXXScopeSpec().isSet() &&
+    } else if (!D.isFunctionDefinition() &&
+               isa<CXXMethodDecl>(NewFD) && NewFD->isOutOfLine() &&
                !isFriend && !isFunctionTemplateSpecialization &&
                !isExplicitSpecialization) {
       // An out-of-line member function declaration must also be a
-      // definition (C++ [dcl.meaning]p1).
+      // definition (C++ [class.mfct]p2).
       // Note that this is not the case for explicit specializations of
       // function templates or member functions of class templates, per
       // C++ [temp.expl.spec]p2. We also allow these declarations as an 
@@ -7281,7 +7369,7 @@
     EPI.Variadic = true;
     EPI.ExtInfo = FT->getExtInfo();
 
-    QualType R = Context.getFunctionType(FT->getResultType(), None, EPI);
+    QualType R = Context.getFunctionType(FT->getReturnType(), None, EPI);
     NewFD->setType(R);
   }
 
@@ -7303,6 +7391,12 @@
   // Set this FunctionDecl's range up to the right paren.
   NewFD->setRangeEnd(D.getSourceRange().getEnd());
 
+  if (D.isRedeclaration() && !Previous.empty()) {
+    checkDLLAttributeRedeclaration(
+        *this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewFD,
+        isExplicitSpecialization || isFunctionTemplateSpecialization);
+  }
+
   if (getLangOpts().CPlusPlus) {
     if (FunctionTemplate) {
       if (NewFD->isInvalidDecl())
@@ -7320,18 +7414,15 @@
     }
     
     // OpenCL v1.2, s6.9 -- Kernels can only have return type void.
-    if (!NewFD->getResultType()->isVoidType()) {
+    if (!NewFD->getReturnType()->isVoidType()) {
       Diag(D.getIdentifierLoc(),
            diag::err_expected_kernel_void_return_type);
       D.setInvalidType();
     }
 
     llvm::SmallPtrSet<const Type *, 16> ValidTypes;
-    for (FunctionDecl::param_iterator PI = NewFD->param_begin(),
-         PE = NewFD->param_end(); PI != PE; ++PI) {
-      ParmVarDecl *Param = *PI;
+    for (auto Param : NewFD->params())
       checkIsValidOpenCLKernelParameter(*this, D, Param, ValidTypes);
-    }
   }
 
   MarkUnusedFileScopedDecl(NewFD);
@@ -7341,7 +7432,7 @@
       if (!NewFD->isInvalidDecl() &&
           NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
         if (II->isStr("cudaConfigureCall")) {
-          if (!R->getAs<FunctionType>()->getResultType()->isScalarType())
+          if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
             Diag(NewFD->getLocation(), diag::err_config_scalar_return);
 
           Context.setcudaConfigureCallDecl(NewFD);
@@ -7383,8 +7474,8 @@
 bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                                     LookupResult &Previous,
                                     bool IsExplicitSpecialization) {
-  assert(!NewFD->getResultType()->isVariablyModifiedType() 
-         && "Variably modified return types are not handled here");
+  assert(!NewFD->getReturnType()->isVariablyModifiedType() &&
+         "Variably modified return types are not handled here");
 
   // Determine whether the type of this function should be merged with
   // a previous visible declaration. This never happens for functions in C++,
@@ -7440,8 +7531,7 @@
         if (OverloadedDecl)
           Diag(OverloadedDecl->getLocation(),
                diag::note_attribute_overloadable_prev_overload);
-        NewFD->addAttr(::new (Context) OverloadableAttr(SourceLocation(),
-                                                        Context));
+        NewFD->addAttr(OverloadableAttr::CreateImplicit(Context));
       }
     }
   }
@@ -7464,8 +7554,7 @@
             << Redeclaration << NewFD;
           Diag(Previous.getFoundDecl()->getLocation(),
                diag::note_attribute_overloadable_prev_overload);
-          NewFD->addAttr(::new (Context) OverloadableAttr(SourceLocation(),
-                                                          Context));
+          NewFD->addAttr(OverloadableAttr::CreateImplicit(Context));
         }
         if (IsOverload(NewFD, cast<FunctionDecl>(OldDecl), false)) {
           Redeclaration = false;
@@ -7488,17 +7577,16 @@
   if (!getLangOpts().CPlusPlus1y && MD && MD->isConstexpr() &&
       !MD->isStatic() && !isa<CXXConstructorDecl>(MD) &&
       (MD->getTypeQualifiers() & Qualifiers::Const) == 0) {
-    CXXMethodDecl *OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl);
-    if (FunctionTemplateDecl *OldTD =
-          dyn_cast_or_null<FunctionTemplateDecl>(OldDecl))
-      OldMD = dyn_cast<CXXMethodDecl>(OldTD->getTemplatedDecl());
+    CXXMethodDecl *OldMD = 0;
+    if (OldDecl)
+      OldMD = dyn_cast<CXXMethodDecl>(OldDecl->getAsFunction());
     if (!OldMD || !OldMD->isStatic()) {
       const FunctionProtoType *FPT =
         MD->getType()->castAs<FunctionProtoType>();
       FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
       EPI.TypeQuals |= Qualifiers::Const;
-      MD->setType(Context.getFunctionType(FPT->getResultType(),
-                                          FPT->getArgTypes(), EPI));
+      MD->setType(Context.getFunctionType(FPT->getReturnType(),
+                                          FPT->getParamTypes(), EPI));
 
       // Warn that we did this, if we're not performing template instantiation.
       // In that case, we'll have warned already when the template was defined.
@@ -7656,7 +7744,7 @@
     // compatible, and if it does, warn the user.
     // But, issue any diagnostic on the first declaration only.
     if (NewFD->isExternC() && Previous.empty()) {
-      QualType R = NewFD->getResultType();
+      QualType R = NewFD->getReturnType();
       if (R->isIncompleteType() && !R->isVoidType())
         Diag(NewFD->getLocation(), diag::warn_return_value_udt_incomplete)
             << NewFD << R;
@@ -7678,7 +7766,7 @@
   if (!FunctionTL)
     return SourceRange();
 
-  TypeLoc ResultTL = FunctionTL.getResultLoc();
+  TypeLoc ResultTL = FunctionTL.getReturnLoc();
   if (ResultTL.getUnqualifiedLoc().getAs<BuiltinTypeLoc>())
     return ResultTL.getSourceRange();
 
@@ -7686,8 +7774,9 @@
 }
 
 void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
-  // C++11 [basic.start.main]p3:  A program that declares main to be inline,
-  //   static or constexpr is ill-formed.
+  // C++11 [basic.start.main]p3:
+  //   A program that [...] declares main to be inline, static or
+  //   constexpr is ill-formed.
   // C11 6.7.4p4:  In a hosted environment, no function specifier(s) shall
   //   appear in a declaration of main.
   // static main is not an error under C99, but we should warn about it.
@@ -7725,7 +7814,7 @@
   const FunctionType* FT = T->castAs<FunctionType>();
 
   // All the standards say that main() should return 'int'.
-  if (Context.hasSameUnqualifiedType(FT->getResultType(), Context.IntTy)) {
+  if (Context.hasSameUnqualifiedType(FT->getReturnType(), Context.IntTy)) {
     // In C and C++, main magically returns 0 if you fall off the end;
     // set the flag which tells us that.
     // This is C++ [basic.start.main]p5 and C99 5.1.2.2.3.
@@ -7758,7 +7847,7 @@
   if (isa<FunctionNoProtoType>(FT)) return;
 
   const FunctionProtoType* FTP = cast<const FunctionProtoType>(FT);
-  unsigned nparams = FTP->getNumArgs();
+  unsigned nparams = FTP->getNumParams();
   assert(FD->getNumParams() == nparams);
 
   bool HasExtraParameters = (nparams > 3);
@@ -7783,7 +7872,7 @@
   QualType Expected[] = { Context.IntTy, CharPP, CharPP, CharPP };
 
   for (unsigned i = 0; i < nparams; ++i) {
-    QualType AT = FTP->getArgType(i);
+    QualType AT = FTP->getParamType(i);
 
     bool mismatch = true;
 
@@ -7818,7 +7907,7 @@
   }
   
   if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
-    Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD->getName();
+    Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
     FD->setInvalidDecl();
   }
 }
@@ -7830,15 +7919,15 @@
 
   // Set an implicit return of 'zero' if the function can return some integral,
   // enumeration, pointer or nullptr type.
-  if (FT->getResultType()->isIntegralOrEnumerationType() ||
-      FT->getResultType()->isAnyPointerType() ||
-      FT->getResultType()->isNullPtrType())
+  if (FT->getReturnType()->isIntegralOrEnumerationType() ||
+      FT->getReturnType()->isAnyPointerType() ||
+      FT->getReturnType()->isNullPtrType())
     // DllMain is exempt because a return value of zero means it failed.
     if (FD->getName() != "DllMain")
       FD->setHasImplicitReturnZero(true);
 
   if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
-    Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD->getName();
+    Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
     FD->setInvalidDecl();
   }
 }
@@ -8084,6 +8173,11 @@
         return;
       } else {
         DeduceInit = CXXDirectInit->getExpr(0);
+        if (isa<InitListExpr>(DeduceInit))
+          Diag(CXXDirectInit->getLocStart(),
+               diag::err_auto_var_init_paren_braces)
+            << VDecl->getDeclName() << VDecl->getType()
+            << VDecl->getSourceRange();
       }
     }
 
@@ -8141,6 +8235,13 @@
       return;
   }
 
+  // dllimport cannot be used on variable definitions.
+  if (VDecl->hasAttr<DLLImportAttr>() && !VDecl->isStaticDataMember()) {
+    Diag(VDecl->getLocation(), diag::err_attribute_dllimport_data_definition);
+    VDecl->setInvalidDecl();
+    return;
+  }
+
   if (VDecl->isLocalVarDecl() && VDecl->hasExternalStorage()) {
     // C99 6.7.8p5. C++ has no such restriction, but that is a defect.
     Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
@@ -8192,9 +8293,9 @@
     // data members we also need to check whether there was an in-class
     // declaration with an initializer.
     if (VDecl->isStaticDataMember() && VDecl->getAnyInitializer(PrevInit)) {
-      Diag(VDecl->getLocation(), diag::err_redefinition) 
-        << VDecl->getDeclName();
-      Diag(PrevInit->getLocation(), diag::note_previous_definition);
+      Diag(Init->getExprLoc(), diag::err_static_data_member_reinitialization)
+          << VDecl->getDeclName();
+      Diag(PrevInit->getInit()->getExprLoc(), diag::note_previous_initializer) << 0;
       return;
     }  
 
@@ -8558,6 +8659,16 @@
       return;
     }
 
+    // OpenCL v1.1 s6.5.3: variables declared in the constant address space must
+    // be initialized.
+    if (!Var->isInvalidDecl() &&
+        Var->getType().getAddressSpace() == LangAS::opencl_constant &&
+        Var->getStorageClass() != SC_Extern && !Var->getInit()) {
+      Diag(Var->getLocation(), diag::err_opencl_constant_no_init);
+      Var->setInvalidDecl();
+      return;
+    }
+
     switch (Var->isThisDeclarationADefinition()) {
     case VarDecl::Definition:
       if (!Var->isStaticDataMember() || !Var->getAnyInitializer())
@@ -8788,7 +8899,13 @@
     }
   }
 
+  // Warn about externally-visible variables being defined without a
+  // prior declaration.  We only want to do this for global
+  // declarations, but we also specifically need to avoid doing it for
+  // class members because the linkage of an anonymous class can
+  // change if it's later given a typedef name.
   if (var->isThisDeclarationADefinition() &&
+      var->getDeclContext()->getRedeclContext()->isFileContext() &&
       var->isExternallyVisible() && var->hasLinkage() &&
       getDiagnostics().getDiagnosticLevel(
                        diag::warn_missing_variable_declarations,
@@ -8901,9 +9018,11 @@
   if (!VD)
     return;
 
+  checkAttributesAfterMerging(*this, *VD);
+
   if (UsedAttr *Attr = VD->getAttr<UsedAttr>()) {
     if (!Attr->isInherited() && !VD->isThisDeclarationADefinition()) {
-      Diag(Attr->getLocation(), diag::warn_attribute_ignored) << "used";
+      Diag(Attr->getLocation(), diag::warn_attribute_ignored) << Attr;
       VD->dropAttr<UsedAttr>();
     }
   }
@@ -8923,10 +9042,11 @@
   const DeclContext *DC = VD->getDeclContext();
   // If there's a #pragma GCC visibility in scope, and this isn't a class
   // member, set the visibility of this variable.
-  if (!DC->isRecord() && VD->isExternallyVisible())
+  if (DC->getRedeclContext()->isFileContext() && VD->isExternallyVisible())
     AddPushedVisibilityAttribute(VD);
 
-  if (VD->isFileVarDecl())
+  // FIXME: Warn on unused templates.
+  if (VD->isFileVarDecl() && !VD->getDescribedVarTemplate())
     MarkUnusedFileScopedDecl(VD);
 
   // Now we have parsed the initializer and can update the table of magic
@@ -8935,10 +9055,7 @@
       !VD->getType()->isIntegralOrEnumerationType())
     return;
 
-  for (specific_attr_iterator<TypeTagForDatatypeAttr>
-         I = ThisDecl->specific_attr_begin<TypeTagForDatatypeAttr>(),
-         E = ThisDecl->specific_attr_end<TypeTagForDatatypeAttr>();
-       I != E; ++I) {
+  for (const auto *I : ThisDecl->specific_attrs<TypeTagForDatatypeAttr>()) {
     const Expr *MagicValueExpr = VD->getInit();
     if (!MagicValueExpr) {
       continue;
@@ -8983,7 +9100,7 @@
 
   if (DeclSpec::isDeclRep(DS.getTypeSpecType())) {
     if (TagDecl *Tag = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl())) {
-      HandleTagNumbering(*this, Tag);
+      HandleTagNumbering(*this, Tag, S);
       if (!Tag->hasNameForLinkage() && !Tag->hasDeclaratorForAnonDecl())
         Tag->setDeclaratorForAnonDecl(FirstDeclaratorInGroup);
     }
@@ -9138,7 +9255,7 @@
     II = D.getIdentifier();
     if (!II) {
       Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
-        << GetNameForDeclarator(D).getName().getAsString();
+        << GetNameForDeclarator(D).getName();
       D.setInvalidType(true);
     }
   }
@@ -9333,16 +9450,15 @@
   // Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
   // for a K&R function.
   if (!FTI.hasPrototype) {
-    for (int i = FTI.NumArgs; i != 0; /* decrement in loop */) {
+    for (int i = FTI.NumParams; i != 0; /* decrement in loop */) {
       --i;
-      if (FTI.ArgInfo[i].Param == 0) {
+      if (FTI.Params[i].Param == 0) {
         SmallString<256> Code;
-        llvm::raw_svector_ostream(Code) << "  int "
-                                        << FTI.ArgInfo[i].Ident->getName()
-                                        << ";\n";
-        Diag(FTI.ArgInfo[i].IdentLoc, diag::ext_param_not_declared)
-          << FTI.ArgInfo[i].Ident
-          << FixItHint::CreateInsertion(LocAfterDecls, Code.str());
+        llvm::raw_svector_ostream(Code)
+            << "  int " << FTI.Params[i].Ident->getName() << ";\n";
+        Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
+            << FTI.Params[i].Ident
+            << FixItHint::CreateInsertion(LocAfterDecls, Code.str());
 
         // Implicitly declare the argument as type 'int' for lack of a better
         // type.
@@ -9350,14 +9466,14 @@
         DeclSpec DS(attrs);
         const char* PrevSpec; // unused
         unsigned DiagID; // unused
-        DS.SetTypeSpecType(DeclSpec::TST_int, FTI.ArgInfo[i].IdentLoc,
-                           PrevSpec, DiagID);
+        DS.SetTypeSpecType(DeclSpec::TST_int, FTI.Params[i].IdentLoc, PrevSpec,
+                           DiagID, Context.getPrintingPolicy());
         // Use the identifier location for the type source range.
-        DS.SetRangeStart(FTI.ArgInfo[i].IdentLoc);
-        DS.SetRangeEnd(FTI.ArgInfo[i].IdentLoc);
+        DS.SetRangeStart(FTI.Params[i].IdentLoc);
+        DS.SetRangeEnd(FTI.Params[i].IdentLoc);
         Declarator ParamD(DS, Declarator::KNRTypeListContext);
-        ParamD.SetIdentifier(FTI.ArgInfo[i].Ident, FTI.ArgInfo[i].IdentLoc);
-        FTI.ArgInfo[i].Param = ActOnParamDeclarator(S, ParamD);
+        ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
+        FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
       }
     }
   }
@@ -9456,7 +9572,7 @@
   LambdaScopeInfo *LSI = S.PushLambdaScope();
   LSI->CallOperator = CallOperator;
   LSI->Lambda = LambdaClass;
-  LSI->ReturnType = CallOperator->getResultType();
+  LSI->ReturnType = CallOperator->getReturnType();
   const LambdaCaptureDefault LCD = LambdaClass->getLambdaCaptureDefault();
 
   if (LCD == LCD_None)
@@ -9472,22 +9588,21 @@
 
   // Add the captures to the LSI so they can be noted as already
   // captured within tryCaptureVar. 
-  for (LambdaExpr::capture_iterator C = LambdaClass->captures_begin(),
-      CEnd = LambdaClass->captures_end(); C != CEnd; ++C) {
-    if (C->capturesVariable()) {
-      VarDecl *VD = C->getCapturedVar();
+  for (const auto &C : LambdaClass->captures()) {
+    if (C.capturesVariable()) {
+      VarDecl *VD = C.getCapturedVar();
       if (VD->isInitCapture())
         S.CurrentInstantiationScope->InstantiatedLocal(VD, VD);
       QualType CaptureType = VD->getType();
-      const bool ByRef = C->getCaptureKind() == LCK_ByRef;
+      const bool ByRef = C.getCaptureKind() == LCK_ByRef;
       LSI->addCapture(VD, /*IsBlock*/false, ByRef, 
-          /*RefersToEnclosingLocal*/true, C->getLocation(),
-          /*EllipsisLoc*/C->isPackExpansion() 
-                         ? C->getEllipsisLoc() : SourceLocation(),
+          /*RefersToEnclosingLocal*/true, C.getLocation(),
+          /*EllipsisLoc*/C.isPackExpansion() 
+                         ? C.getEllipsisLoc() : SourceLocation(),
           CaptureType, /*Expr*/ 0);
       
-    } else if (C->capturesThis()) {
-      LSI->addThisCapture(/*Nested*/ false, C->getLocation(), 
+    } else if (C.capturesThis()) {
+      LSI->addThisCapture(/*Nested*/ false, C.getLocation(), 
                               S.getCurrentThisType(), /*Expr*/ 0);
     }
   }
@@ -9539,7 +9654,7 @@
 
   // The return type of a function definition must be complete
   // (C99 6.9.1p3, C++ [dcl.fct]p6).
-  QualType ResultType = FD->getResultType();
+  QualType ResultType = FD->getReturnType();
   if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
       !FD->isInvalidDecl() &&
       RequireCompleteType(FD->getLocation(), ResultType,
@@ -9578,8 +9693,7 @@
                            /*CheckParameterNames=*/true);
 
   // Introduce our parameters into the function scope
-  for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) {
-    ParmVarDecl *Param = FD->getParamDecl(p);
+  for (auto Param : FD->params()) {
     Param->setOwningFunction(FD);
 
     // If this has an identifier, add it to the scope stack.
@@ -9604,9 +9718,8 @@
       // and reattach to the current context.
       if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) {
         // Is the decl actually in the context?
-        for (DeclContext::decl_iterator DI = Context.getTranslationUnitDecl()->decls_begin(),
-               DE = Context.getTranslationUnitDecl()->decls_end(); DI != DE; ++DI) {
-          if (*DI == D) {  
+        for (const auto *DI : Context.getTranslationUnitDecl()->decls()) {
+          if (DI == D) {  
             Context.getTranslationUnitDecl()->removeDecl(D);
             break;
           }
@@ -9621,10 +9734,9 @@
 
       // Similarly, dive into enums and fish their constants out, making them
       // accessible in this scope.
-      if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
-        for (EnumDecl::enumerator_iterator EI = ED->enumerator_begin(),
-               EE = ED->enumerator_end(); EI != EE; ++EI)
-          PushOnScopeChains(*EI, FnBodyScope, /*AddToContext=*/false);
+      if (auto *ED = dyn_cast<EnumDecl>(D)) {
+        for (auto *EI : ED->enumerators())
+          PushOnScopeChains(EI, FnBodyScope, /*AddToContext=*/false);
       }
     }
   }
@@ -9636,28 +9748,17 @@
   // Checking attributes of current function definition
   // dllimport attribute.
   DLLImportAttr *DA = FD->getAttr<DLLImportAttr>();
-  if (DA && (!FD->getAttr<DLLExportAttr>())) {
+  if (DA && (!FD->hasAttr<DLLExportAttr>())) {
     // dllimport attribute cannot be directly applied to definition.
     // Microsoft accepts dllimport for functions defined within class scope. 
     if (!DA->isInherited() &&
         !(LangOpts.MicrosoftExt && FD->getLexicalDeclContext()->isRecord())) {
       Diag(FD->getLocation(),
            diag::err_attribute_can_be_applied_only_to_symbol_declaration)
-        << "dllimport";
+        << DA;
       FD->setInvalidDecl();
       return D;
     }
-
-    // Visual C++ appears to not think this is an issue, so only issue
-    // a warning when Microsoft extensions are disabled.
-    if (!LangOpts.MicrosoftExt) {
-      // If a symbol previously declared dllimport is later defined, the
-      // attribute is ignored in subsequent references, and a warning is
-      // emitted.
-      Diag(FD->getLocation(),
-           diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
-        << FD->getName() << "dllimport";
-    }
   }
   // We want to attach documentation to original Decl (which might be
   // a function template).
@@ -9699,25 +9800,40 @@
     const_cast<VarDecl*>(NRVOCandidate)->setNRVOVariable(true);
 }
 
-bool Sema::canSkipFunctionBody(Decl *D) {
-  if (!Consumer.shouldSkipFunctionBody(D))
+bool Sema::canDelayFunctionBody(const Declarator &D) {
+  // We can't delay parsing the body of a constexpr function template (yet).
+  if (D.getDeclSpec().isConstexprSpecified())
     return false;
 
-  if (isa<ObjCMethodDecl>(D))
-    return true;
+  // We can't delay parsing the body of a function template with a deduced
+  // return type (yet).
+  if (D.getDeclSpec().containsPlaceholderType()) {
+    // If the placeholder introduces a non-deduced trailing return type,
+    // we can still delay parsing it.
+    if (D.getNumTypeObjects()) {
+      const auto &Outer = D.getTypeObject(D.getNumTypeObjects() - 1);
+      if (Outer.Kind == DeclaratorChunk::Function &&
+          Outer.Fun.hasTrailingReturnType()) {
+        QualType Ty = GetTypeFromParser(Outer.Fun.getTrailingReturnType());
+        return Ty.isNull() || !Ty->isUndeducedType();
+      }
+    }
+    return false;
+  }
 
-  FunctionDecl *FD = 0;
-  if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
-    FD = FTD->getTemplatedDecl();
-  else
-    FD = cast<FunctionDecl>(D);
+  return true;
+}
 
+bool Sema::canSkipFunctionBody(Decl *D) {
   // We cannot skip the body of a function (or function template) which is
   // constexpr, since we may need to evaluate its body in order to parse the
   // rest of the file.
   // We cannot skip the body of a function with an undeduced return type,
   // because any callers of that function need to know the type.
-  return !FD->isConstexpr() && !FD->getResultType()->isUndeducedType();
+  if (const FunctionDecl *FD = D->getAsFunction())
+    if (FD->isConstexpr() || FD->getReturnType()->isUndeducedType())
+      return false;
+  return Consumer.shouldSkipFunctionBody(D);
 }
 
 Decl *Sema::ActOnSkippedFunctionBody(Decl *Decl) {
@@ -9734,12 +9850,7 @@
 
 Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
                                     bool IsInstantiation) {
-  FunctionDecl *FD = 0;
-  FunctionTemplateDecl *FunTmpl = dyn_cast_or_null<FunctionTemplateDecl>(dcl);
-  if (FunTmpl)
-    FD = FunTmpl->getTemplatedDecl();
-  else
-    FD = dyn_cast_or_null<FunctionDecl>(dcl);
+  FunctionDecl *FD = dcl ? dcl->getAsFunction() : 0;
 
   sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
   sema::AnalysisBasedWarnings::Policy *ActivePolicy = 0;
@@ -9748,18 +9859,18 @@
     FD->setBody(Body);
 
     if (getLangOpts().CPlusPlus1y && !FD->isInvalidDecl() && Body &&
-        !FD->isDependentContext() && FD->getResultType()->isUndeducedType()) {
+        !FD->isDependentContext() && FD->getReturnType()->isUndeducedType()) {
       // If the function has a deduced result type but contains no 'return'
       // statements, the result type as written must be exactly 'auto', and
       // the deduced result type is 'void'.
-      if (!FD->getResultType()->getAs<AutoType>()) {
+      if (!FD->getReturnType()->getAs<AutoType>()) {
         Diag(dcl->getLocation(), diag::err_auto_fn_no_return_but_not_auto)
-          << FD->getResultType();
+            << FD->getReturnType();
         FD->setInvalidDecl();
       } else {
         // Substitute 'void' for the 'auto' in the type.
         TypeLoc ResultType = FD->getTypeSourceInfo()->getTypeLoc().
-            IgnoreParens().castAs<FunctionProtoTypeLoc>().getResultLoc();
+            IgnoreParens().castAs<FunctionProtoTypeLoc>().getReturnLoc();
         Context.adjustDeducedFunctionResultType(
             FD, SubstAutoType(ResultType.getType(), Context.VoidTy));
       }
@@ -9783,15 +9894,15 @@
       WP.disableCheckFallThrough();
 
     // MSVC permits the use of pure specifier (=0) on function definition,
-    // defined at class scope, warn about this non standard construct.
+    // defined at class scope, warn about this non-standard construct.
     if (getLangOpts().MicrosoftExt && FD->isPure() && FD->isCanonicalDecl())
       Diag(FD->getLocation(), diag::warn_pure_function_definition);
 
     if (!FD->isInvalidDecl()) {
       DiagnoseUnusedParameters(FD->param_begin(), FD->param_end());
       DiagnoseSizeOfParametersAndReturnValue(FD->param_begin(), FD->param_end(),
-                                             FD->getResultType(), FD);
-      
+                                             FD->getReturnType(), FD);
+
       // If this is a constructor, we need a vtable.
       if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
         MarkVTableUsed(FD->getLocation(), Constructor->getParent());
@@ -9799,7 +9910,7 @@
       // Try to apply the named return value optimization. We have to check
       // if we can do this here because lambdas keep return statements around
       // to deduce an implicit return type.
-      if (getLangOpts().CPlusPlus && FD->getResultType()->isRecordType() &&
+      if (getLangOpts().CPlusPlus && FD->getReturnType()->isRecordType() &&
           !FD->isDependentContext())
         computeNRVO(Body, getCurFunction());
     }
@@ -9812,8 +9923,8 @@
     if (!MD->isInvalidDecl()) {
       DiagnoseUnusedParameters(MD->param_begin(), MD->param_end());
       DiagnoseSizeOfParametersAndReturnValue(MD->param_begin(), MD->param_end(),
-                                             MD->getResultType(), MD);
-      
+                                             MD->getReturnType(), MD);
+
       if (Body)
         computeNRVO(Body, getCurFunction());
     }
@@ -9822,6 +9933,27 @@
         << MD->getSelector().getAsString();
       getCurFunction()->ObjCShouldCallSuper = false;
     }
+    if (getCurFunction()->ObjCWarnForNoDesignatedInitChain) {
+      const ObjCMethodDecl *InitMethod = 0;
+      bool isDesignated =
+          MD->isDesignatedInitializerForTheInterface(&InitMethod);
+      assert(isDesignated && InitMethod);
+      (void)isDesignated;
+      // Don't issue this warning for unavailable inits.
+      if (!MD->isUnavailable()) {
+        Diag(MD->getLocation(),
+             diag::warn_objc_designated_init_missing_super_call);
+        Diag(InitMethod->getLocation(),
+             diag::note_objc_designated_init_marked_here);
+      }
+      getCurFunction()->ObjCWarnForNoDesignatedInitChain = false;
+    }
+    if (getCurFunction()->ObjCWarnForNoInitDelegation) {
+      // Don't issue this warning for unavailable inits.
+      if (!MD->isUnavailable())
+        Diag(MD->getLocation(), diag::warn_objc_secondary_init_missing_init_call);
+      getCurFunction()->ObjCWarnForNoInitDelegation = false;
+    }
   } else {
     return 0;
   }
@@ -9948,7 +10080,8 @@
   AttributeFactory attrFactory;
   DeclSpec DS(attrFactory);
   unsigned DiagID;
-  bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID);
+  bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID,
+                                  Context.getPrintingPolicy());
   (void)Error; // Silence warning.
   assert(!Error && "Error setting up implicit decl!");
   SourceLocation NoLoc;
@@ -10012,25 +10145,27 @@
     unsigned FormatIdx;
     bool HasVAListArg;
     if (Context.BuiltinInfo.isPrintfLike(BuiltinID, FormatIdx, HasVAListArg)) {
-      if (!FD->getAttr<FormatAttr>()) {
+      if (!FD->hasAttr<FormatAttr>()) {
         const char *fmt = "printf";
         unsigned int NumParams = FD->getNumParams();
         if (FormatIdx < NumParams && // NumParams may be 0 (e.g. vfprintf)
             FD->getParamDecl(FormatIdx)->getType()->isObjCObjectPointerType())
           fmt = "NSString";
-        FD->addAttr(::new (Context) FormatAttr(FD->getLocation(), Context,
+        FD->addAttr(FormatAttr::CreateImplicit(Context,
                                                &Context.Idents.get(fmt),
                                                FormatIdx+1,
-                                               HasVAListArg ? 0 : FormatIdx+2));
+                                               HasVAListArg ? 0 : FormatIdx+2,
+                                               FD->getLocation()));
       }
     }
     if (Context.BuiltinInfo.isScanfLike(BuiltinID, FormatIdx,
                                              HasVAListArg)) {
-     if (!FD->getAttr<FormatAttr>())
-       FD->addAttr(::new (Context) FormatAttr(FD->getLocation(), Context,
+     if (!FD->hasAttr<FormatAttr>())
+       FD->addAttr(FormatAttr::CreateImplicit(Context,
                                               &Context.Idents.get("scanf"),
                                               FormatIdx+1,
-                                              HasVAListArg ? 0 : FormatIdx+2));
+                                              HasVAListArg ? 0 : FormatIdx+2,
+                                              FD->getLocation()));
     }
 
     // Mark const if we don't care about errno and that is the only
@@ -10038,17 +10173,18 @@
     // IRgen to use LLVM intrinsics for such functions.
     if (!getLangOpts().MathErrno &&
         Context.BuiltinInfo.isConstWithoutErrno(BuiltinID)) {
-      if (!FD->getAttr<ConstAttr>())
-        FD->addAttr(::new (Context) ConstAttr(FD->getLocation(), Context));
+      if (!FD->hasAttr<ConstAttr>())
+        FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
     }
 
     if (Context.BuiltinInfo.isReturnsTwice(BuiltinID) &&
-        !FD->getAttr<ReturnsTwiceAttr>())
-      FD->addAttr(::new (Context) ReturnsTwiceAttr(FD->getLocation(), Context));
-    if (Context.BuiltinInfo.isNoThrow(BuiltinID) && !FD->getAttr<NoThrowAttr>())
-      FD->addAttr(::new (Context) NoThrowAttr(FD->getLocation(), Context));
-    if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->getAttr<ConstAttr>())
-      FD->addAttr(::new (Context) ConstAttr(FD->getLocation(), Context));
+        !FD->hasAttr<ReturnsTwiceAttr>())
+      FD->addAttr(ReturnsTwiceAttr::CreateImplicit(Context,
+                                         FD->getLocation()));
+    if (Context.BuiltinInfo.isNoThrow(BuiltinID) && !FD->hasAttr<NoThrowAttr>())
+      FD->addAttr(NoThrowAttr::CreateImplicit(Context, FD->getLocation()));
+    if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->hasAttr<ConstAttr>())
+      FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
   }
 
   IdentifierInfo *Name = FD->getIdentifier();
@@ -10067,17 +10203,19 @@
   if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
     // FIXME: asprintf and vasprintf aren't C99 functions. Should they be
     // target-specific builtins, perhaps?
-    if (!FD->getAttr<FormatAttr>())
-      FD->addAttr(::new (Context) FormatAttr(FD->getLocation(), Context,
+    if (!FD->hasAttr<FormatAttr>())
+      FD->addAttr(FormatAttr::CreateImplicit(Context,
                                              &Context.Idents.get("printf"), 2,
-                                             Name->isStr("vasprintf") ? 0 : 3));
+                                             Name->isStr("vasprintf") ? 0 : 3,
+                                             FD->getLocation()));
   }
 
   if (Name->isStr("__CFStringMakeConstantString")) {
     // We already have a __builtin___CFStringMakeConstantString,
     // but builds that use -fno-constant-cfstrings don't go through that.
-    if (!FD->getAttr<FormatArgAttr>())
-      FD->addAttr(::new (Context) FormatArgAttr(FD->getLocation(), Context, 1));
+    if (!FD->hasAttr<FormatArgAttr>())
+      FD->addAttr(FormatArgAttr::CreateImplicit(Context, 1,
+                                                FD->getLocation()));
   }
 }
 
@@ -10140,6 +10278,27 @@
     if (!Context.hasSameType(T, Context.getTagDeclType(tagFromDeclSpec)))
       break;
 
+    // If we've already computed linkage for the anonymous tag, then
+    // adding a typedef name for the anonymous decl can change that
+    // linkage, which might be a serious problem.  Diagnose this as
+    // unsupported and ignore the typedef name.  TODO: we should
+    // pursue this as a language defect and establish a formal rule
+    // for how to handle it.
+    if (tagFromDeclSpec->hasLinkageBeenComputed()) {
+      Diag(D.getIdentifierLoc(), diag::err_typedef_changes_linkage);
+
+      SourceLocation tagLoc = D.getDeclSpec().getTypeSpecTypeLoc();
+      tagLoc = Lexer::getLocForEndOfToken(tagLoc, 0, getSourceManager(),
+                                          getLangOpts());
+
+      llvm::SmallString<40> textToInsert;
+      textToInsert += ' ';
+      textToInsert += D.getIdentifier()->getName();
+      Diag(tagLoc, diag::note_typedef_changes_linkage)
+        << FixItHint::CreateInsertion(tagLoc, textToInsert);
+      break;
+    }
+
     // Otherwise, set this is the anon-decl typedef for the tag.
     tagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
     break;
@@ -10179,7 +10338,7 @@
   if (IsScoped != Prev->isScoped()) {
     Diag(EnumLoc, diag::err_enum_redeclare_scoped_mismatch)
       << Prev->isScoped();
-    Diag(Prev->getLocation(), diag::note_previous_use);
+    Diag(Prev->getLocation(), diag::note_previous_declaration);
     return true;
   }
 
@@ -10188,15 +10347,17 @@
         !Prev->getIntegerType()->isDependentType() &&
         !Context.hasSameUnqualifiedType(EnumUnderlyingTy,
                                         Prev->getIntegerType())) {
+      // TODO: Highlight the underlying type of the redeclaration.
       Diag(EnumLoc, diag::err_enum_redeclare_type_mismatch)
         << EnumUnderlyingTy << Prev->getIntegerType();
-      Diag(Prev->getLocation(), diag::note_previous_use);
+      Diag(Prev->getLocation(), diag::note_previous_declaration)
+          << Prev->getIntegerTypeRange();
       return true;
     }
   } else if (IsFixed != Prev->isFixed()) {
     Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
       << Prev->isFixed();
-    Diag(Prev->getLocation(), diag::note_previous_use);
+    Diag(Prev->getLocation(), diag::note_previous_declaration);
     return true;
   }
 
@@ -10276,8 +10437,7 @@
       }
 
       bool previousMismatch = false;
-      for (TagDecl::redecl_iterator I(Previous->redecls_begin()),
-           E(Previous->redecls_end()); I != E; ++I) {
+      for (auto I : Previous->redecls()) {
         if (I->getTagKind() != NewTag) {
           if (!previousMismatch) {
             previousMismatch = true;
@@ -10308,7 +10468,7 @@
       << getRedeclDiagFromTagKind(OldTag);
     Diag(Redecl->getLocation(), diag::note_previous_use);
 
-    // If there is a previous defintion, suggest a fix-it.
+    // If there is a previous definition, suggest a fix-it.
     if (Previous->getDefinition()) {
         Diag(NewTagLoc, diag::note_struct_class_suggestion)
           << getRedeclDiagFromTagKind(Redecl->getTagKind())
@@ -10325,6 +10485,9 @@
 /// former case, Name will be non-null.  In the latter case, Name will be null.
 /// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
 /// reference/declaration/definition of a tag.
+///
+/// IsTypeSpecifier is true if this is a type-specifier (or
+/// trailing-type-specifier) other than one in an alias-declaration.
 Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                      SourceLocation KWLoc, CXXScopeSpec &SS,
                      IdentifierInfo *Name, SourceLocation NameLoc,
@@ -10334,7 +10497,8 @@
                      bool &OwnedDecl, bool &IsDependent,
                      SourceLocation ScopedEnumKWLoc,
                      bool ScopedEnumUsesClassTag,
-                     TypeResult UnderlyingType) {
+                     TypeResult UnderlyingType,
+                     bool IsTypeSpecifier) {
   // If this is not a definition, it must have a name.
   IdentifierInfo *OrigName = Name;
   assert((Name != 0 || TUK == TUK_Definition) &&
@@ -10412,7 +10576,7 @@
                                           UPPC_FixedUnderlyingType))
         EnumUnderlying = Context.IntTy.getTypePtr();
 
-    } else if (getLangOpts().MicrosoftMode)
+    } else if (getLangOpts().MSVCCompat)
       // Microsoft enums are always of int type.
       EnumUnderlying = Context.IntTy.getTypePtr();
   }
@@ -10634,7 +10798,9 @@
   }
 
   if (!Previous.empty()) {
-    NamedDecl *PrevDecl = (*Previous.begin())->getUnderlyingDecl();
+    NamedDecl *PrevDecl = Previous.getFoundDecl();
+    NamedDecl *DirectPrevDecl =
+        getLangOpts().MSVCCompat ? *Previous.begin() : PrevDecl;
 
     // It's okay to have a tag decl in the same scope as a typedef
     // which hides a tag decl in the same scope.  Finding this
@@ -10666,7 +10832,8 @@
       // in the same scope (so that the definition/declaration completes or
       // rementions the tag), reuse the decl.
       if (TUK == TUK_Reference || TUK == TUK_Friend ||
-          isDeclInScope(PrevDecl, SearchDC, S, isExplicitSpecialization)) {
+          isDeclInScope(DirectPrevDecl, SearchDC, S,
+                        SS.isNotEmpty() || isExplicitSpecialization)) {
         // Make sure that this wasn't declared as an enum and now used as a
         // struct or something similar.
         if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
@@ -10709,7 +10876,7 @@
 
           QualType EnumUnderlyingTy;
           if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
-            EnumUnderlyingTy = TI->getType();
+            EnumUnderlyingTy = TI->getType().getUnqualifiedType();
           else if (const Type *T = EnumUnderlying.dyn_cast<const Type*>())
             EnumUnderlyingTy = QualType(T, 0);
 
@@ -10827,8 +10994,8 @@
         Invalid = true;
 
       // Otherwise, only diagnose if the declaration is in scope.
-      } else if (!isDeclInScope(PrevDecl, SearchDC, S, 
-                                isExplicitSpecialization)) {
+      } else if (!isDeclInScope(PrevDecl, SearchDC, S,
+                                SS.isNotEmpty() || isExplicitSpecialization)) {
         // do nothing
 
       // Diagnose implicit declarations introduced by elaborated types.
@@ -10904,7 +11071,7 @@
         Diag(Def->getLocation(), diag::note_previous_definition);
       } else {
         unsigned DiagID = diag::ext_forward_ref_enum;
-        if (getLangOpts().MicrosoftMode)
+        if (getLangOpts().MSVCCompat)
           DiagID = diag::ext_ms_forward_ref_enum;
         else if (getLangOpts().CPlusPlus)
           DiagID = diag::err_forward_ref_enum;
@@ -10944,6 +11111,14 @@
                                cast_or_null<RecordDecl>(PrevDecl));
   }
 
+  // C++11 [dcl.type]p3:
+  //   A type-specifier-seq shall not define a class or enumeration [...].
+  if (getLangOpts().CPlusPlus && IsTypeSpecifier && TUK == TUK_Definition) {
+    Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
+      << Context.getTagDeclType(New);
+    Invalid = true;
+  }
+
   // Maybe add qualifier info.
   if (SS.isNotEmpty()) {
     if (SS.isSet()) {
@@ -11007,10 +11182,14 @@
   if (Attr)
     ProcessDeclAttributeList(S, New, Attr);
 
-  // If we're declaring or defining a tag in function prototype scope
-  // in C, note that this type can only be used within the function.
-  if (Name && S->isFunctionPrototypeScope() && !getLangOpts().CPlusPlus)
+  // If we're declaring or defining a tag in function prototype scope in C,
+  // note that this type can only be used within the function and add it to
+  // the list of decls to inject into the function definition scope.
+  if (!getLangOpts().CPlusPlus && (Name || Kind == TTK_Enum) &&
+      getNonFieldDeclScope(S)->isFunctionPrototypeScope()) {
     Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
+    DeclsInPrototypeScope.push_back(New);
+  }
 
   // Set the lexical context. If the tag has a C++ scope specifier, the
   // lexical context will be different from the semantic context.
@@ -11060,12 +11239,6 @@
         II->isStr("FILE"))
       Context.setFILEDecl(New);
 
-  // If we were in function prototype scope (and not in C++ mode), add this
-  // tag to the list of decls to inject into the function definition scope.
-  if (S->isFunctionPrototypeScope() && !getLangOpts().CPlusPlus &&
-      InFunctionDeclarator && Name)
-    DeclsInPrototypeScope.push_back(New);
-
   if (PrevDecl)
     mergeDeclAttributes(New, PrevDecl);
 
@@ -11255,7 +11428,8 @@
   if (!FieldTy->isDependentType()) {
     uint64_t TypeSize = Context.getTypeSize(FieldTy);
     if (Value.getZExtValue() > TypeSize) {
-      if (!getLangOpts().CPlusPlus || IsMsStruct) {
+      if (!getLangOpts().CPlusPlus || IsMsStruct ||
+          Context.getTargetInfo().getCXXABI().isMicrosoft()) {
         if (FieldName) 
           return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_size)
             << FieldName << (unsigned)Value.getZExtValue() 
@@ -11500,6 +11674,12 @@
     }
   }
 
+  // C++11 [class.union]p8 (DR1460):
+  //   At most one variant member of a union may have a
+  //   brace-or-equal-initializer.
+  if (InitStyle != ICIS_NoInit)
+    checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Record), Loc);
+
   FieldDecl *NewFD = FieldDecl::Create(Context, Record, TSSL, Loc, II, T, TInfo,
                                        BitWidth, Mutable, InitStyle);
   if (InvalidDecl)
@@ -11603,8 +11783,9 @@
           SourceLocation Loc = FD->getLocation();
           if (getSourceManager().isInSystemHeader(Loc)) {
             if (!FD->hasAttr<UnavailableAttr>())
-              FD->addAttr(new (Context) UnavailableAttr(Loc, Context,
-                                  "this system field has retaining ownership"));
+              FD->addAttr(UnavailableAttr::CreateImplicit(Context,
+                                  "this system field has retaining ownership",
+                                  Loc));
             return false;
           }
         }
@@ -11813,9 +11994,8 @@
   // members of anonymous structs and unions in the total.
   unsigned NumNamedMembers = 0;
   if (Record) {
-    for (RecordDecl::decl_iterator i = Record->decls_begin(),
-                                   e = Record->decls_end(); i != e; i++) {
-      if (IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(*i))
+    for (const auto *I : Record->decls()) {
+      if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
         if (IFD->getDeclName())
           ++NumNamedMembers;
     }
@@ -11902,9 +12082,14 @@
         Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
           << FD->getDeclName() << Record->getTagKind();
 
-      if (!FD->getType()->isDependentType() &&
-          !Context.getBaseElementType(FD->getType()).isPODType(Context)) {
-        Diag(FD->getLocation(), diag::err_flexible_array_has_nonpod_type)
+      // If the element type has a non-trivial destructor, we would not
+      // implicitly destroy the elements, so disallow it for now.
+      //
+      // FIXME: GCC allows this. We should probably either implicitly delete
+      // the destructor of the containing class, or just allow this.
+      QualType BaseElem = Context.getBaseElementType(FD->getType());
+      if (!BaseElem->isDependentType() && BaseElem.isDestructedType()) {
+        Diag(FD->getLocation(), diag::err_flexible_array_has_nontrivial_dtor)
           << FD->getDeclName() << FD->getType();
         FD->setInvalidDecl();
         EnclosingDecl->setInvalidDecl();
@@ -11972,8 +12157,9 @@
         SourceLocation loc = FD->getLocation();
         if (getSourceManager().isInSystemHeader(loc)) {
           if (!FD->hasAttr<UnavailableAttr>()) {
-            FD->addAttr(new (Context) UnavailableAttr(loc, Context,
-                              "this system field has retaining ownership"));
+            FD->addAttr(UnavailableAttr::CreateImplicit(Context,
+                              "this system field has retaining ownership",
+                              loc));
           }
         } else {
           Diag(FD->getLocation(), diag::err_arc_objc_object_in_tag) 
@@ -12021,12 +12207,6 @@
             if (getLangOpts().CPlusPlus11)
               AdjustDestructorExceptionSpec(CXXRecord,
                                             CXXRecord->getDestructor());
-
-            // The Microsoft ABI requires that we perform the destructor body
-            // checks (i.e. operator delete() lookup) at every declaration, as
-            // any translation unit may need to emit a deleting destructor.
-            if (Context.getTargetInfo().getCXXABI().isMicrosoft())
-              CheckDestructor(CXXRecord->getDestructor());
           }
 
           // Add any implicitly-declared members to this class.
@@ -12078,9 +12258,15 @@
     if (!Completed)
       Record->completeDefinition();
 
-    if (Record->hasAttrs())
+    if (Record->hasAttrs()) {
       CheckAlignasUnderalignment(Record);
 
+      if (const MSInheritanceAttr *IA = Record->getAttr<MSInheritanceAttr>())
+        checkMSInheritanceAttrOnDefinition(cast<CXXRecordDecl>(Record),
+                                           IA->getRange(), IA->getBestCase(),
+                                           IA->getSemanticSpelling());
+    }
+
     // Check if the structure/union declaration is a type that can have zero
     // size in C. For C this is a language extension, for C++ it may cause
     // compatibility problems.
@@ -12175,10 +12361,7 @@
             Diag(ClsIvar->getLocation(), diag::note_previous_definition);
             continue;
           }
-          for (ObjCInterfaceDecl::known_extensions_iterator
-                 Ext = IDecl->known_extensions_begin(),
-                 ExtEnd = IDecl->known_extensions_end();
-               Ext != ExtEnd; ++Ext) {
+          for (const auto *Ext : IDecl->known_extensions()) {
             if (const ObjCIvarDecl *ClsExtIvar
                   = Ext->getIvarDecl(ClsFields[i]->getIdentifier())) {
               Diag(ClsFields[i]->getLocation(), 
@@ -12262,7 +12445,7 @@
     else {
       SourceLocation ExpLoc;
       if (getLangOpts().CPlusPlus11 && Enum->isFixed() &&
-          !getLangOpts().MicrosoftMode) {
+          !getLangOpts().MSVCCompat) {
         // C++11 [dcl.enum]p5: If the underlying type is fixed, [...] the
         // constant-expression in the enumerator-definition shall be a converted
         // constant expression of the underlying type.
@@ -12287,7 +12470,7 @@
           // we perform a non-narrowing conversion as part of converted constant
           // expression checking.
           if (!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
-            if (getLangOpts().MicrosoftMode) {
+            if (getLangOpts().MSVCCompat) {
               Diag(IdLoc, diag::ext_enumerator_too_large) << EltTy;
               Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).take();
             } else
@@ -12371,7 +12554,7 @@
               << EnumVal.toString(10)
               << EltTy;
           else
-            Diag(IdLoc, diag::warn_enumerator_too_large)
+            Diag(IdLoc, diag::ext_enumerator_increment_too_large)
               << EnumVal.toString(10);
         } else {
           EltTy = T;
@@ -12729,7 +12912,7 @@
   // The C99 rule is modified by a gcc extension 
   QualType BestPromotionType;
 
-  bool Packed = Enum->getAttr<PackedAttr>() ? true : false;
+  bool Packed = Enum->hasAttr<PackedAttr>();
   // -fshort-enums is the equivalent to specifying the packed attribute on all
   // enum definitions.
   if (LangOpts.ShortEnums)
@@ -12769,7 +12952,7 @@
         BestWidth = Context.getTargetInfo().getLongLongWidth();
 
         if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
-          Diag(Enum->getLocation(), diag::warn_enum_too_large);
+          Diag(Enum->getLocation(), diag::ext_enum_too_large);
         BestType = Context.LongLongTy;
       }
     }
@@ -12874,11 +13057,6 @@
   Enum->completeDefinition(BestType, BestPromotionType,
                            NumPositiveBits, NumNegativeBits);
 
-  // If we're declaring a function, ensure this decl isn't forgotten about -
-  // it needs to go into the function scope.
-  if (InFunctionDeclarator)
-    DeclsInPrototypeScope.push_back(Enum);
-
   CheckForDuplicateEnumValues(*this, Elements, Enum, EnumType);
 
   // Now that the enum type is defined, ensure it's not been underaligned.
@@ -12898,6 +13076,36 @@
   return New;
 }
 
+static void checkModuleImportContext(Sema &S, Module *M,
+                                     SourceLocation ImportLoc,
+                                     DeclContext *DC) {
+  if (auto *LSD = dyn_cast<LinkageSpecDecl>(DC)) {
+    switch (LSD->getLanguage()) {
+    case LinkageSpecDecl::lang_c:
+      if (!M->IsExternC) {
+        S.Diag(ImportLoc, diag::err_module_import_in_extern_c)
+          << M->getFullModuleName();
+        S.Diag(LSD->getLocStart(), diag::note_module_import_in_extern_c);
+        return;
+      }
+      break;
+    case LinkageSpecDecl::lang_cxx:
+      break;
+    }
+    DC = LSD->getParent();
+  }
+
+  while (isa<LinkageSpecDecl>(DC))
+    DC = DC->getParent();
+  if (!isa<TranslationUnitDecl>(DC)) {
+    S.Diag(ImportLoc, diag::err_module_import_not_at_top_level)
+      << M->getFullModuleName() << DC;
+    S.Diag(cast<Decl>(DC)->getLocStart(),
+           diag::note_module_import_not_at_top_level)
+      << DC;
+  }
+}
+
 DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc, 
                                    SourceLocation ImportLoc, 
                                    ModuleIdPath Path) {
@@ -12906,7 +13114,9 @@
                                                 /*IsIncludeDirective=*/false);
   if (!Mod)
     return true;
-  
+
+  checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
+
   SmallVector<SourceLocation, 2> IdentifierLocs;
   Module *ModCheck = Mod;
   for (unsigned I = 0, N = Path.size(); I != N; ++I) {
@@ -12928,6 +13138,8 @@
 }
 
 void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
+  checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext);
+
   // FIXME: Should we synthesize an ImportDecl here?
   PP.getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc,
                                          /*Complain=*/true);
@@ -12953,8 +13165,8 @@
                                       SourceLocation AliasNameLoc) {
   Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc,
                                     LookupOrdinaryName);
-  AsmLabelAttr *Attr =
-     ::new (Context) AsmLabelAttr(AliasNameLoc, Context, AliasName->getName());
+  AsmLabelAttr *Attr = ::new (Context) AsmLabelAttr(AliasNameLoc, Context,
+                                                    AliasName->getName(), 0);
 
   if (PrevDecl) 
     PrevDecl->addAttr(Attr);
@@ -12969,7 +13181,7 @@
   Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName);
 
   if (PrevDecl) {
-    PrevDecl->addAttr(::new (Context) WeakAttr(PragmaLoc, Context));
+    PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc));
   } else {
     (void)WeakUndeclaredIdentifiers.insert(
       std::pair<IdentifierInfo*,WeakInfo>
@@ -13002,5 +13214,22 @@
 
 AvailabilityResult Sema::getCurContextAvailability() const {
   const Decl *D = cast<Decl>(getCurObjCLexicalContext());
+  // If we are within an Objective-C method, we should consult
+  // both the availability of the method and the
+  // enclosing class.  If the class is (say) deprecated,
+  // the entire method is considered deprecated for the
+  // purpose of checking if the current context is deprecated.
+  if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+    AvailabilityResult R = MD->getAvailability();
+    if (R != AR_Available)
+      return R;
+    D = MD->getClassInterface();
+  }
+  // If we are within an Objective-C @implementation, it
+  // gets the same availability context as the @interface.
+  else if (const ObjCImplementationDecl *ID =
+            dyn_cast<ObjCImplementationDecl>(D)) {
+    D = ID->getClassInterface();
+  }
   return D->getAvailability();
 }
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 318575a..38b3c45 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -12,7 +12,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Sema/SemaInternal.h"
-#include "TargetAttributesSema.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/CXXInheritance.h"
 #include "clang/AST/DeclCXX.h"
@@ -32,86 +31,23 @@
 using namespace clang;
 using namespace sema;
 
-/// These constants match the enumerated choices of
-/// warn_attribute_wrong_decl_type and err_attribute_wrong_decl_type.
-enum AttributeDeclKind {
-  ExpectedFunction,
-  ExpectedUnion,
-  ExpectedVariableOrFunction,
-  ExpectedFunctionOrMethod,
-  ExpectedParameter,
-  ExpectedFunctionMethodOrBlock,
-  ExpectedFunctionMethodOrClass,
-  ExpectedFunctionMethodOrParameter,
-  ExpectedClass,
-  ExpectedVariable,
-  ExpectedMethod,
-  ExpectedVariableFunctionOrLabel,
-  ExpectedFieldOrGlobalVar,
-  ExpectedStruct,
-  ExpectedVariableFunctionOrTag,
-  ExpectedTLSVar,
-  ExpectedVariableOrField,
-  ExpectedVariableFieldOrTag,
-  ExpectedTypeOrNamespace,
-  ExpectedObjectiveCInterface,
-  ExpectedMethodOrProperty,
-  ExpectedStructOrUnion,
-  ExpectedStructOrUnionOrClass
-};
+namespace AttributeLangSupport {
+  enum LANG {
+    C,
+    Cpp,
+    ObjC
+  };
+}
 
 //===----------------------------------------------------------------------===//
 //  Helper functions
 //===----------------------------------------------------------------------===//
 
-static const FunctionType *getFunctionType(const Decl *D,
-                                           bool blocksToo = true) {
-  QualType Ty;
-  if (const ValueDecl *decl = dyn_cast<ValueDecl>(D))
-    Ty = decl->getType();
-  else if (const FieldDecl *decl = dyn_cast<FieldDecl>(D))
-    Ty = decl->getType();
-  else if (const TypedefNameDecl* decl = dyn_cast<TypedefNameDecl>(D))
-    Ty = decl->getUnderlyingType();
-  else
-    return 0;
-
-  if (Ty->isFunctionPointerType())
-    Ty = Ty->getAs<PointerType>()->getPointeeType();
-  else if (blocksToo && Ty->isBlockPointerType())
-    Ty = Ty->getAs<BlockPointerType>()->getPointeeType();
-
-  return Ty->getAs<FunctionType>();
-}
-
-// FIXME: We should provide an abstraction around a method or function
-// to provide the following bits of information.
-
-/// isFunction - Return true if the given decl has function
-/// type (function or function-typed variable).
-static bool isFunction(const Decl *D) {
-  return getFunctionType(D, false) != NULL;
-}
-
 /// isFunctionOrMethod - Return true if the given decl has function
 /// type (function or function-typed variable) or an Objective-C
 /// method.
 static bool isFunctionOrMethod(const Decl *D) {
-  return isFunction(D) || isa<ObjCMethodDecl>(D);
-}
-
-/// isFunctionOrMethodOrBlock - Return true if the given decl has function
-/// type (function or function-typed variable) or an Objective-C
-/// method or a block.
-static bool isFunctionOrMethodOrBlock(const Decl *D) {
-  if (isFunctionOrMethod(D))
-    return true;
-  // check for block is more involved.
-  if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
-    QualType Ty = V->getType();
-    return Ty->isBlockPointerType();
-  }
-  return isa<BlockDecl>(D);
+  return (D->getFunctionType() != NULL) || isa<ObjCMethodDecl>(D);
 }
 
 /// Return true if the given decl has a declarator that should have
@@ -126,28 +62,25 @@
 /// information. This decl should have already passed
 /// isFunctionOrMethod or isFunctionOrMethodOrBlock.
 static bool hasFunctionProto(const Decl *D) {
-  if (const FunctionType *FnTy = getFunctionType(D))
+  if (const FunctionType *FnTy = D->getFunctionType())
     return isa<FunctionProtoType>(FnTy);
-  else {
-    assert(isa<ObjCMethodDecl>(D) || isa<BlockDecl>(D));
-    return true;
-  }
+  return isa<ObjCMethodDecl>(D) || isa<BlockDecl>(D);
 }
 
-/// getFunctionOrMethodNumArgs - Return number of function or method
-/// arguments. It is an error to call this on a K&R function (use
+/// getFunctionOrMethodNumParams - Return number of function or method
+/// parameters. It is an error to call this on a K&R function (use
 /// hasFunctionProto first).
-static unsigned getFunctionOrMethodNumArgs(const Decl *D) {
-  if (const FunctionType *FnTy = getFunctionType(D))
-    return cast<FunctionProtoType>(FnTy)->getNumArgs();
+static unsigned getFunctionOrMethodNumParams(const Decl *D) {
+  if (const FunctionType *FnTy = D->getFunctionType())
+    return cast<FunctionProtoType>(FnTy)->getNumParams();
   if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
     return BD->getNumParams();
   return cast<ObjCMethodDecl>(D)->param_size();
 }
 
-static QualType getFunctionOrMethodArgType(const Decl *D, unsigned Idx) {
-  if (const FunctionType *FnTy = getFunctionType(D))
-    return cast<FunctionProtoType>(FnTy)->getArgType(Idx);
+static QualType getFunctionOrMethodParamType(const Decl *D, unsigned Idx) {
+  if (const FunctionType *FnTy = D->getFunctionType())
+    return cast<FunctionProtoType>(FnTy)->getParamType(Idx);
   if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
     return BD->getParamDecl(Idx)->getType();
 
@@ -155,13 +88,13 @@
 }
 
 static QualType getFunctionOrMethodResultType(const Decl *D) {
-  if (const FunctionType *FnTy = getFunctionType(D))
-    return cast<FunctionProtoType>(FnTy)->getResultType();
-  return cast<ObjCMethodDecl>(D)->getResultType();
+  if (const FunctionType *FnTy = D->getFunctionType())
+    return cast<FunctionProtoType>(FnTy)->getReturnType();
+  return cast<ObjCMethodDecl>(D)->getReturnType();
 }
 
 static bool isFunctionOrMethodVariadic(const Decl *D) {
-  if (const FunctionType *FnTy = getFunctionType(D)) {
+  if (const FunctionType *FnTy = D->getFunctionType()) {
     const FunctionProtoType *proto = cast<FunctionProtoType>(FnTy);
     return proto->isVariadic();
   } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
@@ -227,30 +160,63 @@
   return true;
 }
 
-
 /// \brief Check if the attribute has at least as many args as Num. May
 /// output an error.
 static bool checkAttributeAtLeastNumArgs(Sema &S, const AttributeList &Attr,
                                          unsigned Num) {
   if (getNumAttributeArgs(Attr) < Num) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_too_few_arguments) << Num;
+    S.Diag(Attr.getLoc(), diag::err_attribute_too_few_arguments)
+      << Attr.getName() << Num;
     return false;
   }
 
   return true;
 }
 
-/// \brief Check if IdxExpr is a valid argument index for a function or
+/// \brief If Expr is a valid integer constant, get the value of the integer
+/// expression and return success or failure. May output an error.
+static bool checkUInt32Argument(Sema &S, const AttributeList &Attr,
+                                const Expr *Expr, uint32_t &Val,
+                                unsigned Idx = UINT_MAX) {
+  llvm::APSInt I(32);
+  if (Expr->isTypeDependent() || Expr->isValueDependent() ||
+      !Expr->isIntegerConstantExpr(I, S.Context)) {
+    if (Idx != UINT_MAX)
+      S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+        << Attr.getName() << Idx << AANT_ArgumentIntegerConstant
+        << Expr->getSourceRange();
+    else
+      S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+        << Attr.getName() << AANT_ArgumentIntegerConstant
+        << Expr->getSourceRange();
+    return false;
+  }
+  Val = (uint32_t)I.getZExtValue();
+  return true;
+}
+
+/// \brief Diagnose mutually exclusive attributes when present on a given
+/// declaration. Returns true if diagnosed.
+template <typename AttrTy>
+static bool checkAttrMutualExclusion(Sema &S, Decl *D,
+                                     const AttributeList &Attr) {
+  if (AttrTy *A = D->getAttr<AttrTy>()) {
+    S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+      << Attr.getName() << A;
+    return true;
+  }
+  return false;
+}
+
+/// \brief Check if IdxExpr is a valid parameter index for a function or
 /// instance method D.  May output an error.
 ///
 /// \returns true if IdxExpr is a valid index.
-static bool checkFunctionOrMethodArgumentIndex(Sema &S, const Decl *D,
-                                               StringRef AttrName,
-                                               SourceLocation AttrLoc,
-                                               unsigned AttrArgNum,
-                                               const Expr *IdxExpr,
-                                               uint64_t &Idx)
-{
+static bool checkFunctionOrMethodParameterIndex(Sema &S, const Decl *D,
+                                                const AttributeList &Attr,
+                                                unsigned AttrArgNum,
+                                                const Expr *IdxExpr,
+                                                uint64_t &Idx) {
   assert(isFunctionOrMethod(D));
 
   // In C++ the implicit 'this' function parameter also counts.
@@ -258,30 +224,30 @@
   bool HP = hasFunctionProto(D);
   bool HasImplicitThisParam = isInstanceMethod(D);
   bool IV = HP && isFunctionOrMethodVariadic(D);
-  unsigned NumArgs = (HP ? getFunctionOrMethodNumArgs(D) : 0) +
-                     HasImplicitThisParam;
+  unsigned NumParams =
+      (HP ? getFunctionOrMethodNumParams(D) : 0) + HasImplicitThisParam;
 
   llvm::APSInt IdxInt;
   if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
       !IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) {
-    std::string Name = std::string("'") + AttrName.str() + std::string("'");
-    S.Diag(AttrLoc, diag::err_attribute_argument_n_type) << Name.c_str()
-      << AttrArgNum << AANT_ArgumentIntegerConstant << IdxExpr->getSourceRange();
+    S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+      << Attr.getName() << AttrArgNum << AANT_ArgumentIntegerConstant
+      << IdxExpr->getSourceRange();
     return false;
   }
 
   Idx = IdxInt.getLimitedValue();
-  if (Idx < 1 || (!IV && Idx > NumArgs)) {
-    S.Diag(AttrLoc, diag::err_attribute_argument_out_of_bounds)
-      << AttrName << AttrArgNum << IdxExpr->getSourceRange();
+  if (Idx < 1 || (!IV && Idx > NumParams)) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+      << Attr.getName() << AttrArgNum << IdxExpr->getSourceRange();
     return false;
   }
   Idx--; // Convert to zero-based.
   if (HasImplicitThisParam) {
     if (Idx == 0) {
-      S.Diag(AttrLoc,
+      S.Diag(Attr.getLoc(),
              diag::err_attribute_invalid_implicit_this_argument)
-        << AttrName << IdxExpr->getSourceRange();
+        << Attr.getName() << IdxExpr->getSourceRange();
       return false;
     }
     --Idx;
@@ -326,17 +292,13 @@
   return true;
 }
 
-///
-/// \brief Check if passed in Decl is a field or potentially shared global var
-/// \return true if the Decl is a field or potentially shared global variable
-///
-static bool mayBeSharedVariable(const Decl *D) {
-  if (isa<FieldDecl>(D))
-    return true;
-  if (const VarDecl *vd = dyn_cast<VarDecl>(D))
-    return vd->hasGlobalStorage() && !vd->getTLSKind();
-
-  return false;
+/// \brief Applies the given attribute to the Decl without performing any
+/// additional semantic checking.
+template <typename AttrType>
+static void handleSimpleAttribute(Sema &S, Decl *D,
+                                  const AttributeList &Attr) {
+  D->addAttr(::new (S.Context) AttrType(Attr.getRange(), S.Context,
+                                        Attr.getAttributeSpellingListIndex()));
 }
 
 /// \brief Check if the passed-in expression is of type int or bool.
@@ -367,28 +329,24 @@
 /// \return true if the Decl is a pointer type; false otherwise
 static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
                                        const AttributeList &Attr) {
-  if (const ValueDecl *vd = dyn_cast<ValueDecl>(D)) {
-    QualType QT = vd->getType();
-    if (QT->isAnyPointerType())
+  const ValueDecl *vd = cast<ValueDecl>(D);
+  QualType QT = vd->getType();
+  if (QT->isAnyPointerType())
+    return true;
+
+  if (const RecordType *RT = QT->getAs<RecordType>()) {
+    // If it's an incomplete type, it could be a smart pointer; skip it.
+    // (We don't want to force template instantiation if we can avoid it,
+    // since that would alter the order in which templates are instantiated.)
+    if (RT->isIncompleteType())
       return true;
 
-    if (const RecordType *RT = QT->getAs<RecordType>()) {
-      // If it's an incomplete type, it could be a smart pointer; skip it.
-      // (We don't want to force template instantiation if we can avoid it,
-      // since that would alter the order in which templates are instantiated.)
-      if (RT->isIncompleteType())
-        return true;
-
-      if (threadSafetyCheckIsSmartPointer(S, RT))
-        return true;
-    }
-
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_pointer)
-      << Attr.getName()->getName() << QT;
-  } else {
-    S.Diag(Attr.getLoc(), diag::err_attribute_can_be_applied_only_to_value_decl)
-      << Attr.getName();
+    if (threadSafetyCheckIsSmartPointer(S, RT))
+      return true;
   }
+
+  S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_pointer)
+    << Attr.getName() << QT;
   return false;
 }
 
@@ -405,65 +363,78 @@
   return 0;
 }
 
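+/// \brief Determines whether the given type is a record type that can act as
+/// a capability: an incomplete record, a smart pointer, or a record (or one
+/// of its base classes) marked with the capability attribute.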
+static bool checkRecordTypeForCapability(Sema &S, const AttributeList &Attr,
+                                         QualType Ty) {
+  const RecordType *RT = getRecordType(Ty);
 
-static bool checkBaseClassIsLockableCallback(const CXXBaseSpecifier *Specifier,
-                                             CXXBasePath &Path, void *Unused) {
-  const RecordType *RT = Specifier->getType()->getAs<RecordType>();
-  if (RT->getDecl()->getAttr<LockableAttr>())
+  if (!RT)
+    return false;
+
+  // Don't check for the capability if the class hasn't been defined yet.
+  if (RT->isIncompleteType())
     return true;
+
+  // Allow smart pointers to be used as capability objects.
+  // FIXME -- Check the type that the smart pointer points to.
+  if (threadSafetyCheckIsSmartPointer(S, RT))
+    return true;
+
+  // Check if the record itself has a capability.
+  RecordDecl *RD = RT->getDecl();
+  if (RD->hasAttr<CapabilityAttr>())
+    return true;
+
+  // Else check if any base classes have a capability.
+  if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+    CXXBasePaths BPaths(false, false);
+    if (CRD->lookupInBases([](const CXXBaseSpecifier *BS, CXXBasePath &P,
+                              void *) {
+          return BS->getType()->getAs<RecordType>()->getDecl()
+              ->hasAttr<CapabilityAttr>();
+        }, 0, BPaths))
+      return true;
+  }
   return false;
 }
 
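+/// \brief Determines whether the given type is a typedef whose declaration is
+/// marked with the capability attribute.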
+static bool checkTypedefTypeForCapability(Sema &S, const AttributeList &Attr,
+                                          QualType Ty) {
+  const auto *TD = Ty->getAs<TypedefType>();
+  if (!TD)
+    return false;
 
-/// \brief Thread Safety Analysis: Checks that the passed in RecordType
-/// resolves to a lockable object.
-static void checkForLockableRecord(Sema &S, Decl *D, const AttributeList &Attr,
-                                   QualType Ty) {
-  const RecordType *RT = getRecordType(Ty);
+  TypedefNameDecl *TN = TD->getDecl();
+  if (!TN)
+    return false;
 
-  // Warn if could not get record type for this argument.
-  if (!RT) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_argument_not_class)
-      << Attr.getName() << Ty.getAsString();
-    return;
-  }
-
-  // Don't check for lockable if the class hasn't been defined yet.
-  if (RT->isIncompleteType())
-    return;
-
-  // Allow smart pointers to be used as lockable objects.
-  // FIXME -- Check the type that the smart pointer points to.
-  if (threadSafetyCheckIsSmartPointer(S, RT))
-    return;
-
-  // Check if the type is lockable.
-  RecordDecl *RD = RT->getDecl();
-  if (RD->getAttr<LockableAttr>())
-    return;
-
-  // Else check if any base classes are lockable.
-  if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
-    CXXBasePaths BPaths(false, false);
-    if (CRD->lookupInBases(checkBaseClassIsLockableCallback, 0, BPaths))
-      return;
-  }
-
-  S.Diag(Attr.getLoc(), diag::warn_thread_attribute_argument_not_lockable)
-    << Attr.getName() << Ty.getAsString();
+  return TN->hasAttr<CapabilityAttr>();
 }
 
-/// \brief Thread Safety Analysis: Checks that all attribute arguments, starting
-/// from Sidx, resolve to a lockable object.
+/// \brief Checks that the passed-in type is qualified as a capability. This
+/// type can either be a struct or a typedef to a built-in type (such as int).
+static void checkForCapability(Sema &S, const AttributeList &Attr,
+                               QualType Ty) {
+  if (checkTypedefTypeForCapability(S, Attr, Ty))
+    return;
+
+  if (checkRecordTypeForCapability(S, Attr, Ty))
+    return;
+
+  S.Diag(Attr.getLoc(), diag::warn_thread_attribute_argument_not_lockable)
+    << Attr.getName() << Ty;
+}
+
+/// \brief Checks that all attribute arguments, starting from Sidx, resolve to
+/// a capability object.
 /// \param Sidx The attribute argument index to start checking with.
 /// \param ParamIdxOk Whether an argument can be indexing into a function
 /// parameter list.
-static void checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
-                                         const AttributeList &Attr,
-                                         SmallVectorImpl<Expr*> &Args,
-                                         int Sidx = 0,
-                                         bool ParamIdxOk = false) {
-  for(unsigned Idx = Sidx; Idx < Attr.getNumArgs(); ++Idx) {
+static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
+                                           const AttributeList &Attr,
+                                           SmallVectorImpl<Expr *> &Args,
+                                           int Sidx = 0,
+                                           bool ParamIdxOk = false) {
+  for (unsigned Idx = Sidx; Idx < Attr.getNumArgs(); ++Idx) {
     Expr *ArgExp = Attr.getArgAsExpr(Idx);
 
     if (ArgExp->isTypeDependent()) {
@@ -499,7 +470,7 @@
           if (DRE->getDecl()->isCXXInstanceMember())
             ArgTy = DRE->getDecl()->getType();
 
-    // First see if we can just cast to record type, or point to record type.
+    // First see if we can just cast to record type, or pointer to record type.
     const RecordType *RT = getRecordType(ArgTy);
 
     // Now check if we index into a record type function param.
@@ -520,7 +491,7 @@
       }
     }
 
-    checkForLockableRecord(S, D, Attr, ArgTy);
+    checkForCapability(S, Attr, ArgTy);
 
     Args.push_back(ArgExp);
   }
@@ -534,38 +505,8 @@
 // least add some helper functions to check most argument patterns (#
 // and types of args).
 
-enum ThreadAttributeDeclKind {
-  ThreadExpectedFieldOrGlobalVar,
-  ThreadExpectedFunctionOrMethod,
-  ThreadExpectedClassOrStruct
-};
-
-static bool checkGuardedVarAttrCommon(Sema &S, Decl *D,
-                                      const AttributeList &Attr) {
-  // D must be either a member field or global (potentially shared) variable.
-  if (!mayBeSharedVariable(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFieldOrGlobalVar;
-    return false;
-  }
-
-  return true;
-}
-
-static void handleGuardedVarAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!checkGuardedVarAttrCommon(S, D, Attr))
-    return;
-
-  D->addAttr(::new (S.Context)
-             GuardedVarAttr(Attr.getRange(), S.Context,
-                            Attr.getAttributeSpellingListIndex()));
-}
-
 static void handlePtGuardedVarAttr(Sema &S, Decl *D,
                                    const AttributeList &Attr) {
-  if (!checkGuardedVarAttrCommon(S, D, Attr))
-    return;
-
   if (!threadSafetyCheckIsPointer(S, D, Attr))
     return;
 
@@ -577,16 +518,9 @@
 static bool checkGuardedByAttrCommon(Sema &S, Decl *D,
                                      const AttributeList &Attr,
                                      Expr* &Arg) {
-  // D must be either a member field or global (potentially shared) variable.
-  if (!mayBeSharedVariable(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFieldOrGlobalVar;
-    return false;
-  }
-
   SmallVector<Expr*, 1> Args;
   // check that all arguments are lockable objects
-  checkAttrArgsAreLockableObjs(S, D, Attr, Args);
+  checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
   unsigned Size = Args.size();
   if (Size != 1)
     return false;
@@ -601,7 +535,8 @@
   if (!checkGuardedByAttrCommon(S, D, Attr, Arg))
     return;
 
-  D->addAttr(::new (S.Context) GuardedByAttr(Attr.getRange(), S.Context, Arg));
+  D->addAttr(::new (S.Context) GuardedByAttr(Attr.getRange(), S.Context, Arg,
+                                        Attr.getAttributeSpellingListIndex()));
 }
 
 static void handlePtGuardedByAttr(Sema &S, Decl *D,
@@ -614,85 +549,8 @@
     return;
 
   D->addAttr(::new (S.Context) PtGuardedByAttr(Attr.getRange(),
-                                               S.Context, Arg));
-}
-
-static bool checkLockableAttrCommon(Sema &S, Decl *D,
-                                    const AttributeList &Attr) {
-  // FIXME: Lockable structs for C code.
-  if (!isa<RecordDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedClassOrStruct;
-    return false;
-  }
-
-  return true;
-}
-
-static void handleLockableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!checkLockableAttrCommon(S, D, Attr))
-    return;
-
-  D->addAttr(::new (S.Context) LockableAttr(Attr.getRange(), S.Context));
-}
-
-static void handleScopedLockableAttr(Sema &S, Decl *D,
-                             const AttributeList &Attr) {
-  if (!checkLockableAttrCommon(S, D, Attr))
-    return;
-
-  D->addAttr(::new (S.Context)
-             ScopedLockableAttr(Attr.getRange(), S.Context,
-                                Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleNoThreadSafetyAnalysis(Sema &S, Decl *D,
-                                         const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFunctionOrMethod;
-    return;
-  }
-
-  D->addAttr(::new (S.Context) NoThreadSafetyAnalysisAttr(Attr.getRange(),
-                                                          S.Context));
-}
-
-static void handleNoSanitizeAddressAttr(Sema &S, Decl *D,
-                                      const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunctionOrMethod;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             NoSanitizeAddressAttr(Attr.getRange(), S.Context,
-                                   Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleNoSanitizeMemory(Sema &S, Decl *D,
-                                   const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunctionOrMethod;
-    return;
-  }
-
-  D->addAttr(::new (S.Context) NoSanitizeMemoryAttr(Attr.getRange(),
-                                                         S.Context));
-}
-
-static void handleNoSanitizeThread(Sema &S, Decl *D,
-                                   const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunctionOrMethod;
-    return;
-  }
-
-  D->addAttr(::new (S.Context) NoSanitizeThreadAttr(Attr.getRange(),
-                                                    S.Context));
+                                               S.Context, Arg,
+                                        Attr.getAttributeSpellingListIndex()));
 }
 
 static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D,
@@ -701,19 +559,11 @@
   if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
     return false;
 
-  // D must be either a member field or global (potentially shared) variable.
-  ValueDecl *VD = dyn_cast<ValueDecl>(D);
-  if (!VD || !mayBeSharedVariable(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFieldOrGlobalVar;
-    return false;
-  }
-
   // Check that this attribute only applies to lockable types.
-  QualType QT = VD->getType();
+  QualType QT = cast<ValueDecl>(D)->getType();
   if (!QT->isDependentType()) {
     const RecordType *RT = getRecordType(QT);
-    if (!RT || !RT->getDecl()->getAttr<LockableAttr>()) {
+    if (!RT || !RT->getDecl()->hasAttr<CapabilityAttr>()) {
       S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_lockable)
         << Attr.getName();
       return false;
@@ -721,7 +571,7 @@
   }
 
   // Check that all arguments are lockable objects.
-  checkAttrArgsAreLockableObjs(S, D, Attr, Args);
+  checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
   if (Args.empty())
     return false;
 
@@ -758,47 +608,12 @@
                                    const AttributeList &Attr,
                                    SmallVectorImpl<Expr *> &Args) {
   // zero or more arguments ok
-
-  // check that the attribute is applied to a function
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFunctionOrMethod;
-    return false;
-  }
-
   // check that all arguments are lockable objects
-  checkAttrArgsAreLockableObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true);
+  checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true);
 
   return true;
 }
 
-static void handleSharedLockFunctionAttr(Sema &S, Decl *D,
-                                         const AttributeList &Attr) {
-  SmallVector<Expr*, 1> Args;
-  if (!checkLockFunAttrCommon(S, D, Attr, Args))
-    return;
-
-  unsigned Size = Args.size();
-  Expr **StartArg = Size == 0 ? 0 : &Args[0];
-  D->addAttr(::new (S.Context)
-             SharedLockFunctionAttr(Attr.getRange(), S.Context, StartArg, Size,
-                                    Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleExclusiveLockFunctionAttr(Sema &S, Decl *D,
-                                            const AttributeList &Attr) {
-  SmallVector<Expr*, 1> Args;
-  if (!checkLockFunAttrCommon(S, D, Attr, Args))
-    return;
-
-  unsigned Size = Args.size();
-  Expr **StartArg = Size == 0 ? 0 : &Args[0];
-  D->addAttr(::new (S.Context)
-             ExclusiveLockFunctionAttr(Attr.getRange(), S.Context,
-                                       StartArg, Size,
-                                       Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleAssertSharedLockAttr(Sema &S, Decl *D,
                                        const AttributeList &Attr) {
   SmallVector<Expr*, 1> Args;
@@ -833,12 +648,6 @@
   if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
     return false;
 
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFunctionOrMethod;
-    return false;
-  }
-
   if (!isIntOrBool(Attr.getArgAsExpr(0))) {
     S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
       << Attr.getName() << 1 << AANT_ArgumentIntOrBool;
@@ -846,7 +655,7 @@
   }
 
   // check that all arguments are lockable objects
-  checkAttrArgsAreLockableObjs(S, D, Attr, Args, 1);
+  checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 1);
 
   return true;
 }
@@ -877,84 +686,11 @@
                                           Attr.getAttributeSpellingListIndex()));
 }
 
-static bool checkLocksRequiredCommon(Sema &S, Decl *D,
-                                     const AttributeList &Attr,
-                                     SmallVectorImpl<Expr *> &Args) {
-  if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
-    return false;
-
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFunctionOrMethod;
-    return false;
-  }
-
-  // check that all arguments are lockable objects
-  checkAttrArgsAreLockableObjs(S, D, Attr, Args);
-  if (Args.empty())
-    return false;
-
-  return true;
-}
-
-static void handleExclusiveLocksRequiredAttr(Sema &S, Decl *D,
-                                             const AttributeList &Attr) {
-  SmallVector<Expr*, 1> Args;
-  if (!checkLocksRequiredCommon(S, D, Attr, Args))
-    return;
-
-  Expr **StartArg = &Args[0];
-  D->addAttr(::new (S.Context)
-             ExclusiveLocksRequiredAttr(Attr.getRange(), S.Context,
-                                        StartArg, Args.size(),
-                                        Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleSharedLocksRequiredAttr(Sema &S, Decl *D,
-                                          const AttributeList &Attr) {
-  SmallVector<Expr*, 1> Args;
-  if (!checkLocksRequiredCommon(S, D, Attr, Args))
-    return;
-
-  Expr **StartArg = &Args[0];
-  D->addAttr(::new (S.Context)
-             SharedLocksRequiredAttr(Attr.getRange(), S.Context,
-                                     StartArg, Args.size(),
-                                     Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleUnlockFunAttr(Sema &S, Decl *D,
-                                const AttributeList &Attr) {
-  // zero or more arguments ok
-
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFunctionOrMethod;
-    return;
-  }
-
-  // check that all arguments are lockable objects
-  SmallVector<Expr*, 1> Args;
-  checkAttrArgsAreLockableObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true);
-  unsigned Size = Args.size();
-  Expr **StartArg = Size == 0 ? 0 : &Args[0];
-
-  D->addAttr(::new (S.Context)
-             UnlockFunctionAttr(Attr.getRange(), S.Context, StartArg, Size,
-                                Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleLockReturnedAttr(Sema &S, Decl *D,
                                    const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFunctionOrMethod;
-    return;
-  }
-
   // check that the argument is a lockable object
   SmallVector<Expr*, 1> Args;
-  checkAttrArgsAreLockableObjs(S, D, Attr, Args);
+  checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
   unsigned Size = Args.size();
   if (Size == 0)
     return;
@@ -969,15 +705,9 @@
   if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
     return;
 
-  if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
-      << Attr.getName() << ThreadExpectedFunctionOrMethod;
-    return;
-  }
-
   // check that all arguments are lockable objects
   SmallVector<Expr*, 1> Args;
-  checkAttrArgsAreLockableObjs(S, D, Attr, Args);
+  checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
   unsigned Size = Args.size();
   if (Size == 0)
     return;
@@ -988,6 +718,34 @@
                                Attr.getAttributeSpellingListIndex()));
 }
 
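+/// \brief Handles the 'enable_if' attribute, which attaches a constant
+/// boolean condition (and a diagnostic message) to a function overload, e.g.
+///   void f(int n) __attribute__((enable_if(n > 0, "n must be positive")));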
+static void handleEnableIfAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  Expr *Cond = Attr.getArgAsExpr(0);
+  if (!Cond->isTypeDependent()) {
+    ExprResult Converted = S.PerformContextuallyConvertToBool(Cond);
+    if (Converted.isInvalid())
+      return;
+    Cond = Converted.take();
+  }
+
+  StringRef Msg;
+  if (!S.checkStringLiteralArgumentAttr(Attr, 1, Msg))
+    return;
+
+  SmallVector<PartialDiagnosticAt, 8> Diags;
+  if (!Cond->isValueDependent() &&
+      !Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(D),
+                                                Diags)) {
+    S.Diag(Attr.getLoc(), diag::err_enable_if_never_constant_expr);
+    for (int I = 0, N = Diags.size(); I != N; ++I)
+      S.Diag(Diags[I].first, Diags[I].second);
+    return;
+  }
+
+  D->addAttr(::new (S.Context)
+             EnableIfAttr(Attr.getRange(), S.Context, Cond, Msg,
+                          Attr.getAttributeSpellingListIndex()));
+}
+
 static void handleConsumableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   ConsumableAttr::ConsumedState DefaultState;
 
@@ -1004,18 +762,13 @@
         << Attr.getName() << AANT_ArgumentIdentifier;
     return;
   }
-
-  if (!isa<CXXRecordDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) <<
-      Attr.getName() << ExpectedClass;
-    return;
-  }
   
   D->addAttr(::new (S.Context)
              ConsumableAttr(Attr.getRange(), S.Context, DefaultState,
                             Attr.getAttributeSpellingListIndex()));
 }
 
+
 static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
                                         const AttributeList &Attr) {
   ASTContext &CurrContext = S.getASTContext();
@@ -1038,12 +791,6 @@
                                    const AttributeList &Attr) {
   if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
     return;
-
-  if (!isa<CXXMethodDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) <<
-      Attr.getName() << ExpectedMethod;
-    return;
-  }
   
   if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
     return;
@@ -1076,13 +823,7 @@
 static void handleParamTypestateAttr(Sema &S, Decl *D,
                                     const AttributeList &Attr) {
   if (!checkAttributeNumArgs(S, Attr, 1)) return;
-  
-  if (!isa<ParmVarDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) <<
-      Attr.getName() << ExpectedParameter;
-    return;
-  }
-  
+
   ParamTypestateAttr::ConsumedState ParamState;
   
   if (Attr.isArgIdent(0)) {
@@ -1123,12 +864,6 @@
                                       const AttributeList &Attr) {
   if (!checkAttributeNumArgs(S, Attr, 1)) return;
   
-  if (!(isa<FunctionDecl>(D) || isa<ParmVarDecl>(D))) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) <<
-      Attr.getName() << ExpectedFunctionMethodOrParameter;
-    return;
-  }
-
   ReturnTypestateAttr::ConsumedState ReturnState;
   
   if (Attr.isArgIdent(0)) {
@@ -1179,12 +914,6 @@
 static void handleSetTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   if (!checkAttributeNumArgs(S, Attr, 1))
     return;
-
-  if (!isa<CXXMethodDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) <<
-      Attr.getName() << ExpectedMethod;
-    return;
-  }
   
   if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
     return;
@@ -1214,12 +943,6 @@
   if (!checkAttributeNumArgs(S, Attr, 1))
     return;
   
-  if (!isa<CXXMethodDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) <<
-      Attr.getName() << ExpectedMethod;
-    return;
-  }
-  
   if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
     return;
   
@@ -1245,21 +968,14 @@
 
 static void handleExtVectorTypeAttr(Sema &S, Scope *scope, Decl *D,
                                     const AttributeList &Attr) {
-  TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
-  if (TD == 0) {
-    // __attribute__((ext_vector_type(N))) can only be applied to typedefs
-    // and type-ids.
-    S.Diag(Attr.getLoc(), diag::err_typecheck_ext_vector_not_typedef);
-    return;
-  }
-
   // Remember this typedef decl, we will need it later for diagnostics.
-  S.ExtVectorDecls.push_back(TD);
+  S.ExtVectorDecls.push_back(cast<TypedefNameDecl>(D));
 }
 
 static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   if (TagDecl *TD = dyn_cast<TagDecl>(D))
-    TD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context));
+    TD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context,
+                                        Attr.getAttributeSpellingListIndex()));
   else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
     // If the alignment is less than or equal to 8 bits, the packed attribute
     // has no effect.
@@ -1276,28 +992,6 @@
     S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
 }
 
-static void handleMsStructAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (RecordDecl *RD = dyn_cast<RecordDecl>(D))
-    RD->addAttr(::new (S.Context)
-                MsStructAttr(Attr.getRange(), S.Context,
-                             Attr.getAttributeSpellingListIndex()));
-  else
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
-}
-
-static void handleIBAction(Sema &S, Decl *D, const AttributeList &Attr) {
-  // The IBAction attributes only apply to instance methods.
-  if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
-    if (MD->isInstanceMethod()) {
-      D->addAttr(::new (S.Context)
-                 IBActionAttr(Attr.getRange(), S.Context,
-                              Attr.getAttributeSpellingListIndex()));
-      return;
-    }
-
-  S.Diag(Attr.getLoc(), diag::warn_attribute_ibaction) << Attr.getName();
-}
-
 static bool checkIBOutletCommon(Sema &S, Decl *D, const AttributeList &Attr) {
   // The IBOutlet/IBOutletCollection attributes only apply to instance
   // variables or properties of Objective-C classes.  The outlet must also
@@ -1384,9 +1078,8 @@
   if (const RecordType *UT = T->getAsUnionType())
     if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
       RecordDecl *UD = UT->getDecl();
-      for (RecordDecl::field_iterator it = UD->field_begin(),
-           itend = UD->field_end(); it != itend; ++it) {
-        QualType QT = it->getType();
+      for (const auto *I : UD->fields()) {
+        QualType QT = I->getType();
         if (QT->isAnyPointerType() || QT->isBlockPointerType()) {
           T = QT;
           return;
@@ -1395,74 +1088,34 @@
     }
 }
 
-static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isFunctionOrMethod(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-    << Attr.getName() << ExpectedFunctionOrMethod;
-    return;
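+/// \brief Diagnoses and returns false if the given type is not a pointer or
+/// block-pointer type suitable for a nonnull-style attribute; isReturnValue
+/// selects the return-value flavor of the warning.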
+static bool attrNonNullArgCheck(Sema &S, QualType T, const AttributeList &Attr,
+                                SourceRange R, bool isReturnValue = false) {
+  T = T.getNonReferenceType();
+  possibleTransparentUnionPointerType(T);
+
+  if (!T->isAnyPointerType() && !T->isBlockPointerType()) {
+    S.Diag(Attr.getLoc(),
+           isReturnValue ? diag::warn_attribute_return_pointers_only
+                         : diag::warn_attribute_pointers_only)
+      << Attr.getName() << R;
+    return false;
   }
-
-  if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
-    return;
-
-  SmallVector<unsigned, 8> SizeArgs;
-  for (unsigned i = 0; i < Attr.getNumArgs(); ++i) {
-    Expr *Ex = Attr.getArgAsExpr(i);
-    uint64_t Idx;
-    if (!checkFunctionOrMethodArgumentIndex(S, D, Attr.getName()->getName(),
-                                            Attr.getLoc(), i + 1, Ex, Idx))
-      return;
-
-    // check if the function argument is of an integer type
-    QualType T = getFunctionOrMethodArgType(D, Idx).getNonReferenceType();
-    if (!T->isIntegerType()) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
-        << Attr.getName() << AANT_ArgumentIntegerConstant
-        << Ex->getSourceRange();
-      return;
-    }
-    SizeArgs.push_back(Idx);
-  }
-
-  // check if the function returns a pointer
-  if (!getFunctionType(D)->getResultType()->isAnyPointerType()) {
-    S.Diag(Attr.getLoc(), diag::warn_ns_attribute_wrong_return_type)
-    << Attr.getName() << 0 /*function*/<< 1 /*pointer*/ << D->getSourceRange();
-  }
-
-  D->addAttr(::new (S.Context)
-             AllocSizeAttr(Attr.getRange(), S.Context,
-                           SizeArgs.data(), SizeArgs.size(),
-                           Attr.getAttributeSpellingListIndex()));
+  return true;
 }
 
 static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  // GCC ignores the nonnull attribute on K&R style function prototypes, so we
-  // ignore it as well
-  if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
-    return;
-  }
-
   SmallVector<unsigned, 8> NonNullArgs;
   for (unsigned i = 0; i < Attr.getNumArgs(); ++i) {
     Expr *Ex = Attr.getArgAsExpr(i);
     uint64_t Idx;
-    if (!checkFunctionOrMethodArgumentIndex(S, D, Attr.getName()->getName(),
-                                            Attr.getLoc(), i + 1, Ex, Idx))
+    if (!checkFunctionOrMethodParameterIndex(S, D, Attr, i + 1, Ex, Idx))
       return;
 
     // Is the function argument a pointer type?
-    QualType T = getFunctionOrMethodArgType(D, Idx).getNonReferenceType();
-    possibleTransparentUnionPointerType(T);
-    
-    if (!T->isAnyPointerType() && !T->isBlockPointerType()) {
-      // FIXME: Should also highlight argument in decl.
-      S.Diag(Attr.getLoc(), diag::warn_nonnull_pointers_only)
-        << "nonnull" << Ex->getSourceRange();
+    // FIXME: Should also highlight argument in decl in the diagnostic.
+    if (!attrNonNullArgCheck(S, getFunctionOrMethodParamType(D, Idx), Attr,
+                             Ex->getSourceRange()))
       continue;
-    }
 
     NonNullArgs.push_back(Idx);
   }
@@ -1470,8 +1123,8 @@
   // If no arguments were specified to __attribute__((nonnull)) then all pointer
   // arguments have a nonnull attribute.
   if (NonNullArgs.empty()) {
-    for (unsigned i = 0, e = getFunctionOrMethodNumArgs(D); i != e; ++i) {
-      QualType T = getFunctionOrMethodArgType(D, i).getNonReferenceType();
+    for (unsigned i = 0, e = getFunctionOrMethodNumParams(D); i != e; ++i) {
+      QualType T = getFunctionOrMethodParamType(D, i).getNonReferenceType();
       possibleTransparentUnionPointerType(T);
       if (T->isAnyPointerType() || T->isBlockPointerType())
         NonNullArgs.push_back(i);
@@ -1495,13 +1148,37 @@
                          Attr.getAttributeSpellingListIndex()));
 }
 
-static const char *ownershipKindToDiagName(OwnershipAttr::OwnershipKind K) {
-  switch (K) {
-    case OwnershipAttr::Holds:    return "'ownership_holds'";
-    case OwnershipAttr::Takes:    return "'ownership_takes'";
-    case OwnershipAttr::Returns:  return "'ownership_returns'";
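+/// \brief Handles the 'nonnull' attribute written directly on a parameter:
+/// with no arguments it marks the parameter itself as non-null; with
+/// arguments it is only meaningful on a function-pointer parameter and is
+/// forwarded to handleNonNullAttr.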
+static void handleNonNullAttrParameter(Sema &S, ParmVarDecl *D,
+                                       const AttributeList &Attr) {
+  if (Attr.getNumArgs() > 0) {
+    if (D->getFunctionType()) {
+      handleNonNullAttr(S, D, Attr);
+    } else {
+      S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_parm_no_args)
+        << D->getSourceRange();
+    }
+    return;
   }
-  llvm_unreachable("unknown ownership");
+
+  // Is the parameter a pointer type?
+  if (!attrNonNullArgCheck(S, D->getType(), Attr, D->getSourceRange()))
+    return;
+
+  D->addAttr(::new (S.Context)
+             NonNullAttr(Attr.getRange(), S.Context, 0, 0,
+                         Attr.getAttributeSpellingListIndex()));
+}
+
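+/// \brief Handles the 'returns_nonnull' attribute; the function or method
+/// must return a pointer (or block-pointer) type.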
+static void handleReturnsNonNullAttr(Sema &S, Decl *D,
+                                     const AttributeList &Attr) {
+  QualType ResultType = getFunctionOrMethodResultType(D);
+  if (!attrNonNullArgCheck(S, ResultType, Attr, Attr.getRange(),
+                           /* isReturnValue */ true))
+    return;
+
+  D->addAttr(::new (S.Context)
+            ReturnsNonNullAttr(Attr.getRange(), S.Context,
+                               Attr.getAttributeSpellingListIndex()));
 }
 
 static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
@@ -1519,58 +1196,49 @@
     return;
   }
 
-  // Figure out our Kind, and check arguments while we're at it.
-  OwnershipAttr::OwnershipKind K;
-  switch (AL.getKind()) {
-  case AttributeList::AT_ownership_takes:
-    K = OwnershipAttr::Takes;
-    if (AL.getNumArgs() < 2) {
-      S.Diag(AL.getLoc(), diag::err_attribute_too_few_arguments) << 2;
-      return;
-    }
-    break;
-  case AttributeList::AT_ownership_holds:
-    K = OwnershipAttr::Holds;
-    if (AL.getNumArgs() < 2) {
-      S.Diag(AL.getLoc(), diag::err_attribute_too_few_arguments) << 2;
-      return;
-    }
-    break;
-  case AttributeList::AT_ownership_returns:
-    K = OwnershipAttr::Returns;
+  // Figure out our Kind.
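+  // The ownership kind (takes/holds/returns) is keyed off the attribute's
+  // spelling, so construct a temporary attribute with the same spelling list
+  // index purely to query it.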
+  OwnershipAttr::OwnershipKind K =
+      OwnershipAttr(AL.getLoc(), S.Context, 0, 0, 0,
+                    AL.getAttributeSpellingListIndex()).getOwnKind();
 
+  // Check arguments.
+  switch (K) {
+  case OwnershipAttr::Takes:
+  case OwnershipAttr::Holds:
+    if (AL.getNumArgs() < 2) {
+      S.Diag(AL.getLoc(), diag::err_attribute_too_few_arguments)
+        << AL.getName() << 2;
+      return;
+    }
+    break;
+  case OwnershipAttr::Returns:
     if (AL.getNumArgs() > 2) {
-      S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << 1;
+      S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments)
+        << AL.getName() << 1;
       return;
     }
     break;
-  default:
-    // This should never happen given how we are called.
-    llvm_unreachable("Unknown ownership attribute");
   }
 
-  if (!isFunction(D) || !hasFunctionProto(D)) {
-    S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << AL.getName() << ExpectedFunction;
-    return;
-  }
-
-  StringRef Module = AL.getArgAsIdent(0)->Ident->getName();
+  IdentifierInfo *Module = AL.getArgAsIdent(0)->Ident;
 
   // Normalize the argument, __foo__ becomes foo.
-  if (Module.startswith("__") && Module.endswith("__"))
-    Module = Module.substr(2, Module.size() - 4);
+  StringRef ModuleName = Module->getName();
+  if (ModuleName.startswith("__") && ModuleName.endswith("__") &&
+      ModuleName.size() > 4) {
+    ModuleName = ModuleName.drop_front(2).drop_back(2);
+    Module = &S.PP.getIdentifierTable().get(ModuleName);
+  }
 
   SmallVector<unsigned, 8> OwnershipArgs;
   for (unsigned i = 1; i < AL.getNumArgs(); ++i) {
     Expr *Ex = AL.getArgAsExpr(i);
     uint64_t Idx;
-    if (!checkFunctionOrMethodArgumentIndex(S, D, AL.getName()->getName(),
-                                            AL.getLoc(), i, Ex, Idx))
+    if (!checkFunctionOrMethodParameterIndex(S, D, AL, i, Ex, Idx))
       return;
 
     // Is the function argument a pointer type?
-    QualType T = getFunctionOrMethodArgType(D, Idx);
+    QualType T = getFunctionOrMethodParamType(D, Idx);
     int Err = -1;  // No error
     switch (K) {
       case OwnershipAttr::Takes:
@@ -1590,13 +1258,13 @@
     }
 
     // Check we don't have a conflict with another ownership attribute.
-    for (specific_attr_iterator<OwnershipAttr>
-         i = D->specific_attr_begin<OwnershipAttr>(),
-         e = D->specific_attr_end<OwnershipAttr>(); i != e; ++i) {
-      if ((*i)->getOwnKind() != K && (*i)->args_end() !=
-          std::find((*i)->args_begin(), (*i)->args_end(), Idx)) {
+    for (const auto *I : D->specific_attrs<OwnershipAttr>()) {
+      // FIXME: A returns attribute should conflict with any returns attribute
+      // with a different index too.
+      if (I->getOwnKind() != K && I->args_end() !=
+          std::find(I->args_begin(), I->args_end(), Idx)) {
         S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
-          << AL.getName() << ownershipKindToDiagName((*i)->getOwnKind());
+          << AL.getName() << I;
         return;
       }
     }
@@ -1608,7 +1276,7 @@
   llvm::array_pod_sort(start, start + size);
 
   D->addAttr(::new (S.Context)
-             OwnershipAttr(AL.getLoc(), S.Context, K, Module, start, size,
+             OwnershipAttr(AL.getLoc(), S.Context, Module, start, size,
                            AL.getAttributeSpellingListIndex()));
 }
 
@@ -1620,12 +1288,6 @@
     return;
   }
 
-  if (!isa<VarDecl>(D) && !isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedVariableOrFunction;
-    return;
-  }
-
   NamedDecl *nd = cast<NamedDecl>(D);
 
   // gcc rejects
@@ -1640,8 +1302,8 @@
   // we reject them
   const DeclContext *Ctx = D->getDeclContext()->getRedeclContext();
   if (!Ctx->isFileContext()) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context) <<
-        nd->getNameAsString();
+    S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context)
+      << nd;
     return;
   }
 
@@ -1698,77 +1360,22 @@
                                          Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleMinSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D) && !isa<ObjCMethodDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunctionOrMethod;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             MinSizeAttr(Attr.getRange(), S.Context,
-                         Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
+  if (checkAttrMutualExclusion<HotAttr>(S, D, Attr))
     return;
-  }
-
-  if (D->hasAttr<HotAttr>()) {
-    S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
-      << Attr.getName() << "hot";
-    return;
-  }
 
   D->addAttr(::new (S.Context) ColdAttr(Attr.getRange(), S.Context,
                                         Attr.getAttributeSpellingListIndex()));
 }
 
 static void handleHotAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
+  if (checkAttrMutualExclusion<ColdAttr>(S, D, Attr))
     return;
-  }
-
-  if (D->hasAttr<ColdAttr>()) {
-    S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
-      << Attr.getName() << "cold";
-    return;
-  }
 
   D->addAttr(::new (S.Context) HotAttr(Attr.getRange(), S.Context,
                                        Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             NakedAttr(Attr.getRange(), S.Context,
-                       Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleAlwaysInlineAttr(Sema &S, Decl *D,
-                                   const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             AlwaysInlineAttr(Attr.getRange(), S.Context,
-                              Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleTLSModelAttr(Sema &S, Decl *D,
                                const AttributeList &Attr) {
   StringRef Model;
@@ -1777,12 +1384,6 @@
   if (!S.checkStringLiteralArgumentAttr(Attr, 0, Model, &LiteralLoc))
     return;
 
-  if (!isa<VarDecl>(D) || !cast<VarDecl>(D)->getTLSKind()) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedTLSVar;
-    return;
-  }
-
   // Check that the value is one of the supported TLS models.
   if (Model != "global-dynamic" && Model != "local-dynamic"
       && Model != "initial-exec" && Model != "local-exec") {
@@ -1795,9 +1396,19 @@
                           Attr.getAttributeSpellingListIndex()));
 }
 
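+/// \brief Handles the RenderScript 'kernel' attribute, which is only honored
+/// when RenderScript support is enabled; otherwise the attribute is ignored
+/// with a warning.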
+static void handleKernelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  if (S.LangOpts.Renderscript) {
+    D->addAttr(::new (S.Context)
+               KernelAttr(Attr.getRange(), S.Context,
+                          Attr.getAttributeSpellingListIndex()));
+  } else {
+    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+  }
+}
+
 static void handleMallocAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
-    QualType RetTy = FD->getResultType();
+    QualType RetTy = FD->getReturnType();
     if (RetTy->isAnyPointerType() || RetTy->isBlockPointerType()) {
       D->addAttr(::new (S.Context)
                  MallocAttr(Attr.getRange(), S.Context,
@@ -1809,35 +1420,15 @@
   S.Diag(Attr.getLoc(), diag::warn_attribute_malloc_pointer_only);
 }
 
-static void handleMayAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  D->addAttr(::new (S.Context)
-             MayAliasAttr(Attr.getRange(), S.Context,
-                          Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleNoCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (isa<VarDecl>(D))
-    D->addAttr(::new (S.Context)
-               NoCommonAttr(Attr.getRange(), S.Context,
-                            Attr.getAttributeSpellingListIndex()));
-  else
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedVariable;
-}
-
 static void handleCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   if (S.LangOpts.CPlusPlus) {
-    S.Diag(Attr.getLoc(), diag::err_common_not_supported_cplusplus);
+    S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
+      << Attr.getName() << AttributeLangSupport::Cpp;
     return;
   }
 
-  if (isa<VarDecl>(D))
-    D->addAttr(::new (S.Context)
-               CommonAttr(Attr.getRange(), S.Context,
-                          Attr.getAttributeSpellingListIndex()));
-  else
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedVariable;
+  D->addAttr(::new (S.Context) CommonAttr(Attr.getRange(), S.Context,
+                                        Attr.getAttributeSpellingListIndex()));
 }
 
 static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &attr) {
@@ -1887,23 +1478,6 @@
                                   Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleCXX11NoReturnAttr(Sema &S, Decl *D,
-                                    const AttributeList &Attr) {
-  // C++11 [dcl.attr.noreturn]p1:
-  //   The attribute may be applied to the declarator-id in a function
-  //   declaration.
-  FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
-  if (!FD) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunctionOrMethod;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             CXX11NoReturnAttr(Attr.getRange(), S.Context,
-                               Attr.getAttributeSpellingListIndex()));
-}
-
 // PS3 PPU-specific.
 static void handleVecReturnAttr(Sema &S, Decl *D, const AttributeList &Attr) {
 /*
@@ -1929,14 +1503,8 @@
     return result; // This will be returned in a register
   }
 */
-  if (!isa<RecordDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedClass;
-    return;
-  }
-
-  if (D->getAttr<VecReturnAttr>()) {
-    S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << "vecreturn";
+  if (VecReturnAttr *A = D->getAttr<VecReturnAttr>()) {
+    S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << A;
     return;
   }
 
@@ -1953,9 +1521,8 @@
     return;
   }
 
-  for (RecordDecl::field_iterator iter = record->field_begin();
-       iter != record->field_end(); iter++) {
-    if ((count == 1) || !iter->getType()->isVectorType()) {
+  for (const auto *I : record->fields()) {
+    if ((count == 1) || !I->getType()->isVectorType()) {
       S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
       return;
     }
@@ -1977,10 +1544,6 @@
              diag::err_carries_dependency_param_not_function_decl);
       return;
     }
-  } else if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunctionMethodOrParameter;
-    return;
   }
 
   D->addAttr(::new (S.Context) CarriesDependencyAttr(
@@ -1988,36 +1551,10 @@
                                    Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleUnusedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isa<VarDecl>(D) && !isa<ObjCIvarDecl>(D) && !isFunctionOrMethod(D) &&
-      !isa<TypeDecl>(D) && !isa<LabelDecl>(D) && !isa<FieldDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedVariableFunctionOrLabel;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             UnusedAttr(Attr.getRange(), S.Context,
-                        Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleReturnsTwiceAttr(Sema &S, Decl *D,
-                                   const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             ReturnsTwiceAttr(Attr.getRange(), S.Context,
-                              Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleUsedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
     if (VD->hasLocalStorage()) {
-      S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "used";
+      S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
       return;
     }
   } else if (!isFunctionOrMethod(D)) {
@@ -2034,29 +1571,15 @@
 static void handleConstructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   // check the attribute arguments.
   if (Attr.getNumArgs() > 1) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
+    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+      << Attr.getName() << 1;
     return;
   }
 
-  int priority = 65535; // FIXME: Do not hardcode such constants.
-  if (Attr.getNumArgs() > 0) {
-    Expr *E = Attr.getArgAsExpr(0);
-    llvm::APSInt Idx(32);
-    if (E->isTypeDependent() || E->isValueDependent() ||
-        !E->isIntegerConstantExpr(Idx, S.Context)) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
-        << Attr.getName() << 1 << AANT_ArgumentIntegerConstant
-        << E->getSourceRange();
-      return;
-    }
-    priority = Idx.getZExtValue();
-  }
-
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
+  uint32_t priority = ConstructorAttr::DefaultPriority;
+  if (Attr.getNumArgs() > 0 &&
+      !checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority))
     return;
-  }
 
   D->addAttr(::new (S.Context)
              ConstructorAttr(Attr.getRange(), S.Context, priority,
@@ -2066,29 +1589,15 @@
 static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   // check the attribute arguments.
   if (Attr.getNumArgs() > 1) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
+    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+      << Attr.getName() << 1;
     return;
   }
 
-  int priority = 65535; // FIXME: Do not hardcode such constants.
-  if (Attr.getNumArgs() > 0) {
-    Expr *E = Attr.getArgAsExpr(0);
-    llvm::APSInt Idx(32);
-    if (E->isTypeDependent() || E->isValueDependent() ||
-        !E->isIntegerConstantExpr(Idx, S.Context)) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
-        << Attr.getName() << 1 << AANT_ArgumentIntegerConstant
-        << E->getSourceRange();
-      return;
-    }
-    priority = Idx.getZExtValue();
-  }
-
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
+  uint32_t priority = DestructorAttr::DefaultPriority;
+  if (Attr.getNumArgs() > 0 &&
+      !checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority))
     return;
-  }
 
   D->addAttr(::new (S.Context)
              DestructorAttr(Attr.getRange(), S.Context, priority,
@@ -2100,7 +1609,8 @@
                                   const AttributeList &Attr) {
   unsigned NumArgs = Attr.getNumArgs();
   if (NumArgs > 1) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
+    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+      << Attr.getName() << 1;
     return;
   }
 
@@ -2113,38 +1623,19 @@
                                       Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleArcWeakrefUnavailableAttr(Sema &S, Decl *D, 
-                                            const AttributeList &Attr) {
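+/// \brief Handles the explicit protocol implementation attribute, which is
+/// only valid on an Objective-C protocol definition (not on a forward
+/// declaration).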
+static void handleObjCSuppresProtocolAttr(Sema &S, Decl *D,
+                                          const AttributeList &Attr) {
+  if (!cast<ObjCProtocolDecl>(D)->isThisDeclarationADefinition()) {
+    S.Diag(Attr.getLoc(), diag::err_objc_attr_protocol_requires_definition)
+      << Attr.getName() << Attr.getRange();
+    return;
+  }
+
   D->addAttr(::new (S.Context)
-             ArcWeakrefUnavailableAttr(Attr.getRange(), S.Context,
+          ObjCExplicitProtocolImplAttr(Attr.getRange(), S.Context,
                                        Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleObjCRootClassAttr(Sema &S, Decl *D, 
-                                    const AttributeList &Attr) {
-  if (!isa<ObjCInterfaceDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedObjectiveCInterface;
-    return;
-  }
-  
-  D->addAttr(::new (S.Context)
-             ObjCRootClassAttr(Attr.getRange(), S.Context,
-                               Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleObjCRequiresPropertyDefsAttr(Sema &S, Decl *D,
-                                               const AttributeList &Attr) {
-  if (!isa<ObjCInterfaceDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_suppress_autosynthesis);
-    return;
-  }
-  
-  D->addAttr(::new (S.Context)
-             ObjCRequiresPropertyDefsAttr(Attr.getRange(), S.Context,
-                                          Attr.getAttributeSpellingListIndex()));
-}
-
 static bool checkAvailabilityAttr(Sema &S, SourceRange Range,
                                   IdentifierInfo *Platform,
                                   VersionTuple Introduced,
@@ -2450,13 +1941,7 @@
 
 static void handleObjCMethodFamilyAttr(Sema &S, Decl *decl,
                                        const AttributeList &Attr) {
-  ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(decl);
-  if (!method) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << ExpectedMethod;
-    return;
-  }
-
+  ObjCMethodDecl *method = cast<ObjCMethodDecl>(decl);
   if (!Attr.isArgIdent(0)) {
     S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
       << Attr.getName() << 1 << AANT_ArgumentIdentifier;
@@ -2471,30 +1956,17 @@
     return;
   }
 
-  if (F == ObjCMethodFamilyAttr::OMF_init && 
-      !method->getResultType()->isObjCObjectPointerType()) {
+  if (F == ObjCMethodFamilyAttr::OMF_init &&
+      !method->getReturnType()->isObjCObjectPointerType()) {
     S.Diag(method->getLocation(), diag::err_init_method_bad_return_type)
-      << method->getResultType();
+        << method->getReturnType();
     // Ignore the attribute.
     return;
   }
 
   method->addAttr(new (S.Context) ObjCMethodFamilyAttr(Attr.getRange(),
-                                                       S.Context, F));
-}
-
-static void handleObjCExceptionAttr(Sema &S, Decl *D,
-                                    const AttributeList &Attr) {
-  ObjCInterfaceDecl *OCI = dyn_cast<ObjCInterfaceDecl>(D);
-  if (OCI == 0) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedObjectiveCInterface;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             ObjCExceptionAttr(Attr.getRange(), S.Context,
-                               Attr.getAttributeSpellingListIndex()));
+                                                       S.Context, F,
+                                        Attr.getAttributeSpellingListIndex()));
 }
 
 static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -2526,18 +1998,6 @@
                               Attr.getAttributeSpellingListIndex()));
 }
 
-static void
-handleOverloadableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_overloadable_not_function);
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             OverloadableAttr(Attr.getRange(), S.Context,
-                              Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleBlocksAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   if (!Attr.isArgIdent(0)) {
     S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
@@ -2561,11 +2021,12 @@
 static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   // check the attribute arguments.
   if (Attr.getNumArgs() > 2) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 2;
+    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+      << Attr.getName() << 2;
     return;
   }
 
-  unsigned sentinel = 0;
+  unsigned sentinel = (unsigned)SentinelAttr::DefaultSentinel;
   if (Attr.getNumArgs() > 0) {
     Expr *E = Attr.getArgAsExpr(0);
     llvm::APSInt Idx(32);
@@ -2586,7 +2047,7 @@
     sentinel = Idx.getZExtValue();
   }
 
-  unsigned nullPos = 0;
+  unsigned nullPos = (unsigned)SentinelAttr::DefaultNullPos;
   if (Attr.getNumArgs() > 1) {
     Expr *E = Attr.getArgAsExpr(1);
     llvm::APSInt Idx(32);
@@ -2632,7 +2093,8 @@
   } else if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
     QualType Ty = V->getType();
     if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
-      const FunctionType *FT = Ty->isFunctionPointerType() ? getFunctionType(D)
+      const FunctionType *FT = Ty->isFunctionPointerType()
+       ? D->getFunctionType()
        : Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
       if (!cast<FunctionProtoType>(FT)->isVariadic()) {
         int m = Ty->isFunctionPointerType() ? 0 : 1;
@@ -2654,27 +2116,15 @@
                           Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleWarnUnusedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (RecordDecl *RD = dyn_cast<RecordDecl>(D))
-    RD->addAttr(::new (S.Context) WarnUnusedAttr(Attr.getRange(), S.Context));
-  else
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
-}
-
 static void handleWarnUnusedResult(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isFunction(D) && !isa<ObjCMethodDecl>(D) && !isa<CXXRecordDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunctionMethodOrClass;
-    return;
-  }
-
-  if (isFunction(D) && getFunctionType(D)->getResultType()->isVoidType()) {
+  if (D->getFunctionType() &&
+      D->getFunctionType()->getReturnType()->isVoidType()) {
     S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method)
       << Attr.getName() << 0;
     return;
   }
   if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
-    if (MD->getResultType()->isVoidType()) {
+    if (MD->getReturnType()->isVoidType()) {
       S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method)
       << Attr.getName() << 1;
       return;
@@ -2685,24 +2135,6 @@
                                   Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleWeakAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isa<VarDecl>(D) && !isa<FunctionDecl>(D)) {
-    if (isa<CXXRecordDecl>(D)) {
-      D->addAttr(::new (S.Context) WeakAttr(Attr.getRange(), S.Context));
-      return;
-    }
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedVariableOrFunction;
-    return;
-  }
-
-  NamedDecl *nd = cast<NamedDecl>(D);
-
-  nd->addAttr(::new (S.Context)
-              WeakAttr(Attr.getRange(), S.Context,
-                       Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleWeakImportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
   // weak_import only applies to variable & function declarations.
   bool isDef = false;
@@ -2727,59 +2159,26 @@
 }
 
 // Handles reqd_work_group_size and work_group_size_hint.
+template <typename WorkGroupAttr>
 static void handleWorkGroupSize(Sema &S, Decl *D,
                                 const AttributeList &Attr) {
-  unsigned WGSize[3];
-  for (unsigned i = 0; i < 3; ++i) {
-    Expr *E = Attr.getArgAsExpr(i);
-    llvm::APSInt ArgNum(32);
-    if (E->isTypeDependent() || E->isValueDependent() ||
-        !E->isIntegerConstantExpr(ArgNum, S.Context)) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
-        << Attr.getName() << AANT_ArgumentIntegerConstant
-        << E->getSourceRange();
+  uint32_t WGSize[3];
+  for (unsigned i = 0; i < 3; ++i)
+    if (!checkUInt32Argument(S, Attr, Attr.getArgAsExpr(i), WGSize[i], i))
       return;
-    }
-    WGSize[i] = (unsigned) ArgNum.getZExtValue();
-  }
 
-  if (Attr.getKind() == AttributeList::AT_ReqdWorkGroupSize
-    && D->hasAttr<ReqdWorkGroupSizeAttr>()) {
-      ReqdWorkGroupSizeAttr *A = D->getAttr<ReqdWorkGroupSizeAttr>();
-      if (!(A->getXDim() == WGSize[0] &&
-            A->getYDim() == WGSize[1] &&
-            A->getZDim() == WGSize[2])) {
-        S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) <<
-          Attr.getName();
-      }
-  }
+  WorkGroupAttr *Existing = D->getAttr<WorkGroupAttr>();
+  if (Existing && !(Existing->getXDim() == WGSize[0] &&
+                    Existing->getYDim() == WGSize[1] &&
+                    Existing->getZDim() == WGSize[2]))
+    S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
 
-  if (Attr.getKind() == AttributeList::AT_WorkGroupSizeHint
-    && D->hasAttr<WorkGroupSizeHintAttr>()) {
-      WorkGroupSizeHintAttr *A = D->getAttr<WorkGroupSizeHintAttr>();
-      if (!(A->getXDim() == WGSize[0] &&
-            A->getYDim() == WGSize[1] &&
-            A->getZDim() == WGSize[2])) {
-        S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) <<
-          Attr.getName();
-      }
-  }
-
-  if (Attr.getKind() == AttributeList::AT_ReqdWorkGroupSize)
-    D->addAttr(::new (S.Context)
-                 ReqdWorkGroupSizeAttr(Attr.getRange(), S.Context,
-                                       WGSize[0], WGSize[1], WGSize[2],
-                                       Attr.getAttributeSpellingListIndex()));
-  else
-    D->addAttr(::new (S.Context)
-                 WorkGroupSizeHintAttr(Attr.getRange(), S.Context,
-                                       WGSize[0], WGSize[1], WGSize[2],
+  D->addAttr(::new (S.Context) WorkGroupAttr(Attr.getRange(), S.Context,
+                                             WGSize[0], WGSize[1], WGSize[2],
                                        Attr.getAttributeSpellingListIndex()));
 }
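
The two OpenCL work-group attributes now share a single templated handler. A rough sketch of the behaviour it keeps (hypothetical declaration, not taken from this change; diagnostic named as in the handler above):

    __attribute__((reqd_work_group_size(8, 8, 1)))
    __attribute__((reqd_work_group_size(4, 4, 1)))  // dimensions differ from the first
                                                    // attribute: warn_duplicate_attribute
    void square(float *in, float *out);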
 
 static void handleVecTypeHint(Sema &S, Decl *D, const AttributeList &Attr) {
-  assert(Attr.getKind() == AttributeList::AT_VecTypeHint);
-
   if (!Attr.hasParsedType()) {
     S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
       << Attr.getName() << 1;
@@ -2798,9 +2197,7 @@
     return;
   }
 
-  if (Attr.getKind() == AttributeList::AT_VecTypeHint &&
-      D->hasAttr<VecTypeHintAttr>()) {
-    VecTypeHintAttr *A = D->getAttr<VecTypeHintAttr>();
+  if (VecTypeHintAttr *A = D->getAttr<VecTypeHintAttr>()) {
     if (!S.Context.hasSameType(A->getTypeHint(), ParmType)) {
       S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
       return;
@@ -2808,7 +2205,8 @@
   }
 
   D->addAttr(::new (S.Context) VecTypeHintAttr(Attr.getLoc(), S.Context,
-                                               ParmTSI));
+                                               ParmTSI,
+                                        Attr.getAttributeSpellingListIndex()));
 }
 
 SectionAttr *Sema::mergeSectionAttr(Decl *D, SourceRange Range,
@@ -2841,12 +2239,6 @@
     return;
   }
 
-  // This attribute cannot be applied to local variables.
-  if (isa<VarDecl>(D) && cast<VarDecl>(D)->hasLocalStorage()) {
-    S.Diag(LiteralLoc, diag::err_attribute_section_local_variable);
-    return;
-  }
-  
   unsigned Index = Attr.getAttributeSpellingListIndex();
   SectionAttr *NewAttr = S.mergeSectionAttr(D, Attr.getRange(), Str, Index);
   if (NewAttr)
@@ -2854,37 +2246,9 @@
 }
 
 
-static void handleNothrowAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (NoThrowAttr *Existing = D->getAttr<NoThrowAttr>()) {
-    if (Existing->getLocation().isInvalid())
-      Existing->setRange(Attr.getRange());
-  } else {
-    D->addAttr(::new (S.Context)
-               NoThrowAttr(Attr.getRange(), S.Context,
-                           Attr.getAttributeSpellingListIndex()));
-  }
-}
-
-static void handleConstAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (ConstAttr *Existing = D->getAttr<ConstAttr>()) {
-   if (Existing->getLocation().isInvalid())
-     Existing->setRange(Attr.getRange());
-  } else {
-    D->addAttr(::new (S.Context)
-               ConstAttr(Attr.getRange(), S.Context,
-                         Attr.getAttributeSpellingListIndex() ));
-  }
-}
-
-static void handlePureAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  D->addAttr(::new (S.Context)
-             PureAttr(Attr.getRange(), S.Context,
-                      Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  VarDecl *VD = dyn_cast<VarDecl>(D);
-  if (!VD || !VD->hasLocalStorage()) {
+  VarDecl *VD = cast<VarDecl>(D);
+  if (!VD->hasLocalStorage()) {
     S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
     return;
   }
@@ -2948,20 +2312,13 @@
 /// Handle __attribute__((format_arg((idx)))) attribute based on
 /// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
 static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
-    return;
-  }
-
   Expr *IdxExpr = Attr.getArgAsExpr(0);
-  uint64_t ArgIdx;
-  if (!checkFunctionOrMethodArgumentIndex(S, D, Attr.getName()->getName(),
-                                          Attr.getLoc(), 1, IdxExpr, ArgIdx))
+  uint64_t Idx;
+  if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 1, IdxExpr, Idx))
     return;
 
   // make sure the format string is really a string
-  QualType Ty = getFunctionOrMethodArgType(D, ArgIdx);
+  QualType Ty = getFunctionOrMethodParamType(D, Idx);
 
   bool not_nsstring_type = !isNSStringType(Ty, S.Context);
   if (not_nsstring_type &&
@@ -2986,7 +2343,7 @@
     return;
   }
 
-  // We cannot use the ArgIdx returned from checkFunctionOrMethodArgumentIndex
+  // We cannot use the Idx returned from checkFunctionOrMethodParameterIndex
   // because that has corrected for the implicit this parameter, and is zero-
   // based.  The attribute expects what the user wrote explicitly.
   llvm::APSInt Val;
@@ -3033,12 +2390,12 @@
     return;
   }
   
-  if (!isa<VarDecl>(D) || S.getCurFunctionOrMethodDecl()) {
+  if (S.getCurFunctionOrMethodDecl()) {
     S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
     Attr.setInvalid();
     return;
   }
-  QualType T = dyn_cast<VarDecl>(D)->getType();
+  QualType T = cast<VarDecl>(D)->getType();
   if (S.Context.getAsArrayType(T))
     T = S.Context.getBaseElementType(T);
   if (!T->getAs<RecordType>()) {
@@ -3046,22 +2403,17 @@
     Attr.setInvalid();
     return;
   }
-  
-  Expr *priorityExpr = Attr.getArgAsExpr(0);
-  
-  llvm::APSInt priority(32);
-  if (priorityExpr->isTypeDependent() || priorityExpr->isValueDependent() ||
-      !priorityExpr->isIntegerConstantExpr(priority, S.Context)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
-      << Attr.getName() << AANT_ArgumentIntegerConstant
-      << priorityExpr->getSourceRange();
+
+  Expr *E = Attr.getArgAsExpr(0);
+  uint32_t prioritynum;
+  if (!checkUInt32Argument(S, Attr, E, prioritynum)) {
     Attr.setInvalid();
     return;
   }
-  unsigned prioritynum = priority.getZExtValue();
+
   if (prioritynum < 101 || prioritynum > 65535) {
     S.Diag(Attr.getLoc(), diag::err_attribute_argument_outof_range)
-    <<  priorityExpr->getSourceRange();
+      << E->getSourceRange();
     Attr.setInvalid();
     return;
   }
@@ -3075,18 +2427,14 @@
                                   int FirstArg,
                                   unsigned AttrSpellingListIndex) {
   // Check whether we already have an equivalent format attribute.
-  for (specific_attr_iterator<FormatAttr>
-         i = D->specific_attr_begin<FormatAttr>(),
-         e = D->specific_attr_end<FormatAttr>();
-       i != e ; ++i) {
-    FormatAttr *f = *i;
-    if (f->getType() == Format &&
-        f->getFormatIdx() == FormatIdx &&
-        f->getFirstArg() == FirstArg) {
+  for (auto *F : D->specific_attrs<FormatAttr>()) {
+    if (F->getType() == Format &&
+        F->getFormatIdx() == FormatIdx &&
+        F->getFirstArg() == FirstArg) {
       // If we don't have a valid location for this attribute, adopt the
       // location.
-      if (f->getLocation().isInvalid())
-        f->setRange(Range);
+      if (F->getLocation().isInvalid())
+        F->setRange(Range);
       return NULL;
     }
   }
@@ -3104,17 +2452,10 @@
     return;
   }
 
-  if (!isFunctionOrMethodOrBlock(D) || !hasFunctionProto(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
-    return;
-  }
-
   // In C++ the implicit 'this' function parameter also counts, and they are
   // counted from one.
   bool HasImplicitThisParam = isInstanceMethod(D);
-  unsigned NumArgs  = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
-  unsigned FirstIdx = 1;
+  unsigned NumArgs = getFunctionOrMethodNumParams(D) + HasImplicitThisParam;
 
   IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident;
   StringRef Format = II->getName();
@@ -3134,29 +2475,24 @@
   
   if (Kind == InvalidFormat) {
     S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
-      << "format" << II->getName();
+      << Attr.getName() << II->getName();
     return;
   }
 
   // checks for the 2nd argument
   Expr *IdxExpr = Attr.getArgAsExpr(1);
-  llvm::APSInt Idx(32);
-  if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
-      !IdxExpr->isIntegerConstantExpr(Idx, S.Context)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
-      << Attr.getName() << 2 << AANT_ArgumentIntegerConstant
-      << IdxExpr->getSourceRange();
+  uint32_t Idx;
+  if (!checkUInt32Argument(S, Attr, IdxExpr, Idx, 2))
     return;
-  }
 
-  if (Idx.getZExtValue() < FirstIdx || Idx.getZExtValue() > NumArgs) {
+  if (Idx < 1 || Idx > NumArgs) {
     S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
-      << "format" << 2 << IdxExpr->getSourceRange();
+      << Attr.getName() << 2 << IdxExpr->getSourceRange();
     return;
   }
 
   // FIXME: Do we need to bounds check?
-  unsigned ArgIdx = Idx.getZExtValue() - 1;
+  unsigned ArgIdx = Idx - 1;
 
   if (HasImplicitThisParam) {
     if (ArgIdx == 0) {
@@ -3169,7 +2505,7 @@
   }
 
   // make sure the format string is really a string
-  QualType Ty = getFunctionOrMethodArgType(D, ArgIdx);
+  QualType Ty = getFunctionOrMethodParamType(D, ArgIdx);
 
   if (Kind == CFStringFormat) {
     if (!isCFStringType(Ty, S.Context)) {
@@ -3196,14 +2532,9 @@
 
   // check the 3rd argument
   Expr *FirstArgExpr = Attr.getArgAsExpr(2);
-  llvm::APSInt FirstArg(32);
-  if (FirstArgExpr->isTypeDependent() || FirstArgExpr->isValueDependent() ||
-      !FirstArgExpr->isIntegerConstantExpr(FirstArg, S.Context)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
-      << Attr.getName() << 3 << AANT_ArgumentIntegerConstant
-      << FirstArgExpr->getSourceRange();
+  uint32_t FirstArg;
+  if (!checkUInt32Argument(S, Attr, FirstArgExpr, FirstArg, 3))
     return;
-  }
 
   // check if the function is variadic if the 3rd argument non-zero
   if (FirstArg != 0) {
@@ -3226,13 +2557,12 @@
   // if 0 it disables parameter checking (to use with e.g. va_list)
   } else if (FirstArg != 0 && FirstArg != NumArgs) {
     S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
-      << "format" << 3 << FirstArgExpr->getSourceRange();
+      << Attr.getName() << 3 << FirstArgExpr->getSourceRange();
     return;
   }
 
   FormatAttr *NewAttr = S.mergeFormatAttr(D, Attr.getRange(), II,
-                                          Idx.getZExtValue(),
-                                          FirstArg.getZExtValue(),
+                                          Idx, FirstArg,
                                           Attr.getAttributeSpellingListIndex());
   if (NewAttr)
     D->addAttr(NewAttr);
@@ -3280,8 +2610,13 @@
   uint64_t FirstAlign = S.Context.getTypeAlign(FirstType);
   for (; Field != FieldEnd; ++Field) {
     QualType FieldType = Field->getType();
+    // FIXME: this isn't fully correct; we also need to test whether the
+    // members of the union would all have the same calling convention as the
+    // first member of the union. Checking just the size and alignment isn't
+    // sufficient (consider structs passed on the stack instead of in registers
+    // as an example).
     if (S.Context.getTypeSize(FieldType) != FirstSize ||
-        S.Context.getTypeAlign(FieldType) != FirstAlign) {
+        S.Context.getTypeAlign(FieldType) > FirstAlign) {
       // Warn if we drop the attribute.
       bool isSize = S.Context.getTypeSize(FieldType) != FirstSize;
       unsigned FieldBits = isSize? S.Context.getTypeSize(FieldType)
@@ -3310,10 +2645,8 @@
     return;
 
   // Don't duplicate annotations that are already set.
-  for (specific_attr_iterator<AnnotateAttr>
-       i = D->specific_attr_begin<AnnotateAttr>(),
-       e = D->specific_attr_end<AnnotateAttr>(); i != e; ++i) {
-    if ((*i)->getAnnotation() == Str)
+  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
+    if (I->getAnnotation() == Str)
       return;
   }
   
@@ -3380,15 +2713,14 @@
       if (FD->isBitField())
         DiagKind = 3;
     } else if (!isa<TagDecl>(D)) {
-      Diag(AttrLoc, diag::err_attribute_wrong_decl_type)
-        << (TmpAttr.isC11() ? "'_Alignas'" : "'alignas'")
+      Diag(AttrLoc, diag::err_attribute_wrong_decl_type) << &TmpAttr
         << (TmpAttr.isC11() ? ExpectedVariableOrField
                             : ExpectedVariableFieldOrTag);
       return;
     }
     if (DiagKind != -1) {
       Diag(AttrLoc, diag::err_alignas_attribute_wrong_decl_type)
-        << TmpAttr.isC11() << DiagKind;
+        << &TmpAttr << DiagKind;
       return;
     }
   }
@@ -3422,14 +2754,12 @@
     return;
   }
 
-  if (TmpAttr.isDeclspec()) {
-    // We've already verified it's a power of 2, now let's make sure it's
-    // 8192 or less.
-    if (Alignment.getZExtValue() > 8192) {
-      Diag(AttrLoc, diag::err_attribute_aligned_greater_than_8192)
-        << E->getSourceRange();
-      return;
-    }
+  // Alignment calculations can wrap around if it's greater than 2**28.
+  unsigned MaxValidAlignment = TmpAttr.isDeclspec() ? 8192 : 268435456;
+  if (Alignment.getZExtValue() > MaxValidAlignment) {
+    Diag(AttrLoc, diag::err_attribute_aligned_too_great) << MaxValidAlignment
+                                                         << E->getSourceRange();
+    return;
   }
 
   AlignedAttr *AA = ::new (Context) AlignedAttr(AttrRange, Context, true,
@@ -3465,13 +2795,11 @@
   //   would otherwise be required for the entity being declared.
   AlignedAttr *AlignasAttr = 0;
   unsigned Align = 0;
-  for (specific_attr_iterator<AlignedAttr>
-         I = D->specific_attr_begin<AlignedAttr>(),
-         E = D->specific_attr_end<AlignedAttr>(); I != E; ++I) {
+  for (auto *I : D->specific_attrs<AlignedAttr>()) {
     if (I->isAlignmentDependent())
       return;
     if (I->isAlignas())
-      AlignasAttr = *I;
+      AlignasAttr = I;
     Align = std::max(Align, I->getAlignment(Context));
   }
 
@@ -3484,6 +2812,35 @@
   }
 }
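
A quick sketch of the new alignment caps introduced above (hypothetical declarations; the __declspec spelling assumes MS extensions are enabled):

    __declspec(align(16384)) int a;            // above the 8192 cap kept for __declspec
    int b __attribute__((aligned(1u << 29)));  // above 2**28: err_attribute_aligned_too_great,
                                               // since larger values can wrap the
                                               // alignment computations
    int c __attribute__((aligned(1u << 28)));  // exactly 2**28 is still accepted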
 
+bool Sema::checkMSInheritanceAttrOnDefinition(
+    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
+    MSInheritanceAttr::Spelling SemanticSpelling) {
+  assert(RD->hasDefinition() && "RD has no definition!");
+
+  // We may not have seen base specifiers or any virtual methods yet.  We will
+  // have to wait until the record is defined to catch any mismatches.
+  if (!RD->getDefinition()->isCompleteDefinition())
+    return false;
+
+  // The unspecified model never matches what a definition could need.
+  if (SemanticSpelling == MSInheritanceAttr::Keyword_unspecified_inheritance)
+    return false;
+
+  if (BestCase) {
+    if (RD->calculateInheritanceModel() == SemanticSpelling)
+      return false;
+  } else {
+    if (RD->calculateInheritanceModel() <= SemanticSpelling)
+      return false;
+  }
+
+  Diag(Range.getBegin(), diag::err_mismatched_ms_inheritance)
+      << 0 /*definition*/;
+  Diag(RD->getDefinition()->getLocation(), diag::note_defined_here)
+      << RD->getNameAsString();
+  return true;
+}
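
A minimal sketch of what this check rejects (MS extensions assumed; the types are hypothetical):

    struct Base { };
    struct S : virtual Base { };    // the definition requires the virtual model
    struct __single_inheritance S;  // err_mismatched_ms_inheritance, plus a
                                    // note_defined_here pointing at the definition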
+
 /// handleModeAttr - This attribute modifies the width of a decl with primitive
 /// type.
 ///
@@ -3553,7 +2910,7 @@
     OldTy = VD->getType();
   else {
     S.Diag(D->getLocation(), diag::err_attr_wrong_decl)
-      << "mode" << Attr.getRange();
+      << Attr.getName() << Attr.getRange();
     return;
   }
 
@@ -3575,7 +2932,7 @@
   // FIXME: Make sure floating-point mappings are accurate
   // FIXME: Support XF and TF types
   if (!DestWidth) {
-    S.Diag(Attr.getLoc(), diag::err_unknown_machine_mode) << Name;
+    S.Diag(Attr.getLoc(), diag::err_machine_mode) << 0 /*Unknown*/ << Name;
     return;
   }
 
@@ -3588,7 +2945,7 @@
     NewTy = S.Context.getRealTypeForBitwidth(DestWidth);
 
   if (NewTy.isNull()) {
-    S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+    S.Diag(Attr.getLoc(), diag::err_machine_mode) << 1 /*Unsupported*/ << Name;
     return;
   }
 
@@ -3625,149 +2982,49 @@
                          Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleNoInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
+static void handleAlwaysInlineAttr(Sema &S, Decl *D,
+                                   const AttributeList &Attr) {
+  if (checkAttrMutualExclusion<OptimizeNoneAttr>(S, D, Attr))
     return;
-  }
 
   D->addAttr(::new (S.Context)
-             NoInlineAttr(Attr.getRange(), S.Context,
-             Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleNoInstrumentFunctionAttr(Sema &S, Decl *D,
-                                           const AttributeList &Attr) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             NoInstrumentFunctionAttr(Attr.getRange(), S.Context,
-                                      Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleKernelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (S.LangOpts.Renderscript) {
-    D->addAttr(::new (S.Context) KernelAttr(Attr.getRange(), S.Context));
-  } else {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "kernel";
-  }
-}
-
-static void handleConstantAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (S.LangOpts.CUDA) {
-    if (!isa<VarDecl>(D)) {
-      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-        << Attr.getName() << ExpectedVariable;
-      return;
-    }
-
-    D->addAttr(::new (S.Context)
-               CUDAConstantAttr(Attr.getRange(), S.Context,
-                                Attr.getAttributeSpellingListIndex()));
-  } else {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "constant";
-  }
-}
-
-static void handleDeviceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (S.LangOpts.CUDA) {
-    // check the attribute arguments.
-    if (Attr.getNumArgs() != 0) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-        << Attr.getName() << 0;
-      return;
-    }
-
-    if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) {
-      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-        << Attr.getName() << ExpectedVariableOrFunction;
-      return;
-    }
-
-    D->addAttr(::new (S.Context)
-               CUDADeviceAttr(Attr.getRange(), S.Context,
+             AlwaysInlineAttr(Attr.getRange(), S.Context,
                               Attr.getAttributeSpellingListIndex()));
-  } else {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "device";
-  }
+}
+
+static void handleOptimizeNoneAttr(Sema &S, Decl *D,
+                                   const AttributeList &Attr) {
+  if (checkAttrMutualExclusion<AlwaysInlineAttr>(S, D, Attr))
+    return;
+
+  D->addAttr(::new (S.Context)
+             OptimizeNoneAttr(Attr.getRange(), S.Context,
+                              Attr.getAttributeSpellingListIndex()));
 }
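
Both handlers now go through checkAttrMutualExclusion, so combining the two attributes is rejected up front. A hypothetical example:

    __attribute__((always_inline))
    __attribute__((optnone))   // incompatible with always_inline on the same declaration
    void f();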
 
 static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (S.LangOpts.CUDA) {
-    if (!isa<FunctionDecl>(D)) {
-      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-        << Attr.getName() << ExpectedFunction;
-      return;
+  FunctionDecl *FD = cast<FunctionDecl>(D);
+  if (!FD->getReturnType()->isVoidType()) {
+    TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
+    if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
+      S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
+        << FD->getType()
+        << FixItHint::CreateReplacement(FTL.getReturnLoc().getSourceRange(),
+                                        "void");
+    } else {
+      S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
+        << FD->getType();
     }
-
-    FunctionDecl *FD = cast<FunctionDecl>(D);
-    if (!FD->getResultType()->isVoidType()) {
-      TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
-      if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
-        S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
-          << FD->getType()
-          << FixItHint::CreateReplacement(FTL.getResultLoc().getSourceRange(),
-                                          "void");
-      } else {
-        S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
-          << FD->getType();
-      }
-      return;
-    }
-
-    D->addAttr(::new (S.Context)
-               CUDAGlobalAttr(Attr.getRange(), S.Context,
-                              Attr.getAttributeSpellingListIndex()));
-  } else {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "global";
-  }
-}
-
-static void handleHostAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (S.LangOpts.CUDA) {
-    if (!isa<FunctionDecl>(D)) {
-      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-        << Attr.getName() << ExpectedFunction;
-      return;
-    }
-
-    D->addAttr(::new (S.Context)
-               CUDAHostAttr(Attr.getRange(), S.Context,
-                            Attr.getAttributeSpellingListIndex()));
-  } else {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "host";
-  }
-}
-
-static void handleSharedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (S.LangOpts.CUDA) {
-    if (!isa<VarDecl>(D)) {
-      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-        << Attr.getName() << ExpectedVariable;
-      return;
-    }
-
-    D->addAttr(::new (S.Context)
-               CUDASharedAttr(Attr.getRange(), S.Context,
-                              Attr.getAttributeSpellingListIndex()));
-  } else {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "shared";
-  }
-}
-
-static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  FunctionDecl *Fn = dyn_cast<FunctionDecl>(D);
-  if (Fn == 0) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunction;
     return;
   }
 
+  D->addAttr(::new (S.Context)
+              CUDAGlobalAttr(Attr.getRange(), S.Context,
+                            Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  FunctionDecl *Fn = cast<FunctionDecl>(D);
   if (!Fn->isInlineSpecified()) {
     S.Diag(Attr.getLoc(), diag::warn_gnu_inline_attribute_requires_inline);
     return;
@@ -3864,25 +3121,6 @@
   }
 }
 
-static void handleOpenCLKernelAttr(Sema &S, Decl *D, const AttributeList &Attr){
-  D->addAttr(::new (S.Context) OpenCLKernelAttr(Attr.getRange(), S.Context));
-}
-
-static void handleOpenCLImageAccessAttr(Sema &S, Decl *D, const AttributeList &Attr){
-  Expr *E = Attr.getArgAsExpr(0);
-  llvm::APSInt ArgNum(32);
-  if (E->isTypeDependent() || E->isValueDependent() ||
-      !E->isIntegerConstantExpr(ArgNum, S.Context)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
-      << Attr.getName() << AANT_ArgumentIntegerConstant
-      << E->getSourceRange();
-    return;
-  }
-
-  D->addAttr(::new (S.Context) OpenCLImageAccessAttr(
-    Attr.getRange(), S.Context, ArgNum.getZExtValue()));
-}
-
 bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, 
                                 const FunctionDecl *FD) {
   if (attr.isInvalid())
@@ -3894,8 +3132,7 @@
     return true;
   }
 
-  // TODO: diagnose uses of these conventions on the wrong target. Or, better
-  // move to TargetAttributesSema one day.
+  // TODO: diagnose uses of these conventions on the wrong target.
   switch (attr.getKind()) {
   case AttributeList::AT_CDecl: CC = CC_C; break;
   case AttributeList::AT_FastCall: CC = CC_X86FastCall; break;
@@ -3948,24 +3185,6 @@
   return false;
 }
 
-static void handleRegparmAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (hasDeclarator(D)) return;
-
-  unsigned numParams;
-  if (S.CheckRegparmAttr(Attr, numParams))
-    return;
-
-  if (!isa<ObjCMethodDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << ExpectedFunctionOrMethod;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             RegparmAttr(Attr.getRange(), S.Context, numParams,
-                         Attr.getAttributeSpellingListIndex()));
-}
-
 /// Checks a regparm attribute, returning true if it is ill-formed and
 /// otherwise setting numParams to the appropriate value.
 bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) {
@@ -3977,13 +3196,9 @@
     return true;
   }
 
+  uint32_t NP;
   Expr *NumParamsExpr = Attr.getArgAsExpr(0);
-  llvm::APSInt NumParams(32);
-  if (NumParamsExpr->isTypeDependent() || NumParamsExpr->isValueDependent() ||
-      !NumParamsExpr->isIntegerConstantExpr(NumParams, Context)) {
-    Diag(Attr.getLoc(), diag::err_attribute_argument_type)
-      << Attr.getName() << AANT_ArgumentIntegerConstant
-      << NumParamsExpr->getSourceRange();
+  if (!checkUInt32Argument(*this, Attr, NumParamsExpr, NP)) {
     Attr.setInvalid();
     return true;
   }
@@ -3995,7 +3210,7 @@
     return true;
   }
 
-  numParams = NumParams.getZExtValue();
+  numParams = NP;
   if (numParams > Context.getTargetInfo().getRegParmMax()) {
     Diag(Attr.getLoc(), diag::err_attribute_regparm_invalid_number)
       << Context.getTargetInfo().getRegParmMax() << NumParamsExpr->getSourceRange();
@@ -4006,53 +3221,28 @@
   return false;
 }
 
-static void handleLaunchBoundsAttr(Sema &S, Decl *D, const AttributeList &Attr){
-  if (S.LangOpts.CUDA) {
-    // check the attribute arguments.
-    if (Attr.getNumArgs() != 1 && Attr.getNumArgs() != 2) {
-      // FIXME: 0 is not okay.
-      S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 2;
-      return;
-    }
-
-    if (!isFunctionOrMethod(D)) {
-      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-        << Attr.getName() << ExpectedFunctionOrMethod;
-      return;
-    }
-
-    Expr *MaxThreadsExpr = Attr.getArgAsExpr(0);
-    llvm::APSInt MaxThreads(32);
-    if (MaxThreadsExpr->isTypeDependent() ||
-        MaxThreadsExpr->isValueDependent() ||
-        !MaxThreadsExpr->isIntegerConstantExpr(MaxThreads, S.Context)) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
-        << Attr.getName() << 1 << AANT_ArgumentIntegerConstant
-        << MaxThreadsExpr->getSourceRange();
-      return;
-    }
-
-    llvm::APSInt MinBlocks(32);
-    if (Attr.getNumArgs() > 1) {
-      Expr *MinBlocksExpr = Attr.getArgAsExpr(1);
-      if (MinBlocksExpr->isTypeDependent() ||
-          MinBlocksExpr->isValueDependent() ||
-          !MinBlocksExpr->isIntegerConstantExpr(MinBlocks, S.Context)) {
-        S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
-          << Attr.getName() << 2 << AANT_ArgumentIntegerConstant
-          << MinBlocksExpr->getSourceRange();
-        return;
-      }
-    }
-
-    D->addAttr(::new (S.Context)
-               CUDALaunchBoundsAttr(Attr.getRange(), S.Context,
-                                    MaxThreads.getZExtValue(),
-                                    MinBlocks.getZExtValue(),
-                                    Attr.getAttributeSpellingListIndex()));
-  } else {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "launch_bounds";
+static void handleLaunchBoundsAttr(Sema &S, Decl *D,
+                                   const AttributeList &Attr) {
+  // check the attribute arguments.
+  if (Attr.getNumArgs() != 1 && Attr.getNumArgs() != 2) {
+    // FIXME: 0 is not okay.
+    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+      << Attr.getName() << 2;
+    return;
   }
+
+  uint32_t MaxThreads, MinBlocks = 0;
+  if (!checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), MaxThreads, 1))
+    return;
+  if (Attr.getNumArgs() > 1 && !checkUInt32Argument(S, Attr,
+                                                    Attr.getArgAsExpr(1),
+                                                    MinBlocks, 2))
+    return;
+
+  D->addAttr(::new (S.Context)
+              CUDALaunchBoundsAttr(Attr.getRange(), S.Context,
+                                  MaxThreads, MinBlocks,
+                                  Attr.getAttributeSpellingListIndex()));
 }
 
 static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
@@ -4066,7 +3256,6 @@
   if (!checkAttributeNumArgs(S, Attr, 3))
     return;
 
-  StringRef AttrName = Attr.getName()->getName();
   IdentifierInfo *ArgumentKind = Attr.getArgAsIdent(0)->Ident;
 
   if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
@@ -4076,21 +3265,19 @@
   }
 
   uint64_t ArgumentIdx;
-  if (!checkFunctionOrMethodArgumentIndex(S, D, AttrName,
-                                          Attr.getLoc(), 2,
-                                          Attr.getArgAsExpr(1), ArgumentIdx))
+  if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 2, Attr.getArgAsExpr(1),
+                                           ArgumentIdx))
     return;
 
   uint64_t TypeTagIdx;
-  if (!checkFunctionOrMethodArgumentIndex(S, D, AttrName,
-                                          Attr.getLoc(), 3,
-                                          Attr.getArgAsExpr(2), TypeTagIdx))
+  if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 3, Attr.getArgAsExpr(2),
+                                           TypeTagIdx))
     return;
 
-  bool IsPointer = (AttrName == "pointer_with_type_tag");
+  bool IsPointer = (Attr.getName()->getName() == "pointer_with_type_tag");
   if (IsPointer) {
     // Ensure that buffer has a pointer type.
-    QualType BufferTy = getFunctionOrMethodArgType(D, ArgumentIdx);
+    QualType BufferTy = getFunctionOrMethodParamType(D, ArgumentIdx);
     if (!BufferTy->isPointerType()) {
       S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only)
         << Attr.getName();
@@ -4114,6 +3301,12 @@
   if (!checkAttributeNumArgs(S, Attr, 1))
     return;
 
+  if (!isa<VarDecl>(D)) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+      << Attr.getName() << ExpectedVariable;
+    return;
+  }
+
   IdentifierInfo *PointerKind = Attr.getArgAsIdent(0)->Ident;
   TypeSourceInfo *MatchingCTypeLoc = 0;
   S.GetTypeFromParser(Attr.getMatchingCType(), &MatchingCTypeLoc);
@@ -4143,14 +3336,9 @@
 }
 
 static void handleNSConsumedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  ParmVarDecl *param = dyn_cast<ParmVarDecl>(D);
-  if (!param) {
-    S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getRange() << Attr.getName() << ExpectedParameter;
-    return;
-  }
-
+  ParmVarDecl *param = cast<ParmVarDecl>(D);
   bool typeOK, cf;
+
   if (Attr.getKind() == AttributeList::AT_NSConsumed) {
     typeOK = isValidSubjectOfNSAttribute(S, param->getType());
     cf = false;
@@ -4175,33 +3363,20 @@
                                   Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleNSConsumesSelfAttr(Sema &S, Decl *D,
-                                     const AttributeList &Attr) {
-  if (!isa<ObjCMethodDecl>(D)) {
-    S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getRange() << Attr.getName() << ExpectedMethod;
-    return;
-  }
-
-  D->addAttr(::new (S.Context)
-             NSConsumesSelfAttr(Attr.getRange(), S.Context,
-                                Attr.getAttributeSpellingListIndex()));
-}
-
 static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
                                         const AttributeList &Attr) {
 
   QualType returnType;
 
   if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
-    returnType = MD->getResultType();
+    returnType = MD->getReturnType();
   else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
            (Attr.getKind() == AttributeList::AT_NSReturnsRetained))
     return; // ignore: was handled as a type attribute
   else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
     returnType = PD->getType();
   else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
-    returnType = FD->getResultType();
+    returnType = FD->getReturnType();
   else {
     S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
         << Attr.getRange() << Attr.getName()
@@ -4271,27 +3446,17 @@
   
   SourceLocation loc = attr.getLoc();
   QualType resultType;
-  
-  ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(D);
-
-  if (!method) {
-    ObjCPropertyDecl *property = dyn_cast<ObjCPropertyDecl>(D);
-    if (!property) {
-      S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
-        << SourceRange(loc, loc) << attr.getName() << ExpectedMethodOrProperty;
-      return;
-    }
-    resultType = property->getType();
-  }
+  if (isa<ObjCMethodDecl>(D))
+    resultType = cast<ObjCMethodDecl>(D)->getReturnType();
   else
-    // Check that the method returns a normal pointer.
-    resultType = method->getResultType();
+    resultType = cast<ObjCPropertyDecl>(D)->getType();
 
   if (!resultType->isReferenceType() &&
       (!resultType->isPointerType() || resultType->isObjCRetainableType())) {
     S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
       << SourceRange(loc)
-    << attr.getName() << (method ? EP_ObjCMethod : EP_ObjCProperty)
+    << attr.getName()
+    << (isa<ObjCMethodDecl>(D) ? EP_ObjCMethod : EP_ObjCProperty)
     << /*non-retainable pointer*/ 2;
 
     // Drop the attribute.
@@ -4305,14 +3470,8 @@
 
 static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
                                         const AttributeList &attr) {
-  SourceLocation loc = attr.getLoc();
-  ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(D);
+  ObjCMethodDecl *method = cast<ObjCMethodDecl>(D);
   
-  if (!method) {
-   S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
-   << SourceRange(loc, loc) << attr.getName() << ExpectedMethod;
-    return;
-  }
   DeclContext *DC = method->getDeclContext();
   if (const ObjCProtocolDecl *PDecl = dyn_cast_or_null<ObjCProtocolDecl>(DC)) {
     S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
@@ -4331,99 +3490,85 @@
                                         attr.getAttributeSpellingListIndex()));
 }
 
-/// Handle cf_audited_transfer and cf_unknown_transfer.
-static void handleCFTransferAttr(Sema &S, Decl *D, const AttributeList &A) {
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
-      << A.getRange() << A.getName() << ExpectedFunction;
+static void handleCFAuditedTransferAttr(Sema &S, Decl *D,
+                                        const AttributeList &Attr) {
+  if (checkAttrMutualExclusion<CFUnknownTransferAttr>(S, D, Attr))
     return;
-  }
-
-  bool IsAudited = (A.getKind() == AttributeList::AT_CFAuditedTransfer);
-
-  // Check whether there's a conflicting attribute already present.
-  Attr *Existing;
-  if (IsAudited) {
-    Existing = D->getAttr<CFUnknownTransferAttr>();
-  } else {
-    Existing = D->getAttr<CFAuditedTransferAttr>();
-  }
-  if (Existing) {
-    S.Diag(D->getLocStart(), diag::err_attributes_are_not_compatible)
-      << A.getName()
-      << (IsAudited ? "cf_unknown_transfer" : "cf_audited_transfer")
-      << A.getRange() << Existing->getRange();
-    return;
-  }
-
-  // All clear;  add the attribute.
-  if (IsAudited) {
-    D->addAttr(::new (S.Context)
-               CFAuditedTransferAttr(A.getRange(), S.Context,
-                                     A.getAttributeSpellingListIndex()));
-  } else {
-    D->addAttr(::new (S.Context)
-               CFUnknownTransferAttr(A.getRange(), S.Context,
-                                     A.getAttributeSpellingListIndex()));
-  }
-}
-
-static void handleNSBridgedAttr(Sema &S, Scope *Sc, Decl *D,
-                                const AttributeList &Attr) {
-  RecordDecl *RD = dyn_cast<RecordDecl>(D);
-  if (!RD || RD->isUnion()) {
-    S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
-      << Attr.getRange() << Attr.getName() << ExpectedStruct;
-  }
-
-  IdentifierLoc *Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : 0;
-
-  // In Objective-C, verify that the type names an Objective-C type.
-  // We don't want to check this outside of ObjC because people sometimes
-  // do crazy C declarations of Objective-C types.
-  if (Parm && S.getLangOpts().ObjC1) {
-    // Check for an existing type with this name.
-    LookupResult R(S, DeclarationName(Parm->Ident), Parm->Loc,
-                   Sema::LookupOrdinaryName);
-    if (S.LookupName(R, Sc)) {
-      NamedDecl *Target = R.getFoundDecl();
-      if (Target && !isa<ObjCInterfaceDecl>(Target)) {
-        S.Diag(D->getLocStart(), diag::err_ns_bridged_not_interface);
-        S.Diag(Target->getLocStart(), diag::note_declared_at);
-      }
-    }
-  }
 
   D->addAttr(::new (S.Context)
-             NSBridgedAttr(Attr.getRange(), S.Context, Parm ? Parm->Ident : 0,
-                           Attr.getAttributeSpellingListIndex()));
+             CFAuditedTransferAttr(Attr.getRange(), S.Context,
+                                   Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleCFUnknownTransferAttr(Sema &S, Decl *D,
+                                        const AttributeList &Attr) {
+  if (checkAttrMutualExclusion<CFAuditedTransferAttr>(S, D, Attr))
+    return;
+
+  D->addAttr(::new (S.Context)
+             CFUnknownTransferAttr(Attr.getRange(), S.Context,
+             Attr.getAttributeSpellingListIndex()));
 }
 
 static void handleObjCBridgeAttr(Sema &S, Scope *Sc, Decl *D,
                                 const AttributeList &Attr) {
-  if (!isa<RecordDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-    << Attr.getName()
-    << (S.getLangOpts().CPlusPlus ? ExpectedStructOrUnionOrClass
-                                  : ExpectedStructOrUnion);
-    return;
-  }
-  
-  if (Attr.getNumArgs() != 1) {
-    S.Diag(D->getLocStart(), diag::err_objc_bridge_not_id);
-    return;
-  }
-  IdentifierLoc *Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : 0;
+  IdentifierLoc * Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : 0;
+
   if (!Parm) {
-    S.Diag(D->getLocStart(), diag::err_objc_bridge_not_id);
+    S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
     return;
   }
   
   D->addAttr(::new (S.Context)
-             ObjCBridgeAttr(Attr.getRange(), S.Context, Parm ? Parm->Ident : 0,
+             ObjCBridgeAttr(Attr.getRange(), S.Context, Parm->Ident,
                            Attr.getAttributeSpellingListIndex()));
 }
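
The handler now only checks the argument shape; a non-identifier argument is rejected with err_objc_attr_not_id. A typical (hypothetical) use:

    typedef struct __attribute__((objc_bridge(NSError))) __CFError *CFErrorRef;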
 
+static void handleObjCBridgeMutableAttr(Sema &S, Scope *Sc, Decl *D,
+                                        const AttributeList &Attr) {
+  IdentifierLoc * Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : 0;
+  
+  if (!Parm) {
+    S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
+    return;
+  }
+  
+  D->addAttr(::new (S.Context)
+             ObjCBridgeMutableAttr(Attr.getRange(), S.Context, Parm->Ident,
+                            Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCBridgeRelatedAttr(Sema &S, Scope *Sc, Decl *D,
+                                 const AttributeList &Attr) {
+  IdentifierInfo *RelatedClass =
+    Attr.isArgIdent(0) ? Attr.getArgAsIdent(0)->Ident : 0;
+  if (!RelatedClass) {
+    S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
+    return;
+  }
+  IdentifierInfo *ClassMethod =
+    Attr.getArgAsIdent(1) ? Attr.getArgAsIdent(1)->Ident : 0;
+  IdentifierInfo *InstanceMethod =
+    Attr.getArgAsIdent(2) ? Attr.getArgAsIdent(2)->Ident : 0;
+  D->addAttr(::new (S.Context)
+             ObjCBridgeRelatedAttr(Attr.getRange(), S.Context, RelatedClass,
+                                   ClassMethod, InstanceMethod,
+                                   Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCDesignatedInitializer(Sema &S, Decl *D,
+                                            const AttributeList &Attr) {
+  ObjCInterfaceDecl *IFace;
+  if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
+    IFace = CatDecl->getClassInterface();
+  else
+    IFace = cast<ObjCInterfaceDecl>(D->getDeclContext());
+  IFace->setHasDesignatedInitializers();
+  D->addAttr(::new (S.Context)
+                  ObjCDesignatedInitializerAttr(Attr.getRange(), S.Context,
+                                         Attr.getAttributeSpellingListIndex()));
+}
+
 static void handleObjCOwnershipAttr(Sema &S, Decl *D,
                                     const AttributeList &Attr) {
   if (hasDeclarator(D)) return;
@@ -4434,12 +3579,6 @@
 
 static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
                                           const AttributeList &Attr) {
-  if (!isa<VarDecl>(D) && !isa<FieldDecl>(D)) {
-    S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
-      << Attr.getRange() << Attr.getName() << ExpectedVariable;
-    return;
-  }
-
   ValueDecl *vd = cast<ValueDecl>(D);
   QualType type = vd->getType();
 
@@ -4483,19 +3622,18 @@
 // Microsoft specific attribute handlers.
 //===----------------------------------------------------------------------===//
 
-// Check if MS extensions or some other language extensions are enabled.  If
-// not, issue a diagnostic that the given attribute is unused.
-static bool checkMicrosoftExt(Sema &S, const AttributeList &Attr,
-                              bool OtherExtension = false) {
-  if (S.LangOpts.MicrosoftExt || OtherExtension)
-    return true;
-  S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
-  return false;
-}
-
 static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!checkMicrosoftExt(S, Attr, S.LangOpts.Borland))
+  if (!S.LangOpts.CPlusPlus) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
+      << Attr.getName() << AttributeLangSupport::C;
     return;
+  }
+
+  if (!isa<CXXRecordDecl>(D)) {
+    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+      << Attr.getName() << ExpectedClass;
+    return;
+  }
 
   StringRef StrRef;
   SourceLocation LiteralLoc;
@@ -4529,55 +3667,305 @@
                                         Attr.getAttributeSpellingListIndex()));
 }
 
-static void handleInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!checkMicrosoftExt(S, Attr))
+static void handleMSInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  if (!S.LangOpts.CPlusPlus) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
+      << Attr.getName() << AttributeLangSupport::C;
     return;
-
-  AttributeList::Kind Kind = Attr.getKind();
-  if (Kind == AttributeList::AT_SingleInheritance)
-    D->addAttr(
-        ::new (S.Context)
-               SingleInheritanceAttr(Attr.getRange(), S.Context,
-                                     Attr.getAttributeSpellingListIndex()));
-  else if (Kind == AttributeList::AT_MultipleInheritance)
-    D->addAttr(
-        ::new (S.Context)
-               MultipleInheritanceAttr(Attr.getRange(), S.Context,
-                                       Attr.getAttributeSpellingListIndex()));
-  else if (Kind == AttributeList::AT_VirtualInheritance)
-    D->addAttr(
-        ::new (S.Context)
-               VirtualInheritanceAttr(Attr.getRange(), S.Context,
-                                      Attr.getAttributeSpellingListIndex()));
+  }
+  MSInheritanceAttr *IA = S.mergeMSInheritanceAttr(
+      D, Attr.getRange(), /*BestCase=*/true,
+      Attr.getAttributeSpellingListIndex(),
+      (MSInheritanceAttr::Spelling)Attr.getSemanticSpelling());
+  if (IA)
+    D->addAttr(IA);
 }
 
-static void handlePortabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!checkMicrosoftExt(S, Attr))
+static void handleARMInterruptAttr(Sema &S, Decl *D,
+                                   const AttributeList &Attr) {
+  // Check the attribute arguments.
+  if (Attr.getNumArgs() > 1) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+      << Attr.getName() << 1;
+    return;
+  }
+
+  StringRef Str;
+  SourceLocation ArgLoc;
+
+  if (Attr.getNumArgs() == 0)
+    Str = "";
+  else if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &ArgLoc))
     return;
 
-  AttributeList::Kind Kind = Attr.getKind();
-    if (Kind == AttributeList::AT_Win64)
-    D->addAttr(
-        ::new (S.Context) Win64Attr(Attr.getRange(), S.Context,
-                                    Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleForceInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!checkMicrosoftExt(S, Attr))
+  ARMInterruptAttr::InterruptType Kind;
+  if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
+    S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+      << Attr.getName() << Str << ArgLoc;
     return;
+  }
+
+  unsigned Index = Attr.getAttributeSpellingListIndex();
   D->addAttr(::new (S.Context)
-             ForceInlineAttr(Attr.getRange(), S.Context,
-                             Attr.getAttributeSpellingListIndex()));
+             ARMInterruptAttr(Attr.getLoc(), S.Context, Kind, Index));
 }
 
-static void handleSelectAnyAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-  if (!checkMicrosoftExt(S, Attr))
+static void handleMSP430InterruptAttr(Sema &S, Decl *D,
+                                      const AttributeList &Attr) {
+  if (!checkAttributeNumArgs(S, Attr, 1))
     return;
-  // Check linkage after possibly merging declaratinos.  See
-  // checkAttributesAfterMerging().
+
+  if (!Attr.isArgExpr(0)) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName()
+      << AANT_ArgumentIntegerConstant;
+    return;    
+  }
+
+  // FIXME: Check for decl - it should be void ()(void).
+
+  Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+  llvm::APSInt NumParams(32);
+  if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+      << Attr.getName() << AANT_ArgumentIntegerConstant
+      << NumParamsExpr->getSourceRange();
+    return;
+  }
+
+  unsigned Num = NumParams.getLimitedValue(255);
+  if ((Num & 1) || Num > 30) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+      << Attr.getName() << (int)NumParams.getSExtValue()
+      << NumParamsExpr->getSourceRange();
+    return;
+  }
+
   D->addAttr(::new (S.Context)
-             SelectAnyAttr(Attr.getRange(), S.Context,
-                           Attr.getAttributeSpellingListIndex()));
+              MSP430InterruptAttr(Attr.getLoc(), S.Context, Num,
+                                  Attr.getAttributeSpellingListIndex()));
+  D->addAttr(UsedAttr::CreateImplicit(S.Context));
+}
+
+static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  // Dispatch the interrupt attribute based on the current target.
+  if (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::msp430)
+    handleMSP430InterruptAttr(S, D, Attr);
+  else
+    handleARMInterruptAttr(S, D, Attr);
+}
+
+static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
+                                              const AttributeList& Attr) {
+  // If we try to apply it to a function pointer, don't warn, but don't
+  // do anything, either. It doesn't matter anyway, because there's nothing
+  // special about calling a force_align_arg_pointer function.
+  ValueDecl *VD = dyn_cast<ValueDecl>(D);
+  if (VD && VD->getType()->isFunctionPointerType())
+    return;
+  // Also don't warn on function pointer typedefs.
+  TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
+  if (TD && (TD->getUnderlyingType()->isFunctionPointerType() ||
+    TD->getUnderlyingType()->isFunctionType()))
+    return;
+  // Attribute can only be applied to function types.
+  if (!isa<FunctionDecl>(D)) {
+    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+      << Attr.getName() << /* function */0;
+    return;
+  }
+
+  D->addAttr(::new (S.Context)
+              X86ForceAlignArgPointerAttr(Attr.getRange(), S.Context,
+                                        Attr.getAttributeSpellingListIndex()));
+}
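
A sketch of the three cases this handler distinguishes (hypothetical declarations):

    void f(void) __attribute__((force_align_arg_pointer));             // attribute applied
    typedef void (*fp)(void) __attribute__((force_align_arg_pointer)); // silently dropped on
                                                                       // function-pointer typedefs
    int g __attribute__((force_align_arg_pointer));                    // warning: functions only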
+
+DLLImportAttr *Sema::mergeDLLImportAttr(Decl *D, SourceRange Range,
+                                        unsigned AttrSpellingListIndex) {
+  if (D->hasAttr<DLLExportAttr>()) {
+    Diag(Range.getBegin(), diag::warn_attribute_ignored) << "'dllimport'";
+    return NULL;
+  }
+
+  if (D->hasAttr<DLLImportAttr>())
+    return NULL;
+
+  return ::new (Context) DLLImportAttr(Range, Context, AttrSpellingListIndex);
+}
+
+static void handleDLLImportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  // Attribute can be applied only to functions or variables.
+  FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+  if (!FD && !isa<VarDecl>(D)) {
+    // Apparently Visual C++ thinks it is okay to not emit a warning
+    // in this case, so only emit a warning when -fms-extensions is not
+    // specified.
+    if (!S.getLangOpts().MicrosoftExt)
+      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+        << Attr.getName() << ExpectedVariableOrFunction;
+    return;
+  }
+
+  // Currently, the dllimport attribute is ignored for inlined functions.
+  // Warning is emitted.
+  if (FD && FD->isInlineSpecified()) {
+    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+    return;
+  }
+
+  unsigned Index = Attr.getAttributeSpellingListIndex();
+  DLLImportAttr *NewAttr = S.mergeDLLImportAttr(D, Attr.getRange(), Index);
+  if (NewAttr)
+    D->addAttr(NewAttr);
+}
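
A sketch of the resulting behaviour (hypothetical declarations, Windows-style target assumed):

    __declspec(dllimport) int get(void);                       // merged and attached as usual
    __declspec(dllimport) inline int zero(void) { return 0; }  // inline: dropped with
                                                               // warn_attribute_ignored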
+
+DLLExportAttr *Sema::mergeDLLExportAttr(Decl *D, SourceRange Range,
+                                        unsigned AttrSpellingListIndex) {
+  if (DLLImportAttr *Import = D->getAttr<DLLImportAttr>()) {
+    Diag(Import->getLocation(), diag::warn_attribute_ignored) << Import;
+    D->dropAttr<DLLImportAttr>();
+  }
+
+  if (D->hasAttr<DLLExportAttr>())
+    return NULL;
+
+  return ::new (Context) DLLExportAttr(Range, Context, AttrSpellingListIndex);
+}
+
+static void handleDLLExportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  // Currently, the dllexport attribute is ignored for inlined functions, unless
+  // the -fkeep-inline-functions flag has been used. Warning is emitted.
+  if (isa<FunctionDecl>(D) && cast<FunctionDecl>(D)->isInlineSpecified()) {
+    // FIXME: ... unless the -fkeep-inline-functions flag has been used.
+    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+    return;
+  }
+
+  unsigned Index = Attr.getAttributeSpellingListIndex();
+  DLLExportAttr *NewAttr = S.mergeDLLExportAttr(D, Attr.getRange(), Index);
+  if (NewAttr)
+    D->addAttr(NewAttr);
+}
+
+MSInheritanceAttr *
+Sema::mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
+                             unsigned AttrSpellingListIndex,
+                             MSInheritanceAttr::Spelling SemanticSpelling) {
+  if (MSInheritanceAttr *IA = D->getAttr<MSInheritanceAttr>()) {
+    if (IA->getSemanticSpelling() == SemanticSpelling)
+      return 0;
+    Diag(IA->getLocation(), diag::err_mismatched_ms_inheritance)
+        << 1 /*previous declaration*/;
+    Diag(Range.getBegin(), diag::note_previous_ms_inheritance);
+    D->dropAttr<MSInheritanceAttr>();
+  }
+
+  CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
+  if (RD->hasDefinition()) {
+    if (checkMSInheritanceAttrOnDefinition(RD, Range, BestCase,
+                                           SemanticSpelling)) {
+      return 0;
+    }
+  } else {
+    if (isa<ClassTemplatePartialSpecializationDecl>(RD)) {
+      Diag(Range.getBegin(), diag::warn_ignored_ms_inheritance)
+          << 1 /*partial specialization*/;
+      return 0;
+    }
+    if (RD->getDescribedClassTemplate()) {
+      Diag(Range.getBegin(), diag::warn_ignored_ms_inheritance)
+          << 0 /*primary template*/;
+      return 0;
+    }
+  }
+
+  return ::new (Context)
+      MSInheritanceAttr(Range, Context, BestCase, AttrSpellingListIndex);
+}
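+
+// The inheritance-model keywords merged above pick a member-pointer
+// representation before the class is defined (illustrative; requires the
+// Microsoft extensions):
+//   class __single_inheritance S;
+//   int S::*pm;                     // representation fixed by the keyword
+//   class __virtual_inheritance S;  // error: mismatched inheritance model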
+
+static void handleCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  // The capability attributes take a single string parameter for the name of
+  // the capability they represent. The lockable attribute does not take any
+  // parameters. However, semantically, both attributes represent the same
+  // concept, and so they use the same semantic attribute. Eventually, the
+  // lockable attribute will be removed.
+  //
+  // For backwards compatibility, any capability which has no specified string
+  // literal will be considered a "mutex."
+  StringRef N("mutex");
+  SourceLocation LiteralLoc;
+  if (Attr.getKind() == AttributeList::AT_Capability &&
+      !S.checkStringLiteralArgumentAttr(Attr, 0, N, &LiteralLoc))
+    return;
+
+  // Currently, there are only two names allowed for a capability: role and
+  // mutex (case insensitive). Diagnose other capability names.
+  if (!N.equals_lower("mutex") && !N.equals_lower("role"))
+    S.Diag(LiteralLoc, diag::warn_invalid_capability_name) << N;
+
+  D->addAttr(::new (S.Context) CapabilityAttr(Attr.getRange(), S.Context, N,
+                                        Attr.getAttributeSpellingListIndex()));
+}
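+
+// Illustrative use of the capability/lockable spellings handled above
+// (thread safety analysis; sketch only):
+//   struct __attribute__((capability("mutex"))) Mutex {
+//     void Acquire() __attribute__((acquire_capability()));
+//     void Release() __attribute__((release_capability()));
+//   };
+//   struct __attribute__((capability("fancy"))) Odd {}; // warning: bad name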
+
+static void handleAssertCapabilityAttr(Sema &S, Decl *D,
+                                       const AttributeList &Attr) {
+  D->addAttr(::new (S.Context) AssertCapabilityAttr(Attr.getRange(), S.Context,
+                                                    Attr.getArgAsExpr(0),
+                                        Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAcquireCapabilityAttr(Sema &S, Decl *D,
+                                        const AttributeList &Attr) {
+  SmallVector<Expr*, 1> Args;
+  if (!checkLockFunAttrCommon(S, D, Attr, Args))
+    return;
+
+  D->addAttr(::new (S.Context) AcquireCapabilityAttr(Attr.getRange(),
+                                                     S.Context,
+                                                     Args.data(), Args.size(),
+                                        Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleTryAcquireCapabilityAttr(Sema &S, Decl *D,
+                                           const AttributeList &Attr) {
+  SmallVector<Expr*, 2> Args;
+  if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
+    return;
+
+  D->addAttr(::new (S.Context) TryAcquireCapabilityAttr(Attr.getRange(),
+                                                        S.Context,
+                                                        Attr.getArgAsExpr(0),
+                                                        Args.data(),
+                                                        Args.size(),
+                                        Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleReleaseCapabilityAttr(Sema &S, Decl *D,
+                                        const AttributeList &Attr) {
+  // Check that all arguments are lockable objects.
+  SmallVector<Expr *, 1> Args;
+  checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 0, true);
+
+  D->addAttr(::new (S.Context) ReleaseCapabilityAttr(
+      Attr.getRange(), S.Context, Args.data(), Args.size(),
+      Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleRequiresCapabilityAttr(Sema &S, Decl *D,
+                                         const AttributeList &Attr) {
+  if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+    return;
+
+  // Check that all arguments are lockable objects.
+  SmallVector<Expr*, 1> Args;
+  checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+  if (Args.empty())
+    return;
+
+  RequiresCapabilityAttr *RCA = ::new (S.Context)
+    RequiresCapabilityAttr(Attr.getRange(), S.Context, Args.data(),
+                           Args.size(), Attr.getAttributeSpellingListIndex());
+
+  D->addAttr(RCA);
 }
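+
+// A small sketch combining the acquire/release/try-acquire/requires handlers
+// above (thread safety analysis attributes; illustrative only):
+//   Mutex mu;
+//   int counter __attribute__((guarded_by(mu)));
+//   void lock_mu(void)     __attribute__((acquire_capability(mu)));
+//   void unlock_mu(void)   __attribute__((release_capability(mu)));
+//   bool try_lock_mu(void) __attribute__((try_acquire_capability(true, mu)));
+//   void bump(void)        __attribute__((requires_capability(mu)));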
 
 /// Handles semantic checking for features that are common to all attributes,
@@ -4591,15 +3979,24 @@
   // We also bail on unknown and ignored attributes because those are handled
   // as part of the target-specific handling logic.
   if (Attr.hasCustomParsing() ||
-      Attr.getKind() == AttributeList::UnknownAttribute ||
-      Attr.getKind() == AttributeList::IgnoredAttribute)
+      Attr.getKind() == AttributeList::UnknownAttribute)
     return false;
 
+  // Check whether the attribute requires specific language extensions to be
+  // enabled.
+  if (!Attr.diagnoseLangOpts(S))
+    return true;
+
   // If there are no optional arguments, then checking for the argument count
   // is trivial.
   if (Attr.getMinArgs() == Attr.getMaxArgs() &&
       !checkAttributeNumArgs(S, Attr, Attr.getMinArgs()))
     return true;
+
+  // Check whether the attribute appertains to the given subject.
+  if (!Attr.diagnoseAppertainsTo(S, D))
+    return true;
+
   return false;
 }
 
@@ -4613,7 +4010,7 @@
 static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
                                  const AttributeList &Attr,
                                  bool IncludeCXX11Attributes) {
-  if (Attr.isInvalid())
+  if (Attr.isInvalid() || Attr.getKind() == AttributeList::IgnoredAttribute)
     return;
 
   // Ignore C++11 attributes on declarator chunks: they appertain to the type
@@ -4621,146 +4018,277 @@
   if (Attr.isCXX11Attribute() && !IncludeCXX11Attributes)
     return;
 
+  // Unknown attributes are automatically warned on. Target-specific attributes
+  // which do not apply to the current target architecture are treated as
+  // though they were unknown attributes.
+  if (Attr.getKind() == AttributeList::UnknownAttribute ||
+      !Attr.existsInTarget(S.Context.getTargetInfo().getTriple())) {
+    S.Diag(Attr.getLoc(), Attr.isDeclspecAttribute()
+                              ? diag::warn_unhandled_ms_attribute_ignored
+                              : diag::warn_unknown_attribute_ignored)
+        << Attr.getName();
+    return;
+  }
+
   if (handleCommonAttributeFeatures(S, scope, D, Attr))
     return;
 
   switch (Attr.getKind()) {
-  case AttributeList::AT_IBAction:    handleIBAction(S, D, Attr); break;
-  case AttributeList::AT_IBOutlet:    handleIBOutlet(S, D, Attr); break;
-  case AttributeList::AT_IBOutletCollection:
-    handleIBOutletCollection(S, D, Attr); break;
-  case AttributeList::AT_AddressSpace:
-  case AttributeList::AT_ObjCGC:
-  case AttributeList::AT_VectorSize:
-  case AttributeList::AT_NeonVectorType:
-  case AttributeList::AT_NeonPolyVectorType:
-  case AttributeList::AT_Ptr32:
-  case AttributeList::AT_Ptr64:
-  case AttributeList::AT_SPtr:
-  case AttributeList::AT_UPtr:
-    // Ignore these, these are type attributes, handled by
-    // ProcessTypeAttributes.
+  default:
+    // Type attributes are handled elsewhere; silently move on.
+    assert(Attr.isTypeAttr() && "Non-type attribute not handled");
     break;
-  case AttributeList::AT_Alias:       handleAliasAttr       (S, D, Attr); break;
-  case AttributeList::AT_Aligned:     handleAlignedAttr     (S, D, Attr); break;
-  case AttributeList::AT_AllocSize:   handleAllocSizeAttr   (S, D, Attr); break;
+  case AttributeList::AT_Interrupt:
+    handleInterruptAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_X86ForceAlignArgPointer:
+    handleX86ForceAlignArgPointerAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_DLLExport:
+    handleDLLExportAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_DLLImport:
+    handleDLLImportAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Mips16:
+    handleSimpleAttribute<Mips16Attr>(S, D, Attr);
+    break;
+  case AttributeList::AT_NoMips16:
+    handleSimpleAttribute<NoMips16Attr>(S, D, Attr);
+    break;
+  case AttributeList::AT_IBAction:
+    handleSimpleAttribute<IBActionAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_IBOutlet:
+    handleIBOutlet(S, D, Attr);
+    break;
+  case AttributeList::AT_IBOutletCollection:
+    handleIBOutletCollection(S, D, Attr);
+    break;
+  case AttributeList::AT_Alias:
+    handleAliasAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Aligned:
+    handleAlignedAttr(S, D, Attr);
+    break;
   case AttributeList::AT_AlwaysInline:
-    handleAlwaysInlineAttr  (S, D, Attr); break;
+    handleAlwaysInlineAttr(S, D, Attr);
+    break;
   case AttributeList::AT_AnalyzerNoReturn:
-    handleAnalyzerNoReturnAttr  (S, D, Attr); break;
-  case AttributeList::AT_TLSModel:    handleTLSModelAttr    (S, D, Attr); break;
-  case AttributeList::AT_Annotate:    handleAnnotateAttr    (S, D, Attr); break;
-  case AttributeList::AT_Availability:handleAvailabilityAttr(S, D, Attr); break;
+    handleAnalyzerNoReturnAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_TLSModel:
+    handleTLSModelAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Annotate:
+    handleAnnotateAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Availability:
+    handleAvailabilityAttr(S, D, Attr);
+    break;
   case AttributeList::AT_CarriesDependency:
     handleDependencyAttr(S, scope, D, Attr);
     break;
-  case AttributeList::AT_Common:      handleCommonAttr      (S, D, Attr); break;
-  case AttributeList::AT_CUDAConstant:handleConstantAttr    (S, D, Attr); break;
-  case AttributeList::AT_Constructor: handleConstructorAttr (S, D, Attr); break;
+  case AttributeList::AT_Common:
+    handleCommonAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_CUDAConstant:
+    handleSimpleAttribute<CUDAConstantAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_Constructor:
+    handleConstructorAttr(S, D, Attr);
+    break;
   case AttributeList::AT_CXX11NoReturn:
-    handleCXX11NoReturnAttr(S, D, Attr);
+    handleSimpleAttribute<CXX11NoReturnAttr>(S, D, Attr);
     break;
   case AttributeList::AT_Deprecated:
     handleAttrWithMessage<DeprecatedAttr>(S, D, Attr);
     break;
-  case AttributeList::AT_Destructor:  handleDestructorAttr  (S, D, Attr); break;
+  case AttributeList::AT_Destructor:
+    handleDestructorAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_EnableIf:
+    handleEnableIfAttr(S, D, Attr);
+    break;
   case AttributeList::AT_ExtVectorType:
     handleExtVectorTypeAttr(S, scope, D, Attr);
     break;
   case AttributeList::AT_MinSize:
-    handleMinSizeAttr(S, D, Attr);
+    handleSimpleAttribute<MinSizeAttr>(S, D, Attr);
     break;
-  case AttributeList::AT_Format:      handleFormatAttr      (S, D, Attr); break;
-  case AttributeList::AT_FormatArg:   handleFormatArgAttr   (S, D, Attr); break;
-  case AttributeList::AT_CUDAGlobal:  handleGlobalAttr      (S, D, Attr); break;
-  case AttributeList::AT_CUDADevice:  handleDeviceAttr      (S, D, Attr); break;
-  case AttributeList::AT_CUDAHost:    handleHostAttr        (S, D, Attr); break;
-  case AttributeList::AT_GNUInline:   handleGNUInlineAttr   (S, D, Attr); break;
+  case AttributeList::AT_OptimizeNone:
+    handleOptimizeNoneAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Format:
+    handleFormatAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_FormatArg:
+    handleFormatArgAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_CUDAGlobal:
+    handleGlobalAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_CUDADevice:
+    handleSimpleAttribute<CUDADeviceAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_CUDAHost:
+    handleSimpleAttribute<CUDAHostAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_GNUInline:
+    handleGNUInlineAttr(S, D, Attr);
+    break;
   case AttributeList::AT_CUDALaunchBounds:
     handleLaunchBoundsAttr(S, D, Attr);
     break;
-  case AttributeList::AT_Kernel:      handleKernelAttr      (S, D, Attr); break;
-  case AttributeList::AT_Malloc:      handleMallocAttr      (S, D, Attr); break;
-  case AttributeList::AT_MayAlias:    handleMayAliasAttr    (S, D, Attr); break;
-  case AttributeList::AT_Mode:        handleModeAttr        (S, D, Attr); break;
-  case AttributeList::AT_NoCommon:    handleNoCommonAttr    (S, D, Attr); break;
-  case AttributeList::AT_NonNull:     handleNonNullAttr     (S, D, Attr); break;
-  case AttributeList::AT_Overloadable:handleOverloadableAttr(S, D, Attr); break;
-  case AttributeList::AT_ownership_returns:
-  case AttributeList::AT_ownership_takes:
-  case AttributeList::AT_ownership_holds:
-      handleOwnershipAttr     (S, D, Attr); break;
-  case AttributeList::AT_Cold:        handleColdAttr        (S, D, Attr); break;
-  case AttributeList::AT_Hot:         handleHotAttr         (S, D, Attr); break;
-  case AttributeList::AT_Naked:       handleNakedAttr       (S, D, Attr); break;
-  case AttributeList::AT_NoReturn:    handleNoReturnAttr    (S, D, Attr); break;
-  case AttributeList::AT_NoThrow:     handleNothrowAttr     (S, D, Attr); break;
-  case AttributeList::AT_CUDAShared:  handleSharedAttr      (S, D, Attr); break;
-  case AttributeList::AT_VecReturn:   handleVecReturnAttr   (S, D, Attr); break;
+  case AttributeList::AT_Kernel:
+    handleKernelAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Malloc:
+    handleMallocAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_MayAlias:
+    handleSimpleAttribute<MayAliasAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_Mode:
+    handleModeAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_NoCommon:
+    handleSimpleAttribute<NoCommonAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_NonNull:
+    if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(D))
+      handleNonNullAttrParameter(S, PVD, Attr);
+    else
+      handleNonNullAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_ReturnsNonNull:
+    handleReturnsNonNullAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Overloadable:
+    handleSimpleAttribute<OverloadableAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_Ownership:
+    handleOwnershipAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Cold:
+    handleColdAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Hot:
+    handleHotAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Naked:
+    handleSimpleAttribute<NakedAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_NoReturn:
+    handleNoReturnAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_NoThrow:
+    handleSimpleAttribute<NoThrowAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_CUDAShared:
+    handleSimpleAttribute<CUDASharedAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_VecReturn:
+    handleVecReturnAttr(S, D, Attr);
+    break;
 
   case AttributeList::AT_ObjCOwnership:
-    handleObjCOwnershipAttr(S, D, Attr); break;
+    handleObjCOwnershipAttr(S, D, Attr);
+    break;
   case AttributeList::AT_ObjCPreciseLifetime:
-    handleObjCPreciseLifetimeAttr(S, D, Attr); break;
+    handleObjCPreciseLifetimeAttr(S, D, Attr);
+    break;
 
   case AttributeList::AT_ObjCReturnsInnerPointer:
-    handleObjCReturnsInnerPointerAttr(S, D, Attr); break;
+    handleObjCReturnsInnerPointerAttr(S, D, Attr);
+    break;
 
   case AttributeList::AT_ObjCRequiresSuper:
-      handleObjCRequiresSuperAttr(S, D, Attr); break;
-      
-  case AttributeList::AT_NSBridged:
-    handleNSBridgedAttr(S, scope, D, Attr); break;
-      
+    handleObjCRequiresSuperAttr(S, D, Attr);
+    break;
+
   case AttributeList::AT_ObjCBridge:
-    handleObjCBridgeAttr(S, scope, D, Attr); break;
+    handleObjCBridgeAttr(S, scope, D, Attr);
+    break;
+
+  case AttributeList::AT_ObjCBridgeMutable:
+    handleObjCBridgeMutableAttr(S, scope, D, Attr);
+    break;
+
+  case AttributeList::AT_ObjCBridgeRelated:
+    handleObjCBridgeRelatedAttr(S, scope, D, Attr);
+    break;
+
+  case AttributeList::AT_ObjCDesignatedInitializer:
+    handleObjCDesignatedInitializer(S, D, Attr);
+    break;
 
   case AttributeList::AT_CFAuditedTransfer:
+    handleCFAuditedTransferAttr(S, D, Attr);
+    break;
   case AttributeList::AT_CFUnknownTransfer:
-    handleCFTransferAttr(S, D, Attr); break;
+    handleCFUnknownTransferAttr(S, D, Attr);
+    break;
 
-  // Checker-specific.
   case AttributeList::AT_CFConsumed:
-  case AttributeList::AT_NSConsumed:  handleNSConsumedAttr  (S, D, Attr); break;
+  case AttributeList::AT_NSConsumed:
+    handleNSConsumedAttr(S, D, Attr);
+    break;
   case AttributeList::AT_NSConsumesSelf:
-    handleNSConsumesSelfAttr(S, D, Attr); break;
+    handleSimpleAttribute<NSConsumesSelfAttr>(S, D, Attr);
+    break;
 
   case AttributeList::AT_NSReturnsAutoreleased:
   case AttributeList::AT_NSReturnsNotRetained:
   case AttributeList::AT_CFReturnsNotRetained:
   case AttributeList::AT_NSReturnsRetained:
   case AttributeList::AT_CFReturnsRetained:
-    handleNSReturnsRetainedAttr(S, D, Attr); break;
-
+    handleNSReturnsRetainedAttr(S, D, Attr);
+    break;
   case AttributeList::AT_WorkGroupSizeHint:
+    handleWorkGroupSize<WorkGroupSizeHintAttr>(S, D, Attr);
+    break;
   case AttributeList::AT_ReqdWorkGroupSize:
-    handleWorkGroupSize(S, D, Attr); break;
-
+    handleWorkGroupSize<ReqdWorkGroupSizeAttr>(S, D, Attr);
+    break;
   case AttributeList::AT_VecTypeHint:
-    handleVecTypeHint(S, D, Attr); break;
+    handleVecTypeHint(S, D, Attr);
+    break;
 
-  case AttributeList::AT_InitPriority: 
-      handleInitPriorityAttr(S, D, Attr); break;
-      
-  case AttributeList::AT_Packed:      handlePackedAttr      (S, D, Attr); break;
-  case AttributeList::AT_Section:     handleSectionAttr     (S, D, Attr); break;
+  case AttributeList::AT_InitPriority:
+    handleInitPriorityAttr(S, D, Attr);
+    break;
+
+  case AttributeList::AT_Packed:
+    handlePackedAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Section:
+    handleSectionAttr(S, D, Attr);
+    break;
   case AttributeList::AT_Unavailable:
     handleAttrWithMessage<UnavailableAttr>(S, D, Attr);
     break;
-  case AttributeList::AT_ArcWeakrefUnavailable: 
-    handleArcWeakrefUnavailableAttr (S, D, Attr); 
+  case AttributeList::AT_ArcWeakrefUnavailable:
+    handleSimpleAttribute<ArcWeakrefUnavailableAttr>(S, D, Attr);
     break;
   case AttributeList::AT_ObjCRootClass:
-    handleObjCRootClassAttr(S, D, Attr);
+    handleSimpleAttribute<ObjCRootClassAttr>(S, D, Attr);
     break;
-  case AttributeList::AT_ObjCRequiresPropertyDefs: 
-    handleObjCRequiresPropertyDefsAttr (S, D, Attr); 
+  case AttributeList::AT_ObjCExplicitProtocolImpl:
+    handleObjCSuppresProtocolAttr(S, D, Attr);
     break;
-  case AttributeList::AT_Unused:      handleUnusedAttr      (S, D, Attr); break;
+  case AttributeList::AT_ObjCRequiresPropertyDefs:
+    handleSimpleAttribute<ObjCRequiresPropertyDefsAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_Unused:
+    handleSimpleAttribute<UnusedAttr>(S, D, Attr);
+    break;
   case AttributeList::AT_ReturnsTwice:
-    handleReturnsTwiceAttr(S, D, Attr);
+    handleSimpleAttribute<ReturnsTwiceAttr>(S, D, Attr);
     break;
-  case AttributeList::AT_Used:        handleUsedAttr        (S, D, Attr); break;
+  case AttributeList::AT_Used:
+    handleUsedAttr(S, D, Attr);
+    break;
   case AttributeList::AT_Visibility:
     handleVisibilityAttr(S, D, Attr, false);
     break;
@@ -4768,36 +4296,58 @@
     handleVisibilityAttr(S, D, Attr, true);
     break;
   case AttributeList::AT_WarnUnused:
-    handleWarnUnusedAttr(S, D, Attr);
+    handleSimpleAttribute<WarnUnusedAttr>(S, D, Attr);
     break;
-  case AttributeList::AT_WarnUnusedResult: handleWarnUnusedResult(S, D, Attr);
+  case AttributeList::AT_WarnUnusedResult:
+    handleWarnUnusedResult(S, D, Attr);
     break;
-  case AttributeList::AT_Weak:        handleWeakAttr        (S, D, Attr); break;
-  case AttributeList::AT_WeakRef:     handleWeakRefAttr     (S, D, Attr); break;
-  case AttributeList::AT_WeakImport:  handleWeakImportAttr  (S, D, Attr); break;
+  case AttributeList::AT_Weak:
+    handleSimpleAttribute<WeakAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_WeakRef:
+    handleWeakRefAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_WeakImport:
+    handleWeakImportAttr(S, D, Attr);
+    break;
   case AttributeList::AT_TransparentUnion:
     handleTransparentUnionAttr(S, D, Attr);
     break;
   case AttributeList::AT_ObjCException:
-    handleObjCExceptionAttr(S, D, Attr);
+    handleSimpleAttribute<ObjCExceptionAttr>(S, D, Attr);
     break;
   case AttributeList::AT_ObjCMethodFamily:
     handleObjCMethodFamilyAttr(S, D, Attr);
     break;
-  case AttributeList::AT_ObjCNSObject:handleObjCNSObject    (S, D, Attr); break;
-  case AttributeList::AT_Blocks:      handleBlocksAttr      (S, D, Attr); break;
-  case AttributeList::AT_Sentinel:    handleSentinelAttr    (S, D, Attr); break;
-  case AttributeList::AT_Const:       handleConstAttr       (S, D, Attr); break;
-  case AttributeList::AT_Pure:        handlePureAttr        (S, D, Attr); break;
-  case AttributeList::AT_Cleanup:     handleCleanupAttr     (S, D, Attr); break;
-  case AttributeList::AT_NoDebug:     handleNoDebugAttr     (S, D, Attr); break;
-  case AttributeList::AT_NoInline:    handleNoInlineAttr    (S, D, Attr); break;
-  case AttributeList::AT_Regparm:     handleRegparmAttr     (S, D, Attr); break;
-  case AttributeList::IgnoredAttribute:
-    // Just ignore
+  case AttributeList::AT_ObjCNSObject:
+    handleObjCNSObject(S, D, Attr);
     break;
-  case AttributeList::AT_NoInstrumentFunction:  // Interacts with -pg.
-    handleNoInstrumentFunctionAttr(S, D, Attr);
+  case AttributeList::AT_Blocks:
+    handleBlocksAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Sentinel:
+    handleSentinelAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_Const:
+    handleSimpleAttribute<ConstAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_Pure:
+    handleSimpleAttribute<PureAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_Cleanup:
+    handleCleanupAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_NoDebug:
+    handleNoDebugAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_NoDuplicate:
+    handleSimpleAttribute<NoDuplicateAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_NoInline:
+    handleSimpleAttribute<NoInlineAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_NoInstrumentFunction: // Interacts with -pg.
+    handleSimpleAttribute<NoInstrumentFunctionAttr>(S, D, Attr);
     break;
   case AttributeList::AT_StdCall:
   case AttributeList::AT_CDecl:
@@ -4812,32 +4362,24 @@
     handleCallConvAttr(S, D, Attr);
     break;
   case AttributeList::AT_OpenCLKernel:
-    handleOpenCLKernelAttr(S, D, Attr);
+    handleSimpleAttribute<OpenCLKernelAttr>(S, D, Attr);
     break;
   case AttributeList::AT_OpenCLImageAccess:
-    handleOpenCLImageAccessAttr(S, D, Attr);
+    handleSimpleAttribute<OpenCLImageAccessAttr>(S, D, Attr);
     break;
 
   // Microsoft attributes:
   case AttributeList::AT_MsStruct:
-    handleMsStructAttr(S, D, Attr);
+    handleSimpleAttribute<MsStructAttr>(S, D, Attr);
     break;
   case AttributeList::AT_Uuid:
     handleUuidAttr(S, D, Attr);
     break;
-  case AttributeList::AT_SingleInheritance:
-  case AttributeList::AT_MultipleInheritance:
-  case AttributeList::AT_VirtualInheritance:
-    handleInheritanceAttr(S, D, Attr);
-    break;
-  case AttributeList::AT_Win64:
-    handlePortabilityAttr(S, D, Attr);
-    break;
-  case AttributeList::AT_ForceInline:
-    handleForceInlineAttr(S, D, Attr);
+  case AttributeList::AT_MSInheritance:
+    handleMSInheritanceAttr(S, D, Attr);
     break;
   case AttributeList::AT_SelectAny:
-    handleSelectAnyAttr(S, D, Attr);
+    handleSimpleAttribute<SelectAnyAttr>(S, D, Attr);
     break;
 
   // Thread safety attributes:
@@ -4848,28 +4390,25 @@
     handleAssertSharedLockAttr(S, D, Attr);
     break;
   case AttributeList::AT_GuardedVar:
-    handleGuardedVarAttr(S, D, Attr);
+    handleSimpleAttribute<GuardedVarAttr>(S, D, Attr);
     break;
   case AttributeList::AT_PtGuardedVar:
     handlePtGuardedVarAttr(S, D, Attr);
     break;
   case AttributeList::AT_ScopedLockable:
-    handleScopedLockableAttr(S, D, Attr);
+    handleSimpleAttribute<ScopedLockableAttr>(S, D, Attr);
     break;
   case AttributeList::AT_NoSanitizeAddress:
-    handleNoSanitizeAddressAttr(S, D, Attr);
+    handleSimpleAttribute<NoSanitizeAddressAttr>(S, D, Attr);
     break;
   case AttributeList::AT_NoThreadSafetyAnalysis:
-    handleNoThreadSafetyAnalysis(S, D, Attr);
+    handleSimpleAttribute<NoThreadSafetyAnalysisAttr>(S, D, Attr);
     break;
   case AttributeList::AT_NoSanitizeThread:
-    handleNoSanitizeThread(S, D, Attr);
+    handleSimpleAttribute<NoSanitizeThreadAttr>(S, D, Attr);
     break;
   case AttributeList::AT_NoSanitizeMemory:
-    handleNoSanitizeMemory(S, D, Attr);
-    break;
-  case AttributeList::AT_Lockable:
-    handleLockableAttr(S, D, Attr);
+    handleSimpleAttribute<NoSanitizeMemoryAttr>(S, D, Attr);
     break;
   case AttributeList::AT_GuardedBy:
     handleGuardedByAttr(S, D, Attr);
@@ -4877,12 +4416,6 @@
   case AttributeList::AT_PtGuardedBy:
     handlePtGuardedByAttr(S, D, Attr);
     break;
-  case AttributeList::AT_ExclusiveLockFunction:
-    handleExclusiveLockFunctionAttr(S, D, Attr);
-    break;
-  case AttributeList::AT_ExclusiveLocksRequired:
-    handleExclusiveLocksRequiredAttr(S, D, Attr);
-    break;
   case AttributeList::AT_ExclusiveTrylockFunction:
     handleExclusiveTrylockFunctionAttr(S, D, Attr);
     break;
@@ -4892,18 +4425,9 @@
   case AttributeList::AT_LocksExcluded:
     handleLocksExcludedAttr(S, D, Attr);
     break;
-  case AttributeList::AT_SharedLockFunction:
-    handleSharedLockFunctionAttr(S, D, Attr);
-    break;
-  case AttributeList::AT_SharedLocksRequired:
-    handleSharedLocksRequiredAttr(S, D, Attr);
-    break;
   case AttributeList::AT_SharedTrylockFunction:
     handleSharedTrylockFunctionAttr(S, D, Attr);
     break;
-  case AttributeList::AT_UnlockFunction:
-    handleUnlockFunAttr(S, D, Attr);
-    break;
   case AttributeList::AT_AcquiredBefore:
     handleAcquiredBeforeAttr(S, D, Attr);
     break;
@@ -4911,10 +4435,38 @@
     handleAcquiredAfterAttr(S, D, Attr);
     break;
 
+  // Capability analysis attributes.
+  case AttributeList::AT_Capability:
+  case AttributeList::AT_Lockable:
+    handleCapabilityAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_RequiresCapability:
+    handleRequiresCapabilityAttr(S, D, Attr);
+    break;
+
+  case AttributeList::AT_AssertCapability:
+    handleAssertCapabilityAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_AcquireCapability:
+    handleAcquireCapabilityAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_ReleaseCapability:
+    handleReleaseCapabilityAttr(S, D, Attr);
+    break;
+  case AttributeList::AT_TryAcquireCapability:
+    handleTryAcquireCapabilityAttr(S, D, Attr);
+    break;
+
   // Consumed analysis attributes.
   case AttributeList::AT_Consumable:
     handleConsumableAttr(S, D, Attr);
     break;
+  case AttributeList::AT_ConsumableAutoCast:
+    handleSimpleAttribute<ConsumableAutoCastAttr>(S, D, Attr);
+    break;
+  case AttributeList::AT_ConsumableSetOnRead:
+    handleSimpleAttribute<ConsumableSetOnReadAttr>(S, D, Attr);
+    break;
   case AttributeList::AT_CallableWhen:
     handleCallableWhenAttr(S, D, Attr);
     break;
@@ -4938,15 +4490,6 @@
   case AttributeList::AT_TypeTagForDatatype:
     handleTypeTagForDatatypeAttr(S, D, Attr);
     break;
-
-  default:
-    // Ask target about the attribute.
-    const TargetAttributesSema &TargetAttrs = S.getTargetAttributesSema();
-    if (!TargetAttrs.ProcessDeclAttribute(scope, D, Attr, S))
-      S.Diag(Attr.getLoc(), Attr.isDeclspecAttribute() ? 
-             diag::warn_unhandled_ms_attribute_ignored : 
-             diag::warn_unknown_attribute_ignored) << Attr.getName();
-    break;
   }
 }
 
@@ -4958,15 +4501,32 @@
   for (const AttributeList* l = AttrList; l; l = l->getNext())
     ProcessDeclAttribute(*this, S, D, *l, IncludeCXX11Attributes);
 
+  // FIXME: We should be able to handle these cases in TableGen.
   // GCC accepts
   // static int a9 __attribute__((weakref));
   // but that looks really pointless. We reject it.
   if (D->hasAttr<WeakRefAttr>() && !D->hasAttr<AliasAttr>()) {
-    Diag(AttrList->getLoc(), diag::err_attribute_weakref_without_alias) <<
-    cast<NamedDecl>(D)->getNameAsString();
+    Diag(AttrList->getLoc(), diag::err_attribute_weakref_without_alias)
+      << cast<NamedDecl>(D);
     D->dropAttr<WeakRefAttr>();
     return;
   }
+
+  if (!D->hasAttr<OpenCLKernelAttr>()) {
+    // These attributes cannot be applied to a non-kernel function.
+    if (Attr *A = D->getAttr<ReqdWorkGroupSizeAttr>()) {
+      Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
+      D->setInvalidDecl();
+    }
+    if (Attr *A = D->getAttr<WorkGroupSizeHintAttr>()) {
+      Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
+      D->setInvalidDecl();
+    }
+    if (Attr *A = D->getAttr<VecTypeHintAttr>()) {
+      Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
+      D->setInvalidDecl();
+    }
+  }
 }
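+
+// The kernel-only check above rejects, for example (OpenCL, illustrative):
+//   __kernel __attribute__((reqd_work_group_size(8, 8, 1))) void k(void); // ok
+//   __attribute__((work_group_size_hint(8, 8, 1))) void helper(void);     // error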
 
 // Annotation attributes are the only attributes allowed after an access
@@ -5041,9 +4601,8 @@
     QualType FDTy = FD->getType();
     if (const FunctionProtoType *FT = FDTy->getAs<FunctionProtoType>()) {
       SmallVector<ParmVarDecl*, 16> Params;
-      for (FunctionProtoType::arg_type_iterator AI = FT->arg_type_begin(),
-           AE = FT->arg_type_end(); AI != AE; ++AI) {
-        ParmVarDecl *Param = BuildParmVarDeclForTypedef(NewFD, Loc, *AI);
+      for (const auto &AI : FT->param_types()) {
+        ParmVarDecl *Param = BuildParmVarDeclForTypedef(NewFD, Loc, AI);
         Param->setScopeInfo(0, Params.size());
         Params.push_back(Param);
       }
@@ -5070,18 +4629,20 @@
   if (W.getAlias()) { // clone decl, impersonate __attribute(weak,alias(...))
     IdentifierInfo *NDId = ND->getIdentifier();
     NamedDecl *NewD = DeclClonePragmaWeak(ND, W.getAlias(), W.getLocation());
-    NewD->addAttr(::new (Context) AliasAttr(W.getLocation(), Context,
-                                            NDId->getName()));
-    NewD->addAttr(::new (Context) WeakAttr(W.getLocation(), Context));
+    NewD->addAttr(AliasAttr::CreateImplicit(Context, NDId->getName(),
+                                            W.getLocation()));
+    NewD->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation()));
     WeakTopLevelDecl.push_back(NewD);
     // FIXME: "hideous" code from Sema::LazilyCreateBuiltin
     // to insert Decl at TU scope, sorry.
     DeclContext *SavedContext = CurContext;
     CurContext = Context.getTranslationUnitDecl();
+    NewD->setDeclContext(CurContext);
+    NewD->setLexicalDeclContext(CurContext);
     PushOnScopeChains(NewD, S);
     CurContext = SavedContext;
   } else { // just add weak to existing
-    ND->addAttr(::new (Context) WeakAttr(W.getLocation(), Context));
+    ND->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation()));
   }
 }
 
@@ -5150,8 +4711,9 @@
 static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
                                        Decl *decl) {
   if (decl && isForbiddenTypeAllowed(S, decl)) {
-    decl->addAttr(new (S.Context) UnavailableAttr(diag.Loc, S.Context,
-                        "this system declaration uses an unsupported type"));
+    decl->addAttr(UnavailableAttr::CreateImplicit(S.Context,
+                        "this system declaration uses an unsupported type",
+                        diag.Loc));
     return;
   }
   if (S.getLangOpts().ObjCAutoRefCount)
@@ -5199,9 +4761,11 @@
 
       switch (diag.Kind) {
       case DelayedDiagnostic::Deprecation:
-        // Don't bother giving deprecation diagnostics if the decl is invalid.
+      case DelayedDiagnostic::Unavailable:
+        // Don't bother giving deprecation/unavailable diagnostics if
+        // the decl is invalid.
         if (!decl->isInvalidDecl())
-          HandleDelayedDeprecationCheck(diag, decl);
+          HandleDelayedAvailabilityCheck(diag, decl);
         break;
 
       case DelayedDiagnostic::Access:
@@ -5236,61 +4800,120 @@
   return false;
 }
 
+static bool isDeclUnavailable(Decl *D) {
+  do {
+    if (D->isUnavailable())
+      return true;
+    // A category implicitly has the availability of the interface.
+    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D))
+      return CatD->getClassInterface()->isUnavailable();
+  } while ((D = cast_or_null<Decl>(D->getDeclContext())));
+  return false;
+}
+
 static void
-DoEmitDeprecationWarning(Sema &S, const NamedDecl *D, StringRef Message,
-                         SourceLocation Loc,
-                         const ObjCInterfaceDecl *UnknownObjCClass,
-                         const ObjCPropertyDecl *ObjCPropery) {
+DoEmitAvailabilityWarning(Sema &S,
+                          DelayedDiagnostic::DDKind K,
+                          Decl *Ctx,
+                          const NamedDecl *D,
+                          StringRef Message,
+                          SourceLocation Loc,
+                          const ObjCInterfaceDecl *UnknownObjCClass,
+                          const ObjCPropertyDecl *ObjCProperty) {
+
+  // Diagnostics for deprecated or unavailable.
+  unsigned diag, diag_message, diag_fwdclass_message;
+
+  // Matches 'diag::note_property_attribute' options.
+  unsigned property_note_select;
+
+  // Matches diag::note_availability_specified_here.
+  unsigned available_here_select_kind;
+
+  // Don't warn if our current context is deprecated or unavailable.
+  switch (K) {
+    case DelayedDiagnostic::Deprecation:
+      if (isDeclDeprecated(Ctx))
+        return;
+      diag = diag::warn_deprecated;
+      diag_message = diag::warn_deprecated_message;
+      diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
+      property_note_select = /* deprecated */ 0;
+      available_here_select_kind = /* deprecated */ 2;
+      break;
+
+    case DelayedDiagnostic::Unavailable:
+      if (isDeclUnavailable(Ctx))
+        return;
+      diag = diag::err_unavailable;
+      diag_message = diag::err_unavailable_message;
+      diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
+      property_note_select = /* unavailable */ 1;
+      available_here_select_kind = /* unavailable */ 0;
+      break;
+
+    default:
+      llvm_unreachable("Neither a deprecation nor an unavailable kind");
+  }
+
   DeclarationName Name = D->getDeclName();
   if (!Message.empty()) {
-    S.Diag(Loc, diag::warn_deprecated_message) << Name << Message;
-    S.Diag(D->getLocation(),
-           isa<ObjCMethodDecl>(D) ? diag::note_method_declared_at
-                                  : diag::note_previous_decl) << Name;
-    if (ObjCPropery)
-      S.Diag(ObjCPropery->getLocation(), diag::note_property_attribute)
-        << ObjCPropery->getDeclName() << 0;
+    S.Diag(Loc, diag_message) << Name << Message;
+    if (ObjCProperty)
+      S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+        << ObjCProperty->getDeclName() << property_note_select;
   } else if (!UnknownObjCClass) {
-    S.Diag(Loc, diag::warn_deprecated) << D->getDeclName();
-    S.Diag(D->getLocation(),
-           isa<ObjCMethodDecl>(D) ? diag::note_method_declared_at
-                                  : diag::note_previous_decl) << Name;
-    if (ObjCPropery)
-      S.Diag(ObjCPropery->getLocation(), diag::note_property_attribute)
-        << ObjCPropery->getDeclName() << 0;
+    S.Diag(Loc, diag) << Name;
+    if (ObjCProperty)
+      S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+        << ObjCProperty->getDeclName() << property_note_select;
   } else {
-    S.Diag(Loc, diag::warn_deprecated_fwdclass_message) << Name;
+    S.Diag(Loc, diag_fwdclass_message) << Name;
     S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
   }
+
+  S.Diag(D->getLocation(), diag::note_availability_specified_here)
+    << D << available_here_select_kind;
 }
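+
+// Sketch of the two diagnostic flavours selected above (wording approximate):
+//   void oldAPI(void)  __attribute__((deprecated("use newAPI")));
+//   void goneAPI(void) __attribute__((unavailable("removed")));
+//   void use(void) {
+//     oldAPI();   // warning: 'oldAPI' is deprecated: use newAPI
+//     goneAPI();  // error: 'goneAPI' is unavailable: removed
+//   }
+// Each diagnostic is followed by a note pointing at the declaration that
+// carries the availability attribute.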
 
-void Sema::HandleDelayedDeprecationCheck(DelayedDiagnostic &DD,
-                                         Decl *Ctx) {
-  if (isDeclDeprecated(Ctx))
-    return;
-
+void Sema::HandleDelayedAvailabilityCheck(DelayedDiagnostic &DD,
+                                          Decl *Ctx) {
   DD.Triggered = true;
-  DoEmitDeprecationWarning(*this, DD.getDeprecationDecl(),
-                           DD.getDeprecationMessage(), DD.Loc,
-                           DD.getUnknownObjCClass(),
-                           DD.getObjCProperty());
+  DoEmitAvailabilityWarning(*this,
+                            (DelayedDiagnostic::DDKind) DD.Kind,
+                            Ctx,
+                            DD.getDeprecationDecl(),
+                            DD.getDeprecationMessage(),
+                            DD.Loc,
+                            DD.getUnknownObjCClass(),
+                            DD.getObjCProperty());
 }
 
-void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message,
-                                  SourceLocation Loc,
-                                  const ObjCInterfaceDecl *UnknownObjCClass,
-                                  const ObjCPropertyDecl  *ObjCProperty) {
+void Sema::EmitAvailabilityWarning(AvailabilityDiagnostic AD,
+                                   NamedDecl *D, StringRef Message,
+                                   SourceLocation Loc,
+                                   const ObjCInterfaceDecl *UnknownObjCClass,
+                                   const ObjCPropertyDecl  *ObjCProperty) {
   // Delay if we're currently parsing a declaration.
   if (DelayedDiagnostics.shouldDelayDiagnostics()) {
-    DelayedDiagnostics.add(DelayedDiagnostic::makeDeprecation(Loc, D, 
-                                                              UnknownObjCClass,
-                                                              ObjCProperty,
-                                                              Message));
+    DelayedDiagnostics.add(DelayedDiagnostic::makeAvailability(AD, Loc, D,
+                                                               UnknownObjCClass,
+                                                               ObjCProperty,
+                                                               Message));
     return;
   }
 
-  // Otherwise, don't warn if our current context is deprecated.
-  if (isDeclDeprecated(cast<Decl>(getCurLexicalContext())))
-    return;
-  DoEmitDeprecationWarning(*this, D, Message, Loc, UnknownObjCClass, ObjCProperty);
+  Decl *Ctx = cast<Decl>(getCurLexicalContext());
+  DelayedDiagnostic::DDKind K;
+  switch (AD) {
+    case AD_Deprecation:
+      K = DelayedDiagnostic::Deprecation;
+      break;
+    case AD_Unavailable:
+      K = DelayedDiagnostic::Unavailable;
+      break;
+  }
+
+  DoEmitAvailabilityWarning(*this, K, Ctx, D, Message, Loc,
+                            UnknownObjCClass, ObjCProperty);
 }
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 6b3400a..850db26 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -212,11 +212,9 @@
          "Shouldn't collect exceptions when throw-all is guaranteed.");
   ComputedEST = EST_Dynamic;
   // Record the exceptions in this function's exception specification.
-  for (FunctionProtoType::exception_iterator E = Proto->exception_begin(),
-                                          EEnd = Proto->exception_end();
-       E != EEnd; ++E)
-    if (ExceptionsSeen.insert(Self->Context.getCanonicalType(*E)))
-      Exceptions.push_back(*E);
+  for (const auto &E : Proto->exceptions())
+    if (ExceptionsSeen.insert(Self->Context.getCanonicalType(E)))
+      Exceptions.push_back(E);
 }
 
 void Sema::ImplicitExceptionSpecification::CalledExpr(Expr *E) {
@@ -380,16 +378,16 @@
         MightBeFunction = false;
         continue;
       }
-      for (unsigned argIdx = 0, e = chunk.Fun.NumArgs; argIdx != e; ++argIdx) {
-        ParmVarDecl *Param =
-          cast<ParmVarDecl>(chunk.Fun.ArgInfo[argIdx].Param);
+      for (unsigned argIdx = 0, e = chunk.Fun.NumParams; argIdx != e;
+           ++argIdx) {
+        ParmVarDecl *Param = cast<ParmVarDecl>(chunk.Fun.Params[argIdx].Param);
         if (Param->hasUnparsedDefaultArg()) {
-          CachedTokens *Toks = chunk.Fun.ArgInfo[argIdx].DefaultArgTokens;
+          CachedTokens *Toks = chunk.Fun.Params[argIdx].DefaultArgTokens;
           Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
             << SourceRange((*Toks)[1].getLocation(),
                            Toks->back().getLocation());
           delete Toks;
-          chunk.Fun.ArgInfo[argIdx].DefaultArgTokens = 0;
+          chunk.Fun.Params[argIdx].DefaultArgTokens = 0;
         } else if (Param->getDefaultArg()) {
           Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
             << Param->getDefaultArg()->getSourceRange();
@@ -586,6 +584,7 @@
     }
   }
 
+  const FunctionDecl *Def;
   // C++11 [dcl.constexpr]p1: If any declaration of a function or function
   // template has a constexpr specifier then all its declarations shall
   // contain the constexpr specifier.
@@ -594,6 +593,13 @@
       << New << New->isConstexpr();
     Diag(Old->getLocation(), diag::note_previous_declaration);
     Invalid = true;
+  } else if (!Old->isInlined() && New->isInlined() && Old->isDefined(Def)) {
+    // C++11 [dcl.fct.spec]p4:
+    //   If the definition of a function appears in a translation unit before
+    //   its first declaration as inline, the program is ill-formed.
+    Diag(New->getLocation(), diag::err_inline_decl_follows_def) << New;
+    Diag(Def->getLocation(), diag::note_previous_definition);
+    Invalid = true;
   }
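+
+  // For example (illustrative):
+  //   void f() {}       // definition appears first
+  //   inline void f();  // now diagnosed: inline declaration follows definition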
 
   // C++11 [dcl.fct.default]p4: If a friend declaration specifies a default
@@ -714,8 +720,9 @@
                                          const FunctionDecl *FD) {
   unsigned ArgIndex = 0;
   const FunctionProtoType *FT = FD->getType()->getAs<FunctionProtoType>();
-  for (FunctionProtoType::arg_type_iterator i = FT->arg_type_begin(),
-       e = FT->arg_type_end(); i != e; ++i, ++ArgIndex) {
+  for (FunctionProtoType::param_type_iterator i = FT->param_type_begin(),
+                                              e = FT->param_type_end();
+       i != e; ++i, ++ArgIndex) {
     const ParmVarDecl *PD = FD->getParamDecl(ArgIndex);
     SourceLocation ParamLoc = PD->getLocation();
     if (!(*i)->isDependentType() &&
@@ -760,10 +767,9 @@
       Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base)
         << isa<CXXConstructorDecl>(NewFD)
         << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
-      for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
-             E = RD->vbases_end(); I != E; ++I)
-        Diag(I->getLocStart(),
-             diag::note_constexpr_virtual_base_here) << I->getSourceRange();
+      for (const auto &I : RD->vbases())
+        Diag(I.getLocStart(),
+             diag::note_constexpr_virtual_base_here) << I.getSourceRange();
       return false;
     }
   }
@@ -789,7 +795,7 @@
     }
 
     // - its return type shall be a literal type;
-    QualType RT = NewFD->getResultType();
+    QualType RT = NewFD->getReturnType();
     if (!RT->isDependentType() &&
         RequireLiteralType(NewFD->getLocation(), RT,
                            diag::err_constexpr_non_literal_return))
@@ -813,9 +819,8 @@
   // C++11 [dcl.constexpr]p3 and p4:
   //  The definition of a constexpr function(p3) or constructor(p4) [...] shall
   //  contain only
-  for (DeclStmt::decl_iterator DclIt = DS->decl_begin(),
-         DclEnd = DS->decl_end(); DclIt != DclEnd; ++DclIt) {
-    switch ((*DclIt)->getKind()) {
+  for (const auto *DclIt : DS->decls()) {
+    switch (DclIt->getKind()) {
     case Decl::StaticAssert:
     case Decl::Using:
     case Decl::UsingShadow:
@@ -831,7 +836,7 @@
     case Decl::TypeAlias: {
       //   - typedef declarations and alias-declarations that do not define
       //     classes or enumerations,
-      TypedefNameDecl *TN = cast<TypedefNameDecl>(*DclIt);
+      const auto *TN = cast<TypedefNameDecl>(DclIt);
       if (TN->getUnderlyingType()->isVariablyModifiedType()) {
         // Don't allow variably-modified types in constexpr functions.
         TypeLoc TL = TN->getTypeSourceInfo()->getTypeLoc();
@@ -846,7 +851,7 @@
     case Decl::Enum:
     case Decl::CXXRecord:
       // C++1y allows types to be defined, not just declared.
-      if (cast<TagDecl>(*DclIt)->isThisDeclarationADefinition())
+      if (cast<TagDecl>(DclIt)->isThisDeclarationADefinition())
         SemaRef.Diag(DS->getLocStart(),
                      SemaRef.getLangOpts().CPlusPlus1y
                        ? diag::warn_cxx11_compat_constexpr_type_definition
@@ -865,7 +870,7 @@
       // C++1y [dcl.constexpr]p3 allows anything except:
       //   a definition of a variable of non-literal type or of static or
       //   thread storage duration or for which no initialization is performed.
-      VarDecl *VD = cast<VarDecl>(*DclIt);
+      const auto *VD = cast<VarDecl>(DclIt);
       if (VD->isThisDeclarationADefinition()) {
         if (VD->isStaticLocal()) {
           SemaRef.Diag(VD->getLocation(),
@@ -880,7 +885,8 @@
               diag::err_constexpr_local_var_non_literal_type,
               isa<CXXConstructorDecl>(Dcl)))
           return false;
-        if (!VD->hasInit() && !VD->isCXXForRangeDecl()) {
+        if (!VD->getType()->isDependentType() &&
+            !VD->hasInit() && !VD->isCXXForRangeDecl()) {
           SemaRef.Diag(VD->getLocation(),
                        diag::err_constexpr_local_var_no_init)
             << isa<CXXConstructorDecl>(Dcl);
@@ -932,8 +938,13 @@
   if (Field->isUnnamedBitfield())
     return;
 
+  // Anonymous unions with no variant members and empty anonymous structs do not
+  // need to be explicitly initialized. FIXME: Anonymous structs that contain no
+  // indirect fields don't need initializing.
   if (Field->isAnonymousStructOrUnion() &&
-      Field->getType()->getAsCXXRecordDecl()->isEmpty())
+      (Field->getType()->isUnionType()
+           ? !Field->getType()->getAsCXXRecordDecl()->hasVariantMembers()
+           : Field->getType()->getAsCXXRecordDecl()->isEmpty()))
     return;
 
   if (!Inits.count(Field)) {
@@ -944,12 +955,11 @@
     SemaRef.Diag(Field->getLocation(), diag::note_constexpr_ctor_missing_init);
   } else if (Field->isAnonymousStructOrUnion()) {
     const RecordDecl *RD = Field->getType()->castAs<RecordType>()->getDecl();
-    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
-         I != E; ++I)
+    for (auto *I : RD->fields())
       // If an anonymous union contains an anonymous struct of which any member
       // is initialized, all members must be initialized.
-      if (!RD->isUnion() || Inits.count(*I))
-        CheckConstexprCtorInitializer(SemaRef, Dcl, *I, Inits, Diagnosed);
+      if (!RD->isUnion() || Inits.count(I))
+        CheckConstexprCtorInitializer(SemaRef, Dcl, I, Inits, Diagnosed);
   }
 }
 
@@ -993,9 +1003,8 @@
       Cxx1yLoc = S->getLocStart();
 
     CompoundStmt *CompStmt = cast<CompoundStmt>(S);
-    for (CompoundStmt::body_iterator BodyIt = CompStmt->body_begin(),
-           BodyEnd = CompStmt->body_end(); BodyIt != BodyEnd; ++BodyIt) {
-      if (!CheckConstexprFunctionStmt(SemaRef, Dcl, *BodyIt, ReturnStmts,
+    for (auto *BodyIt : CompStmt->body()) {
+      if (!CheckConstexprFunctionStmt(SemaRef, Dcl, BodyIt, ReturnStmts,
                                       Cxx1yLoc))
         return false;
     }
@@ -1097,9 +1106,8 @@
   //   [... list of cases ...]
   CompoundStmt *CompBody = cast<CompoundStmt>(Body);
   SourceLocation Cxx1yLoc;
-  for (CompoundStmt::body_iterator BodyIt = CompBody->body_begin(),
-         BodyEnd = CompBody->body_end(); BodyIt != BodyEnd; ++BodyIt) {
-    if (!CheckConstexprFunctionStmt(*this, Dcl, *BodyIt, ReturnStmts, Cxx1yLoc))
+  for (auto *BodyIt : CompBody->body()) {
+    if (!CheckConstexprFunctionStmt(*this, Dcl, BodyIt, ReturnStmts, Cxx1yLoc))
       return false;
   }
 
@@ -1116,11 +1124,12 @@
     // DR1359:
     // - every non-variant non-static data member and base class sub-object
     //   shall be initialized;
-    // - if the class is a non-empty union, or for each non-empty anonymous
-    //   union member of a non-union class, exactly one non-static data member
+    // DR1460:
+    // - if the class is a union having variant members, exactly one of them
     //   shall be initialized;
     if (RD->isUnion()) {
-      if (Constructor->getNumCtorInitializers() == 0 && !RD->isEmpty()) {
+      if (Constructor->getNumCtorInitializers() == 0 &&
+          RD->hasVariantMembers()) {
         Diag(Dcl->getLocation(), diag::err_constexpr_union_ctor_no_init);
         return false;
       }
@@ -1139,25 +1148,26 @@
           break;
         }
       }
+      // DR1460:
+      // - if the class is a union-like class, but is not a union, for each of
+      //   its anonymous union members having variant members, exactly one of
+      //   them shall be initialized;
       if (AnyAnonStructUnionMembers ||
           Constructor->getNumCtorInitializers() != RD->getNumBases() + Fields) {
         // Check initialization of non-static data members. Base classes are
         // always initialized so do not need to be checked. Dependent bases
         // might not have initializers in the member initializer list.
         llvm::SmallSet<Decl*, 16> Inits;
-        for (CXXConstructorDecl::init_const_iterator
-               I = Constructor->init_begin(), E = Constructor->init_end();
-             I != E; ++I) {
-          if (FieldDecl *FD = (*I)->getMember())
+        for (const auto *I: Constructor->inits()) {
+          if (FieldDecl *FD = I->getMember())
             Inits.insert(FD);
-          else if (IndirectFieldDecl *ID = (*I)->getIndirectMember())
+          else if (IndirectFieldDecl *ID = I->getIndirectMember())
             Inits.insert(ID->chain_begin(), ID->chain_end());
         }
 
         bool Diagnosed = false;
-        for (CXXRecordDecl::field_iterator I = RD->field_begin(),
-             E = RD->field_end(); I != E; ++I)
-          CheckConstexprCtorInitializer(*this, Dcl, *I, Inits, Diagnosed);
+        for (auto *I : RD->fields())
+          CheckConstexprCtorInitializer(*this, Dcl, I, Inits, Diagnosed);
         if (Diagnosed)
           return false;
       }
@@ -1168,7 +1178,7 @@
       // statement. We still do, unless the return type is void, because
       // otherwise if there's no return statement, the function cannot
       // be used in a core constant expression.
-      bool OK = getLangOpts().CPlusPlus1y && Dcl->getResultType()->isVoidType();
+      bool OK = getLangOpts().CPlusPlus1y && Dcl->getReturnType()->isVoidType();
       Diag(Dcl->getLocation(),
            OK ? diag::warn_cxx11_compat_constexpr_body_no_return
               : diag::err_constexpr_body_no_return);
@@ -1261,10 +1271,8 @@
 
   Class = Class->getCanonicalDecl();
   while (true) {
-    for (CXXRecordDecl::base_class_const_iterator I = Current->bases_begin(),
-                                                  E = Current->bases_end();
-         I != E; ++I) {
-      CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
+    for (const auto &I : Current->bases()) {
+      CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
       if (!Base)
         continue;
 
@@ -1509,7 +1517,7 @@
           Invalid = true;
         }
         if (RD->hasAttr<WeakAttr>())
-          Class->addAttr(::new (Context) WeakAttr(SourceRange(), Context));
+          Class->addAttr(WeakAttr::CreateImplicit(Context));
       }
     }
   }
@@ -1965,7 +1973,8 @@
       const char *PrevSpec;
       unsigned DiagID;
       if (D.getMutableDeclSpec().SetStorageClassSpec(
-          *this, DeclSpec::SCS_static, ConstexprLoc, PrevSpec, DiagID)) {
+          *this, DeclSpec::SCS_static, ConstexprLoc, PrevSpec, DiagID,
+          Context.getPrintingPolicy())) {
         assert(DS.getStorageClassSpec() == DeclSpec::SCS_mutable &&
                "This is the only DeclSpec that should fail to be applied");
         B << 1;
@@ -2082,7 +2091,7 @@
   }
 
   if (VS.isOverrideSpecified())
-    Member->addAttr(new (Context) OverrideAttr(VS.getOverrideLoc(), Context));
+    Member->addAttr(new (Context) OverrideAttr(VS.getOverrideLoc(), Context, 0));
   if (VS.isFinalSpecified())
     Member->addAttr(new (Context) FinalAttr(VS.getFinalLoc(), Context,
                                             VS.isFinalSpelledSealed()));
@@ -2306,38 +2315,44 @@
     llvm::SmallPtrSet<ValueDecl*, 4> UninitializedFields;
 
     // At the beginning, all fields are uninitialized.
-    for (DeclContext::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
-         I != E; ++I) {
-      if (FieldDecl *FD = dyn_cast<FieldDecl>(*I)) {
+    for (auto *I : RD->decls()) {
+      if (auto *FD = dyn_cast<FieldDecl>(I)) {
         UninitializedFields.insert(FD);
-      } else if (IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(*I)) {
+      } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(I)) {
         UninitializedFields.insert(IFD->getAnonField());
       }
     }
 
-    for (CXXConstructorDecl::init_const_iterator FieldInit =
-             Constructor->init_begin(),
-             FieldInitEnd = Constructor->init_end();
-         FieldInit != FieldInitEnd; ++FieldInit) {
-
-      Expr *InitExpr = (*FieldInit)->getInit();
+    for (const auto *FieldInit : Constructor->inits()) {
+      Expr *InitExpr = FieldInit->getInit();
 
       CheckInitExprContainsUninitializedFields(
           SemaRef, InitExpr, UninitializedFields, Constructor);
 
-      if (FieldDecl *Field = (*FieldInit)->getAnyMember())
+      if (FieldDecl *Field = FieldInit->getAnyMember())
         UninitializedFields.erase(Field);
     }
   }
 } // namespace
 
-/// ActOnCXXInClassMemberInitializer - This is invoked after parsing an
-/// in-class initializer for a non-static C++ class member, and after
-/// instantiating an in-class initializer in a class template. Such actions
-/// are deferred until the class is complete.
-void
-Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation InitLoc,
-                                       Expr *InitExpr) {
+/// \brief Enter a new C++ default initializer scope. After calling this, the
+/// caller must call \ref ActOnFinishCXXInClassMemberInitializer, even if
+/// parsing or instantiating the initializer failed.
+void Sema::ActOnStartCXXInClassMemberInitializer() {
+  // Create a synthetic function scope to represent the call to the constructor
+  // that notionally surrounds a use of this initializer.
+  PushFunctionScope();
+}
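+
+// The notional constructor scope above brackets parsing or instantiation of
+// in-class initializers such as (illustrative; compute() is a placeholder):
+//   struct S {
+//     int x = compute();   // handled between the Start/Finish calls
+//   };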
+
+/// \brief This is invoked after parsing an in-class initializer for a
+/// non-static C++ class member, and after instantiating an in-class initializer
+/// in a class template. Such actions are deferred until the class is complete.
+void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
+                                                  SourceLocation InitLoc,
+                                                  Expr *InitExpr) {
+  // Pop the notional constructor scope we created earlier.
+  PopFunctionScopeInfo(0, D);
+
   FieldDecl *FD = cast<FieldDecl>(D);
   assert(FD->getInClassInitStyle() != ICIS_NoInit &&
          "must set init style when field is created");
@@ -2392,13 +2407,11 @@
                                 const CXXBaseSpecifier *&VirtualBaseSpec) {
   // First, check for a direct base class.
   DirectBaseSpec = 0;
-  for (CXXRecordDecl::base_class_const_iterator Base
-         = ClassDecl->bases_begin(); 
-       Base != ClassDecl->bases_end(); ++Base) {
-    if (SemaRef.Context.hasSameUnqualifiedType(BaseType, Base->getType())) {
+  for (const auto &Base : ClassDecl->bases()) {
+    if (SemaRef.Context.hasSameUnqualifiedType(BaseType, Base.getType())) {
       // We found a direct base of this type. That's what we're
       // initializing.
-      DirectBaseSpec = &*Base;
+      DirectBaseSpec = &Base;
       break;
     }
   }
@@ -2471,7 +2484,7 @@
   explicit MemInitializerValidatorCCC(CXXRecordDecl *ClassDecl)
       : ClassDecl(ClassDecl) {}
 
-  bool ValidateCandidate(const TypoCorrection &candidate) LLVM_OVERRIDE {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     if (NamedDecl *ND = candidate.getCorrectionDecl()) {
       if (FieldDecl *Member = dyn_cast<FieldDecl>(ND))
         return Member->getDeclContext()->getRedeclContext()->Equals(ClassDecl);
@@ -2629,13 +2642,10 @@
 
     if (BaseType.isNull()) {
       BaseType = Context.getTypeDeclType(TyD);
-      if (SS.isSet()) {
-        NestedNameSpecifier *Qualifier =
-          static_cast<NestedNameSpecifier*>(SS.getScopeRep());
-
+      if (SS.isSet())
         // FIXME: preserve source range information
-        BaseType = Context.getElaboratedType(ETK_None, Qualifier, BaseType);
-      }
+        BaseType = Context.getElaboratedType(ETK_None, SS.getScopeRep(),
+                                             BaseType);
     }
   }
 
@@ -3320,6 +3330,7 @@
   ImplicitInitializerKind IIK;
   llvm::DenseMap<const void *, CXXCtorInitializer*> AllBaseFields;
   SmallVector<CXXCtorInitializer*, 8> AllToInit;
+  llvm::DenseMap<TagDecl*, FieldDecl*> ActiveUnionMember;
 
   BaseAndFieldInfo(Sema &S, CXXConstructorDecl *Ctor, bool ErrorsInInits)
     : S(S), Ctor(Ctor), AnyErrorsInInits(ErrorsInInits) {
@@ -3357,20 +3368,48 @@
 
     return false;
   }
-};
-}
 
-/// \brief Determine whether the given indirect field declaration is somewhere
-/// within an anonymous union.
-static bool isWithinAnonymousUnion(IndirectFieldDecl *F) {
-  for (IndirectFieldDecl::chain_iterator C = F->chain_begin(), 
-                                      CEnd = F->chain_end();
-       C != CEnd; ++C)
-    if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>((*C)->getDeclContext()))
-      if (Record->isUnion())
+  bool isInactiveUnionMember(FieldDecl *Field) {
+    RecordDecl *Record = Field->getParent();
+    if (!Record->isUnion())
+      return false;
+
+    if (FieldDecl *Active =
+            ActiveUnionMember.lookup(Record->getCanonicalDecl()))
+      return Active != Field->getCanonicalDecl();
+
+    // In an implicit copy or move constructor, ignore any in-class initializer.
+    if (isImplicitCopyOrMove())
+      return true;
+
+    // If there's no explicit initialization, the field is active only if it
+    // has an in-class initializer...
+    if (Field->hasInClassInitializer())
+      return false;
+    // ... or it's an anonymous struct or union whose class has an in-class
+    // initializer.
+    if (!Field->isAnonymousStructOrUnion())
+      return true;
+    CXXRecordDecl *FieldRD = Field->getType()->getAsCXXRecordDecl();
+    return !FieldRD->hasInClassInitializer();
+  }
+
+  /// \brief Determine whether the given field is, or is within, a union member
+  /// that is inactive (because there was an initializer given for a different
+  /// member of the union, or because the union was not initialized at all).
+  bool isWithinInactiveUnionMember(FieldDecl *Field,
+                                   IndirectFieldDecl *Indirect) {
+    if (!Indirect)
+      return isInactiveUnionMember(Field);
+
+    for (auto *C : Indirect->chain()) {
+      FieldDecl *Field = dyn_cast<FieldDecl>(C);
+      if (Field && isInactiveUnionMember(Field))
         return true;
-        
-  return false;
+    }
+    return false;
+  }
+};
 }
 
 /// \brief Determine whether the given type is an incomplete or zero-length
@@ -3399,9 +3438,21 @@
   if (CXXCtorInitializer *Init = Info.AllBaseFields.lookup(Field))
     return Info.addFieldInitializer(Init);
 
-  // C++11 [class.base.init]p8: if the entity is a non-static data member that
-  // has a brace-or-equal-initializer, the entity is initialized as specified
-  // in [dcl.init].
+  // C++11 [class.base.init]p8:
+  //   if the entity is a non-static data member that has a
+  //   brace-or-equal-initializer and either
+  //   -- the constructor's class is a union and no other variant member of that
+  //      union is designated by a mem-initializer-id or
+  //   -- the constructor's class is not a union, and, if the entity is a member
+  //      of an anonymous union, no other member of that union is designated by
+  //      a mem-initializer-id,
+  //   the entity is initialized as specified in [dcl.init].
+  //
+  // We also apply the same rules to handle anonymous structs within anonymous
+  // unions.
+  if (Info.isWithinInactiveUnionMember(Field, Indirect))
+    return false;
+
   if (Field->hasInClassInitializer() && !Info.isImplicitCopyOrMove()) {
     Expr *DIE = CXXDefaultInitExpr::Create(SemaRef.Context,
                                            Info.Ctor->getLocation(), Field);
@@ -3419,12 +3470,6 @@
     return Info.addFieldInitializer(Init);
   }
 
-  // Don't build an implicit initializer for union members if none was
-  // explicitly specified.
-  if (Field->getParent()->isUnion() ||
-      (Indirect && isWithinAnonymousUnion(Indirect)))
-    return false;
-
   // Don't initialize incomplete or zero-length arrays.
   if (isIncompleteOrZeroLengthArrayType(SemaRef.Context, Field->getType()))
     return false;
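An illustrative sketch (type and member names hypothetical) of the C++11 rule quoted in this hunk: a variant member's in-class initializer is used only when no other member of the same union is designated by a mem-initializer:

    struct S {
      union {
        int i = 42;        // the only variant member with an initializer
        float f;
      };
      S() {}               // no mem-initializer names the union: 'i' becomes
                           // the active member, initialized to 42
      S(float x) : f(x) {} // 'f' is designated, so 'i' is inactive and its
                           // in-class initializer is ignored
    };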
@@ -3502,24 +3547,35 @@
 
     if (Member->isBaseInitializer())
       Info.AllBaseFields[Member->getBaseClass()->getAs<RecordType>()] = Member;
-    else
+    else {
       Info.AllBaseFields[Member->getAnyMember()] = Member;
+
+      if (IndirectFieldDecl *F = Member->getIndirectMember()) {
+        for (auto *C : F->chain()) {
+          FieldDecl *FD = dyn_cast<FieldDecl>(C);
+          if (FD && FD->getParent()->isUnion())
+            Info.ActiveUnionMember.insert(std::make_pair(
+                FD->getParent()->getCanonicalDecl(), FD->getCanonicalDecl()));
+        }
+      } else if (FieldDecl *FD = Member->getMember()) {
+        if (FD->getParent()->isUnion())
+          Info.ActiveUnionMember.insert(std::make_pair(
+              FD->getParent()->getCanonicalDecl(), FD->getCanonicalDecl()));
+      }
+    }
   }
 
   // Keep track of the direct virtual bases.
   llvm::SmallPtrSet<CXXBaseSpecifier *, 16> DirectVBases;
-  for (CXXRecordDecl::base_class_iterator I = ClassDecl->bases_begin(),
-       E = ClassDecl->bases_end(); I != E; ++I) {
-    if (I->isVirtual())
-      DirectVBases.insert(I);
+  for (auto &I : ClassDecl->bases()) {
+    if (I.isVirtual())
+      DirectVBases.insert(&I);
   }
 
   // Push virtual bases before others.
-  for (CXXRecordDecl::base_class_iterator VBase = ClassDecl->vbases_begin(),
-       E = ClassDecl->vbases_end(); VBase != E; ++VBase) {
-
+  for (auto &VBase : ClassDecl->vbases()) {
     if (CXXCtorInitializer *Value
-        = Info.AllBaseFields.lookup(VBase->getType()->getAs<RecordType>())) {
+        = Info.AllBaseFields.lookup(VBase.getType()->getAs<RecordType>())) {
       // [class.base.init]p7, per DR257:
       //   A mem-initializer where the mem-initializer-id names a virtual base
       //   class is ignored during execution of a constructor of any class that
@@ -3528,7 +3584,7 @@
         // FIXME: Provide a fixit to remove the base specifier. This requires
         // tracking the location of the associated comma for a base specifier.
         Diag(Value->getSourceLocation(), diag::warn_abstract_vbase_init_ignored)
-          << VBase->getType() << ClassDecl;
+          << VBase.getType() << ClassDecl;
         DiagnoseAbstractType(ClassDecl);
       }
 
@@ -3538,10 +3594,10 @@
       //   If a given [...] base class is not named by a mem-initializer-id
       //   [...] and the entity is not a virtual base class of an abstract
       //   class, then [...] the entity is default-initialized.
-      bool IsInheritedVirtualBase = !DirectVBases.count(VBase);
+      bool IsInheritedVirtualBase = !DirectVBases.count(&VBase);
       CXXCtorInitializer *CXXBaseInit;
       if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
-                                       VBase, IsInheritedVirtualBase,
+                                       &VBase, IsInheritedVirtualBase,
                                        CXXBaseInit)) {
         HadError = true;
         continue;
@@ -3552,19 +3608,18 @@
   }
 
   // Non-virtual bases.
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
-       E = ClassDecl->bases_end(); Base != E; ++Base) {
+  for (auto &Base : ClassDecl->bases()) {
     // Virtuals are in the virtual base list and already constructed.
-    if (Base->isVirtual())
+    if (Base.isVirtual())
       continue;
 
     if (CXXCtorInitializer *Value
-          = Info.AllBaseFields.lookup(Base->getType()->getAs<RecordType>())) {
+          = Info.AllBaseFields.lookup(Base.getType()->getAs<RecordType>())) {
       Info.AllToInit.push_back(Value);
     } else if (!AnyErrors) {
       CXXCtorInitializer *CXXBaseInit;
       if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
-                                       Base, /*IsInheritedVirtualBase=*/false,
+                                       &Base, /*IsInheritedVirtualBase=*/false,
                                        CXXBaseInit)) {
         HadError = true;
         continue;
@@ -3575,10 +3630,8 @@
   }
 
   // Fields.
-  for (DeclContext::decl_iterator Mem = ClassDecl->decls_begin(),
-                               MemEnd = ClassDecl->decls_end();
-       Mem != MemEnd; ++Mem) {
-    if (FieldDecl *F = dyn_cast<FieldDecl>(*Mem)) {
+  for (auto *Mem : ClassDecl->decls()) {
+    if (auto *F = dyn_cast<FieldDecl>(Mem)) {
       // C++ [class.bit]p2:
       //   A declaration for a bit-field that omits the identifier declares an
       //   unnamed bit-field. Unnamed bit-fields are not members and cannot be
@@ -3601,7 +3654,7 @@
     if (Info.isImplicitCopyOrMove())
       continue;
     
-    if (IndirectFieldDecl *F = dyn_cast<IndirectFieldDecl>(*Mem)) {
+    if (auto *F = dyn_cast<IndirectFieldDecl>(Mem)) {
       if (F->getType()->isIncompleteArrayType()) {
         assert(ClassDecl->hasFlexibleArrayMember() &&
                "Incomplete array type is not valid");
@@ -3638,9 +3691,8 @@
   if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
     const RecordDecl *RD = RT->getDecl();
     if (RD->isAnonymousStructOrUnion()) {
-      for (RecordDecl::field_iterator Field = RD->field_begin(),
-          E = RD->field_end(); Field != E; ++Field)
-        PopulateKeysForFields(*Field, IdealInits);
+      for (auto *Field : RD->fields())
+        PopulateKeysForFields(Field, IdealInits);
       return;
     }
   }
@@ -3688,26 +3740,22 @@
   const CXXRecordDecl *ClassDecl = Constructor->getParent();
 
   // 1. Virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator VBase =
-       ClassDecl->vbases_begin(),
-       E = ClassDecl->vbases_end(); VBase != E; ++VBase)
-    IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, VBase->getType()));
+  for (const auto &VBase : ClassDecl->vbases())
+    IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, VBase.getType()));
 
   // 2. Non-virtual bases.
-  for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin(),
-       E = ClassDecl->bases_end(); Base != E; ++Base) {
-    if (Base->isVirtual())
+  for (const auto &Base : ClassDecl->bases()) {
+    if (Base.isVirtual())
       continue;
-    IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, Base->getType()));
+    IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, Base.getType()));
   }
 
   // 3. Direct fields.
-  for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
-       E = ClassDecl->field_end(); Field != E; ++Field) {
+  for (auto *Field : ClassDecl->fields()) {
     if (Field->isUnnamedBitfield())
       continue;
     
-    PopulateKeysForFields(*Field, IdealInitKeys);
+    PopulateKeysForFields(Field, IdealInitKeys);
   }
   
   unsigned NumIdealInits = IdealInitKeys.size();
@@ -3903,9 +3951,7 @@
   // emitted, and we currently don't say.
   
   // Non-static data members.
-  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
-       E = ClassDecl->field_end(); I != E; ++I) {
-    FieldDecl *Field = *I;
+  for (auto *Field : ClassDecl->fields()) {
     if (Field->isInvalidDecl())
       continue;
     
@@ -3942,13 +3988,12 @@
   llvm::SmallPtrSet<const RecordType *, 8> DirectVirtualBases;
 
   // Bases.
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
-       E = ClassDecl->bases_end(); Base != E; ++Base) {
+  for (const auto &Base : ClassDecl->bases()) {
     // Bases are always records in a well-formed non-dependent class.
-    const RecordType *RT = Base->getType()->getAs<RecordType>();
+    const RecordType *RT = Base.getType()->getAs<RecordType>();
 
     // Remember direct virtual bases.
-    if (Base->isVirtual())
+    if (Base.isVirtual())
       DirectVirtualBases.insert(RT);
 
     CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
@@ -3962,10 +4007,10 @@
     assert(Dtor && "No dtor found for BaseClassDecl!");
 
     // FIXME: caret should be on the start of the class name
-    CheckDestructorAccess(Base->getLocStart(), Dtor,
+    CheckDestructorAccess(Base.getLocStart(), Dtor,
                           PDiag(diag::err_access_dtor_base)
-                            << Base->getType()
-                            << Base->getSourceRange(),
+                            << Base.getType()
+                            << Base.getSourceRange(),
                           Context.getTypeDeclType(ClassDecl));
     
     MarkFunctionReferenced(Location, Dtor);
@@ -3973,11 +4018,9 @@
   }
   
   // Virtual bases.
-  for (CXXRecordDecl::base_class_iterator VBase = ClassDecl->vbases_begin(),
-       E = ClassDecl->vbases_end(); VBase != E; ++VBase) {
-
+  for (const auto &VBase : ClassDecl->vbases()) {
     // Bases are always records in a well-formed non-dependent class.
-    const RecordType *RT = VBase->getType()->castAs<RecordType>();
+    const RecordType *RT = VBase.getType()->castAs<RecordType>();
 
     // Ignore direct virtual bases.
     if (DirectVirtualBases.count(RT))
@@ -3995,11 +4038,11 @@
     if (CheckDestructorAccess(
             ClassDecl->getLocation(), Dtor,
             PDiag(diag::err_access_dtor_vbase)
-                << Context.getTypeDeclType(ClassDecl) << VBase->getType(),
+                << Context.getTypeDeclType(ClassDecl) << VBase.getType(),
             Context.getTypeDeclType(ClassDecl)) ==
         AR_accessible) {
       CheckDerivedToBaseConversion(
-          Context.getTypeDeclType(ClassDecl), VBase->getType(),
+          Context.getTypeDeclType(ClassDecl), VBase.getType(),
           diag::err_access_dtor_vbase, 0, ClassDecl->getLocation(),
           SourceRange(), DeclarationName(), 0);
     }
@@ -4030,7 +4073,7 @@
     NonAbstractTypeDiagnoser(unsigned DiagID, AbstractDiagSelID SelID)
       : TypeDiagnoser(DiagID == 0), DiagID(DiagID), SelID(SelID) { }
 
-    void diagnose(Sema &S, SourceLocation Loc, QualType T) LLVM_OVERRIDE {
+    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
       if (Suppressed) return;
       if (SelID == -1)
         S.Diag(Loc, DiagID) << T;
@@ -4172,12 +4215,12 @@
   }
 
   void Check(FunctionProtoTypeLoc TL, Sema::AbstractDiagSelID Sel) {
-    Visit(TL.getResultLoc(), Sema::AbstractReturnType);
-    for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
-      if (!TL.getArg(I))
+    Visit(TL.getReturnLoc(), Sema::AbstractReturnType);
+    for (unsigned I = 0, E = TL.getNumParams(); I != E; ++I) {
+      if (!TL.getParam(I))
         continue;
-      
-      TypeSourceInfo *TSI = TL.getArg(I)->getTypeSourceInfo();
+
+      TypeSourceInfo *TSI = TL.getParam(I)->getTypeSourceInfo();
       if (TSI) Visit(TSI->getTypeLoc(), Sema::AbstractParamType);
     }
   }
@@ -4267,9 +4310,7 @@
 /// Check for invalid uses of an abstract type within a class definition.
 static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
                                     CXXRecordDecl *RD) {
-  for (CXXRecordDecl::decl_iterator
-         I = RD->decls_begin(), E = RD->decls_end(); I != E; ++I) {
-    Decl *D = *I;
+  for (auto *D : RD->decls()) {
     if (D->isImplicit()) continue;
 
     // Methods and method templates.
@@ -4318,9 +4359,7 @@
       !Record->isAggregate() && !Record->hasUserDeclaredConstructor() &&
       !Record->isLambda()) {
     bool Complained = false;
-    for (RecordDecl::field_iterator F = Record->field_begin(), 
-                                 FEnd = Record->field_end();
-         F != FEnd; ++F) {
+    for (const auto *F : Record->fields()) {
       if (F->hasInClassInitializer() || F->isUnnamedBitfield())
         continue;
 
@@ -4381,27 +4420,25 @@
   }
 
   if (!Record->isDependentType()) {
-    for (CXXRecordDecl::method_iterator M = Record->method_begin(),
-                                     MEnd = Record->method_end();
-         M != MEnd; ++M) {
+    for (auto *M : Record->methods()) {
       // See if a method overloads virtual methods in a base
       // class without overriding any.
       if (!M->isStatic())
-        DiagnoseHiddenVirtualMethods(*M);
+        DiagnoseHiddenVirtualMethods(M);
 
       // Check whether the explicitly-defaulted special members are valid.
       if (!M->isInvalidDecl() && M->isExplicitlyDefaulted())
-        CheckExplicitlyDefaultedSpecialMember(*M);
+        CheckExplicitlyDefaultedSpecialMember(M);
 
       // For an explicitly defaulted or deleted special member, we defer
       // determining triviality until the class is complete. That time is now!
       if (!M->isImplicit() && !M->isUserProvided()) {
-        CXXSpecialMember CSM = getSpecialMember(*M);
+        CXXSpecialMember CSM = getSpecialMember(M);
         if (CSM != CXXInvalid) {
-          M->setTrivial(SpecialMemberIsTrivial(*M, CSM));
+          M->setTrivial(SpecialMemberIsTrivial(M, CSM));
 
           // Inform the class that we've finished declaring this member.
-          Record->finishedDefaultedOrDeletedMember(*M);
+          Record->finishedDefaultedOrDeletedMember(M);
         }
       }
     }
@@ -4420,10 +4457,8 @@
   // destructor for the class is trivial.
   if (LangOpts.CPlusPlus11 && !Record->isDependentType() &&
       !Record->isLiteral() && !Record->getNumVBases()) {
-    for (CXXRecordDecl::method_iterator M = Record->method_begin(),
-                                     MEnd = Record->method_end();
-         M != MEnd; ++M) {
-      if (M->isConstexpr() && M->isInstance() && !isa<CXXConstructorDecl>(*M)) {
+    for (const auto *M : Record->methods()) {
+      if (M->isConstexpr() && M->isInstance() && !isa<CXXConstructorDecl>(M)) {
         switch (Record->getTemplateSpecializationKind()) {
         case TSK_ImplicitInstantiation:
         case TSK_ExplicitInstantiationDeclaration:
@@ -4446,11 +4481,18 @@
     }
   }
 
-  // Check to see if we're trying to lay out a struct using the ms_struct
-  // attribute that is dynamic.
-  if (Record->isMsStruct(Context) && Record->isDynamicClass()) {
-    Diag(Record->getLocation(), diag::warn_pragma_ms_struct_failed);
-    Record->dropAttr<MsStructAttr>();
+  // ms_struct is a request to use the same ABI rules as MSVC.  Check
+  // whether this class uses any C++ features that are implemented
+  // completely differently in MSVC, and if so, emit a diagnostic.
+  // That diagnostic defaults to an error, but we allow projects to
+  // map it down to a warning (or ignore it).  It's a fairly common
+  // practice among users of the ms_struct pragma to mass-annotate
+  // headers, sweeping up a bunch of types that the project doesn't
+  // really rely on MSVC-compatible layout for.  We must therefore
+  // support "ms_struct except for C++ stuff" as a secondary ABI.
+  if (Record->isMsStruct(Context) &&
+      (Record->isPolymorphic() || Record->getNumBases())) {
+    Diag(Record->getLocation(), diag::warn_cxx_ms_struct);
   }
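For illustration only (the record is made up; the pragma spelling is the one Clang already accepts), this is the kind of class the relaxed check now diagnoses with warn_cxx_ms_struct while keeping the attribute, instead of dropping it:

    struct Base { int x; };

    #pragma ms_struct on
    struct Derived : Base {   // has a base class and is polymorphic, so its
      virtual void f();       // layout cannot be made MSVC-compatible; the
    };                        // warning fires, but the attribute is retained
    #pragma ms_struct off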
 
   // Declare inheriting constructors. We do this eagerly here because:
@@ -4463,14 +4505,43 @@
   DeclareInheritingConstructors(Record);
 }
 
+/// Look up the special member function that would be called by a special
+/// member function for a subobject of class type.
+///
+/// \param Class The class type of the subobject.
+/// \param CSM The kind of special member function.
+/// \param FieldQuals If the subobject is a field, its cv-qualifiers.
+/// \param ConstRHS True if this is a copy operation with a const object
+///        on its RHS, that is, if the argument to the outer special member
+///        function is 'const' and this is not a field marked 'mutable'.
+static Sema::SpecialMemberOverloadResult *lookupCallFromSpecialMember(
+    Sema &S, CXXRecordDecl *Class, Sema::CXXSpecialMember CSM,
+    unsigned FieldQuals, bool ConstRHS) {
+  unsigned LHSQuals = 0;
+  if (CSM == Sema::CXXCopyAssignment || CSM == Sema::CXXMoveAssignment)
+    LHSQuals = FieldQuals;
+
+  unsigned RHSQuals = FieldQuals;
+  if (CSM == Sema::CXXDefaultConstructor || CSM == Sema::CXXDestructor)
+    RHSQuals = 0;
+  else if (ConstRHS)
+    RHSQuals |= Qualifiers::Const;
+
+  return S.LookupSpecialMember(Class, CSM,
+                               RHSQuals & Qualifiers::Const,
+                               RHSQuals & Qualifiers::Volatile,
+                               false,
+                               LHSQuals & Qualifiers::Const,
+                               LHSQuals & Qualifiers::Volatile);
+}
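An illustrative case (types hypothetical) of why the field's cv-qualifiers are threaded through here: for an assignment operator they qualify the implicit object argument as well as the argument, so a const member of class type leaves no viable assignment to call:

    struct Inner { Inner &operator=(const Inner &); };

    struct Outer {
      const Inner i;   // LHSQuals includes 'const': Inner::operator= cannot be
                       // called on a const object, so Outer's implicit copy
                       // assignment ends up deleted
    };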
+
 /// Is the special member function which would be selected to perform the
 /// specified operation on the specified class type a constexpr constructor?
 static bool specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
                                      Sema::CXXSpecialMember CSM,
-                                     bool ConstArg) {
+                                     unsigned Quals, bool ConstRHS) {
   Sema::SpecialMemberOverloadResult *SMOR =
-      S.LookupSpecialMember(ClassDecl, CSM, ConstArg,
-                            false, false, false, false);
+      lookupCallFromSpecialMember(S, ClassDecl, CSM, Quals, ConstRHS);
   if (!SMOR || !SMOR->getMethod())
     // A constructor we wouldn't select can't be "involved in initializing"
     // anything.
@@ -4540,14 +4611,12 @@
   //      sub-objects shall be a constexpr constructor;
   //   -- the assignment operator selected to copy/move each direct base
   //      class is a constexpr function, and
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
-                                       BEnd = ClassDecl->bases_end();
-       B != BEnd; ++B) {
-    const RecordType *BaseType = B->getType()->getAs<RecordType>();
+  for (const auto &B : ClassDecl->bases()) {
+    const RecordType *BaseType = B.getType()->getAs<RecordType>();
     if (!BaseType) continue;
 
     CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
-    if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, ConstArg))
+    if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, 0, ConstArg))
       return false;
   }
 
@@ -4555,18 +4624,18 @@
   //      [...] shall be a constexpr constructor;
   //   -- every non-static data member and base class sub-object shall be
   //      initialized
-  //   -- for each non-stastic data member of X that is of class type (or array
+  //   -- for each non-static data member of X that is of class type (or array
   //      thereof), the assignment operator selected to copy/move that member is
   //      a constexpr function
-  for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
-                               FEnd = ClassDecl->field_end();
-       F != FEnd; ++F) {
+  for (const auto *F : ClassDecl->fields()) {
     if (F->isInvalidDecl())
       continue;
-    if (const RecordType *RecordTy =
-            S.Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
+    QualType BaseType = S.Context.getBaseElementType(F->getType());
+    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
       CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
-      if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM, ConstArg))
+      if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM,
+                                    BaseType.getCVRQualifiers(),
+                                    ConstArg && !F->isMutable()))
         return false;
     }
   }
@@ -4598,15 +4667,6 @@
   return S.ComputeInheritingCtorExceptionSpec(cast<CXXConstructorDecl>(MD));
 }
 
-static void
-updateExceptionSpec(Sema &S, FunctionDecl *FD, const FunctionProtoType *FPT,
-                    const Sema::ImplicitExceptionSpecification &ExceptSpec) {
-  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
-  ExceptSpec.getEPI(EPI);
-  FD->setType(S.Context.getFunctionType(FPT->getResultType(),
-                                        FPT->getArgTypes(), EPI));
-}
-
 static FunctionProtoType::ExtProtoInfo getImplicitMethodEPI(Sema &S,
                                                             CXXMethodDecl *MD) {
   FunctionProtoType::ExtProtoInfo EPI;
@@ -4631,8 +4691,11 @@
   ImplicitExceptionSpecification ExceptSpec =
       computeImplicitExceptionSpec(*this, Loc, MD);
 
+  FunctionProtoType::ExtProtoInfo EPI;
+  ExceptSpec.getEPI(EPI);
+
   // Update the type of the special member to use it.
-  updateExceptionSpec(*this, MD, FPT, ExceptSpec);
+  UpdateExceptionSpec(MD, EPI);
 
   // A user-provided destructor can be defined outside the class. When that
   // happens, be sure to update the exception specification on both
@@ -4640,8 +4703,7 @@
   const FunctionProtoType *CanonicalFPT =
     MD->getCanonicalDecl()->getType()->castAs<FunctionProtoType>();
   if (CanonicalFPT->getExceptionSpecType() == EST_Unevaluated)
-    updateExceptionSpec(*this, MD->getCanonicalDecl(),
-                        CanonicalFPT, ExceptSpec);
+    UpdateExceptionSpec(MD->getCanonicalDecl(), EPI);
 }
 
 void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
@@ -4691,7 +4753,7 @@
   QualType ReturnType = Context.VoidTy;
   if (CSM == CXXCopyAssignment || CSM == CXXMoveAssignment) {
     // Check for return type matching.
-    ReturnType = Type->getResultType();
+    ReturnType = Type->getReturnType();
     QualType ExpectedReturnType =
         Context.getLValueReferenceType(Context.getTypeDeclType(RD));
     if (!Context.hasSameType(ReturnType, ExpectedReturnType)) {
@@ -4709,7 +4771,7 @@
   }
 
   // Check for parameter type matching.
-  QualType ArgType = ExpectedParams ? Type->getArgType(0) : QualType();
+  QualType ArgType = ExpectedParams ? Type->getParamType(0) : QualType();
   bool HasConstParam = false;
   if (ExpectedParams && ArgType->isReferenceType()) {
     // Argument must be reference to possibly-const T.
@@ -4803,6 +4865,7 @@
       //   [For a] user-provided explicitly-defaulted function [...] if such a
       //   function is implicitly defined as deleted, the program is ill-formed.
       Diag(MD->getLocation(), diag::err_out_of_line_default_deletes) << CSM;
+      ShouldDeleteSpecialMember(MD, CSM, /*Diagnose*/true);
       HadError = true;
     }
   }
@@ -4865,7 +4928,7 @@
   bool Diagnose;
 
   // Properties of the special member, computed for convenience.
-  bool IsConstructor, IsAssignment, IsMove, ConstArg, VolatileArg;
+  bool IsConstructor, IsAssignment, IsMove, ConstArg;
   SourceLocation Loc;
 
   bool AllFieldsAreConst;
@@ -4874,7 +4937,7 @@
                             Sema::CXXSpecialMember CSM, bool Diagnose)
     : S(S), MD(MD), CSM(CSM), Diagnose(Diagnose),
       IsConstructor(false), IsAssignment(false), IsMove(false),
-      ConstArg(false), VolatileArg(false), Loc(MD->getLocation()),
+      ConstArg(false), Loc(MD->getLocation()),
       AllFieldsAreConst(true) {
     switch (CSM) {
       case Sema::CXXDefaultConstructor:
@@ -4899,8 +4962,9 @@
     }
 
     if (MD->getNumParams()) {
-      ConstArg = MD->getParamDecl(0)->getType().isConstQualified();
-      VolatileArg = MD->getParamDecl(0)->getType().isVolatileQualified();
+      if (const ReferenceType *RT =
+              MD->getParamDecl(0)->getType()->getAs<ReferenceType>())
+        ConstArg = RT->getPointeeType().isConstQualified();
     }
   }
 
@@ -4908,21 +4972,9 @@
 
   /// Look up the corresponding special member in the given class.
   Sema::SpecialMemberOverloadResult *lookupIn(CXXRecordDecl *Class,
-                                              unsigned Quals) {
-    unsigned TQ = MD->getTypeQualifiers();
-    // cv-qualifiers on class members don't affect default ctor / dtor calls.
-    if (CSM == Sema::CXXDefaultConstructor || CSM == Sema::CXXDestructor)
-      Quals = 0;
-    // cv-qualifiers on class members affect the type of both '*this' and the
-    // argument for an assignment.
-    if (IsAssignment)
-      TQ |= Quals;
-    return S.LookupSpecialMember(Class, CSM,
-                                 ConstArg || (Quals & Qualifiers::Const),
-                                 VolatileArg || (Quals & Qualifiers::Volatile),
-                                 MD->getRefQualifier() == RQ_RValue,
-                                 TQ & Qualifiers::Const,
-                                 TQ & Qualifiers::Volatile);
+                                              unsigned Quals, bool IsMutable) {
+    return lookupCallFromSpecialMember(S, Class, CSM, Quals,
+                                       ConstArg && !IsMutable);
   }
 
   typedef llvm::PointerUnion<CXXBaseSpecifier*, FieldDecl*> Subobject;
@@ -5017,6 +5069,7 @@
 bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
     CXXRecordDecl *Class, Subobject Subobj, unsigned Quals) {
   FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
+  bool IsMutable = Field && Field->isMutable();
 
   // C++11 [class.ctor]p5:
   // -- any direct or virtual base class, or non-static data member with no
@@ -5034,7 +5087,8 @@
   //    that is deleted or inaccessible
   if (!(CSM == Sema::CXXDefaultConstructor &&
         Field && Field->hasInClassInitializer()) &&
-      shouldDeleteForSubobjectCall(Subobj, lookupIn(Class, Quals), false))
+      shouldDeleteForSubobjectCall(Subobj, lookupIn(Class, Quals, IsMutable),
+                                   false))
     return true;
 
   // C++11 [class.ctor]p5, C++11 [class.copy]p11:
@@ -5122,9 +5176,7 @@
       bool AllVariantFieldsAreConst = true;
 
       // FIXME: Handle anonymous unions declared within anonymous unions.
-      for (CXXRecordDecl::field_iterator UI = FieldRecord->field_begin(),
-                                         UE = FieldRecord->field_end();
-           UI != UE; ++UI) {
+      for (auto *UI : FieldRecord->fields()) {
         QualType UnionFieldType = S.Context.getBaseElementType(UI->getType());
 
         if (!UnionFieldType.isConstQualified())
@@ -5132,14 +5184,14 @@
 
         CXXRecordDecl *UnionFieldRecord = UnionFieldType->getAsCXXRecordDecl();
         if (UnionFieldRecord &&
-            shouldDeleteForClassSubobject(UnionFieldRecord, *UI,
+            shouldDeleteForClassSubobject(UnionFieldRecord, UI,
                                           UnionFieldType.getCVRQualifiers()))
           return true;
       }
 
       // At least one member in each anonymous union must be non-const
       if (CSM == Sema::CXXDefaultConstructor && AllVariantFieldsAreConst &&
-          FieldRecord->field_begin() != FieldRecord->field_end()) {
+          !FieldRecord->field_empty()) {
         if (Diagnose)
           S.Diag(FieldRecord->getLocation(),
                  diag::note_deleted_default_ctor_all_const)
@@ -5167,7 +5219,7 @@
   // This is a silly definition, because it gives an empty union a deleted
   // default constructor. Don't do that.
   if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst &&
-      (MD->getParent()->field_begin() != MD->getParent()->field_end())) {
+      !MD->getParent()->field_empty()) {
     if (Diagnose)
       S.Diag(MD->getParent()->getLocation(),
              diag::note_deleted_default_ctor_all_const)
@@ -5218,27 +5270,25 @@
     // In Microsoft mode, a user-declared move only causes the deletion of the
     // corresponding copy operation, not both copy operations.
     if (RD->hasUserDeclaredMoveConstructor() &&
-        (!getLangOpts().MicrosoftMode || CSM == CXXCopyConstructor)) {
+        (!getLangOpts().MSVCCompat || CSM == CXXCopyConstructor)) {
       if (!Diagnose) return true;
 
       // Find any user-declared move constructor.
-      for (CXXRecordDecl::ctor_iterator I = RD->ctor_begin(),
-                                        E = RD->ctor_end(); I != E; ++I) {
+      for (auto *I : RD->ctors()) {
         if (I->isMoveConstructor()) {
-          UserDeclaredMove = *I;
+          UserDeclaredMove = I;
           break;
         }
       }
       assert(UserDeclaredMove);
     } else if (RD->hasUserDeclaredMoveAssignment() &&
-               (!getLangOpts().MicrosoftMode || CSM == CXXCopyAssignment)) {
+               (!getLangOpts().MSVCCompat || CSM == CXXCopyAssignment)) {
       if (!Diagnose) return true;
 
       // Find any user-declared move assignment operator.
-      for (CXXRecordDecl::method_iterator I = RD->method_begin(),
-                                          E = RD->method_end(); I != E; ++I) {
+      for (auto *I : RD->methods()) {
         if (I->isMoveAssignmentOperator()) {
-          UserDeclaredMove = *I;
+          UserDeclaredMove = I;
           break;
         }
       }
@@ -5274,26 +5324,22 @@
 
   SpecialMemberDeletionInfo SMI(*this, MD, CSM, Diagnose);
 
-  for (CXXRecordDecl::base_class_iterator BI = RD->bases_begin(),
-                                          BE = RD->bases_end(); BI != BE; ++BI)
-    if (!BI->isVirtual() &&
-        SMI.shouldDeleteForBase(BI))
+  for (auto &BI : RD->bases())
+    if (!BI.isVirtual() &&
+        SMI.shouldDeleteForBase(&BI))
       return true;
 
   // Per DR1611, do not consider virtual bases of constructors of abstract
   // classes, since we are not going to construct them.
   if (!RD->isAbstract() || !SMI.IsConstructor) {
-    for (CXXRecordDecl::base_class_iterator BI = RD->vbases_begin(),
-                                            BE = RD->vbases_end();
-         BI != BE; ++BI)
-      if (SMI.shouldDeleteForBase(BI))
+    for (auto &BI : RD->vbases())
+      if (SMI.shouldDeleteForBase(&BI))
         return true;
   }
 
-  for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
-                                     FE = RD->field_end(); FI != FE; ++FI)
+  for (auto *FI : RD->fields())
     if (!FI->isInvalidDecl() && !FI->isUnnamedBitfield() &&
-        SMI.shouldDeleteForField(*FI))
+        SMI.shouldDeleteForField(FI))
       return true;
 
   if (SMI.shouldDeleteForAllConstMembers())
@@ -5312,7 +5358,7 @@
 /// member that was most likely to be intended to be trivial, if any.
 static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
                                      Sema::CXXSpecialMember CSM, unsigned Quals,
-                                     CXXMethodDecl **Selected) {
+                                     bool ConstRHS, CXXMethodDecl **Selected) {
   if (Selected)
     *Selected = 0;
 
@@ -5336,11 +5382,10 @@
       CXXConstructorDecl *DefCtor = 0;
       if (RD->needsImplicitDefaultConstructor())
         S.DeclareImplicitDefaultConstructor(RD);
-      for (CXXRecordDecl::ctor_iterator CI = RD->ctor_begin(),
-                                        CE = RD->ctor_end(); CI != CE; ++CI) {
+      for (auto *CI : RD->ctors()) {
         if (!CI->isDefaultConstructor())
           continue;
-        DefCtor = *CI;
+        DefCtor = CI;
         if (!DefCtor->isUserProvided())
           break;
       }
@@ -5403,11 +5448,7 @@
   case Sema::CXXMoveAssignment:
   NeedOverloadResolution:
     Sema::SpecialMemberOverloadResult *SMOR =
-      S.LookupSpecialMember(RD, CSM,
-                            Quals & Qualifiers::Const,
-                            Quals & Qualifiers::Volatile,
-                            /*RValueThis*/false, /*ConstThis*/false,
-                            /*VolatileThis*/false);
+        lookupCallFromSpecialMember(S, RD, CSM, Quals, ConstRHS);
 
     // The standard doesn't describe how to behave if the lookup is ambiguous.
     // We treat it as not making the member non-trivial, just like the standard
@@ -5433,10 +5474,9 @@
 }
 
 static CXXConstructorDecl *findUserDeclaredCtor(CXXRecordDecl *RD) {
-  for (CXXRecordDecl::ctor_iterator CI = RD->ctor_begin(), CE = RD->ctor_end();
-       CI != CE; ++CI)
+  for (auto *CI : RD->ctors())
     if (!CI->isImplicit())
-      return *CI;
+      return CI;
 
   // Look for constructor templates.
   typedef CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl> tmpl_iter;
@@ -5462,7 +5502,7 @@
 
 /// Check whether the special member selected for a given type would be trivial.
 static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
-                                      QualType SubType,
+                                      QualType SubType, bool ConstRHS,
                                       Sema::CXXSpecialMember CSM,
                                       TrivialSubobjectKind Kind,
                                       bool Diagnose) {
@@ -5472,10 +5512,13 @@
 
   CXXMethodDecl *Selected;
   if (findTrivialSpecialMember(S, SubRD, CSM, SubType.getCVRQualifiers(),
-                               Diagnose ? &Selected : 0))
+                               ConstRHS, Diagnose ? &Selected : 0))
     return true;
 
   if (Diagnose) {
+    if (ConstRHS)
+      SubType.addConst();
+
     if (!Selected && CSM == Sema::CXXDefaultConstructor) {
       S.Diag(SubobjLoc, diag::note_nontrivial_no_def_ctor)
         << Kind << SubType.getUnqualifiedType();
@@ -5511,8 +5554,7 @@
 static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
                                      Sema::CXXSpecialMember CSM,
                                      bool ConstArg, bool Diagnose) {
-  for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
-                                     FE = RD->field_end(); FI != FE; ++FI) {
+  for (const auto *FI : RD->fields()) {
     if (FI->isInvalidDecl() || FI->isUnnamedBitfield())
       continue;
 
@@ -5532,7 +5574,7 @@
     //       brace-or-equal-initializer
     if (CSM == Sema::CXXDefaultConstructor && FI->hasInClassInitializer()) {
       if (Diagnose)
-        S.Diag(FI->getLocation(), diag::note_nontrivial_in_class_init) << *FI;
+        S.Diag(FI->getLocation(), diag::note_nontrivial_in_class_init) << FI;
       return false;
     }
 
@@ -5548,10 +5590,9 @@
       return false;
     }
 
-    if (ConstArg && !FI->isMutable())
-      FieldType.addConst();
-    if (!checkTrivialSubobjectCall(S, FI->getLocation(), FieldType, CSM,
-                                   TSK_Field, Diagnose))
+    bool ConstRHS = ConstArg && !FI->isMutable();
+    if (!checkTrivialSubobjectCall(S, FI->getLocation(), FieldType, ConstRHS,
+                                   CSM, TSK_Field, Diagnose))
       return false;
   }
 
@@ -5562,10 +5603,9 @@
 /// the given kind.
 void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD, CXXSpecialMember CSM) {
   QualType Ty = Context.getRecordType(RD);
-  if (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment)
-    Ty.addConst();
 
-  checkTrivialSubobjectCall(*this, RD->getLocation(), Ty, CSM,
+  bool ConstArg = (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment);
+  checkTrivialSubobjectCall(*this, RD->getLocation(), Ty, ConstArg, CSM,
                             TSK_CompleteObject, /*Diagnose*/true);
 }
 
@@ -5648,12 +5688,9 @@
   //   A [default constructor or destructor] is trivial if
   //    -- all the direct base classes have trivial [default constructors or
   //       destructors]
-  for (CXXRecordDecl::base_class_iterator BI = RD->bases_begin(),
-                                          BE = RD->bases_end(); BI != BE; ++BI)
-    if (!checkTrivialSubobjectCall(*this, BI->getLocStart(),
-                                   ConstArg ? BI->getType().withConst()
-                                            : BI->getType(),
-                                   CSM, TSK_BaseClass, Diagnose))
+  for (const auto &BI : RD->bases())
+    if (!checkTrivialSubobjectCall(*this, BI.getLocStart(), BI.getType(),
+                                   ConstArg, CSM, TSK_BaseClass, Diagnose))
       return false;
 
   // C++11 [class.ctor]p5, C++11 [class.dtor]p5:
@@ -5697,8 +5734,7 @@
     }
 
     // Must have a virtual method.
-    for (CXXRecordDecl::method_iterator MI = RD->method_begin(),
-                                        ME = RD->method_end(); MI != ME; ++MI) {
+    for (const auto *MI : RD->methods()) {
       if (MI->isVirtual()) {
         SourceLocation MLoc = MI->getLocStart();
         Diag(MLoc, diag::note_nontrivial_has_virtual) << RD << 0;
@@ -5992,6 +6028,18 @@
   PopDeclContext();
 }
 
+/// This is used to implement the constant expression evaluation part of the
+/// attribute enable_if extension. There is nothing in standard C++ which would
+/// require reentering parameters.
+void Sema::ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param) {
+  if (!Param)
+    return;
+
+  S->AddDecl(Param);
+  if (Param->getDeclName())
+    IdResolver.AddDecl(Param);
+}
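A small sketch of the Clang enable_if extension this hook supports (the declaration is illustrative): the attribute's constant expression refers to a parameter, so the parameter must be re-entered into scope while the delayed attribute is parsed inside the class:

    struct Buffer {
      void reserve(int n)
          __attribute__((enable_if(n >= 0, "size must be non-negative")));
    };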
+
 /// ActOnStartDelayedCXXMethodDeclaration - We have completed
 /// parsing a top-level (non-nested) C++ class, and we are now
 /// parsing those parts of the given Method declaration that could
@@ -6108,14 +6156,14 @@
   // case any of the errors above fired) and with "void" as the
   // return type, since constructors don't have return types.
   const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
-  if (Proto->getResultType() == Context.VoidTy && !D.isInvalidType())
+  if (Proto->getReturnType() == Context.VoidTy && !D.isInvalidType())
     return R;
 
   FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
   EPI.TypeQuals = 0;
   EPI.RefQualifier = RQ_None;
-  
-  return Context.getFunctionType(Context.VoidTy, Proto->getArgTypes(), EPI);
+
+  return Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(), EPI);
 }
 
 /// CheckConstructor - Checks a fully-formed constructor for
@@ -6190,9 +6238,9 @@
 
 static inline bool
 FTIHasSingleVoidArgument(DeclaratorChunk::FunctionTypeInfo &FTI) {
-  return (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
-          FTI.ArgInfo[0].Param &&
-          cast<ParmVarDecl>(FTI.ArgInfo[0].Param)->getType()->isVoidType());
+  return (FTI.NumParams == 1 && !FTI.isVariadic && FTI.Params[0].Ident == 0 &&
+          FTI.Params[0].Param &&
+          cast<ParmVarDecl>(FTI.Params[0].Param)->getType()->isVoidType());
 }
 
 /// CheckDestructorDeclarator - Called by ActOnDeclarator to check
@@ -6273,11 +6321,11 @@
   }
   
   // Make sure we don't have any parameters.
-  if (FTI.NumArgs > 0 && !FTIHasSingleVoidArgument(FTI)) {
+  if (FTI.NumParams > 0 && !FTIHasSingleVoidArgument(FTI)) {
     Diag(D.getIdentifierLoc(), diag::err_destructor_with_params);
 
     // Delete the parameters.
-    FTI.freeArgs();
+    FTI.freeParams();
     D.setInvalidType();
   }
 
@@ -6343,11 +6391,11 @@
   const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
 
   // Make sure we don't have any parameters.
-  if (Proto->getNumArgs() > 0) {
+  if (Proto->getNumParams() > 0) {
     Diag(D.getIdentifierLoc(), diag::err_conv_function_with_params);
 
     // Delete the parameters.
-    D.getFunctionTypeInfo().freeArgs();
+    D.getFunctionTypeInfo().freeParams();
     D.setInvalidType();
   } else if (Proto->isVariadic()) {
     Diag(D.getIdentifierLoc(), diag::err_conv_function_variadic);
@@ -6356,11 +6404,11 @@
 
   // Diagnose "&operator bool()" and other such nonsense.  This
   // is actually a gcc extension which we don't support.
-  if (Proto->getResultType() != ConvType) {
+  if (Proto->getReturnType() != ConvType) {
     Diag(D.getIdentifierLoc(), diag::err_conv_function_with_complex_decl)
-      << Proto->getResultType();
+        << Proto->getReturnType();
     D.setInvalidType();
-    ConvType = Proto->getResultType();
+    ConvType = Proto->getReturnType();
   }
 
   // C++ [class.conv.fct]p4:
@@ -6464,9 +6512,8 @@
       NS->setInline(*IsInline);
     // Patch up the lookup table for the containing namespace. This isn't really
     // correct, but it's good enough for this particular case.
-    for (DeclContext::decl_iterator I = PrevNS->decls_begin(),
-                                    E = PrevNS->decls_end(); I != E; ++I)
-      if (NamedDecl *ND = dyn_cast<NamedDecl>(*I))
+    for (auto *I : PrevNS->decls())
+      if (auto *ND = dyn_cast<NamedDecl>(I))
         PrevNS->getParent()->makeDeclVisibleInContext(ND);
     return;
   }
@@ -6477,8 +6524,7 @@
     S.Diag(Loc, diag::warn_inline_namespace_reopened_noninline)
       << FixItHint::CreateInsertion(KeywordLoc, "inline ");
   else
-    S.Diag(Loc, diag::err_inline_namespace_mismatch)
-      << IsInline;
+    S.Diag(Loc, diag::err_inline_namespace_mismatch) << *IsInline;
 
   S.Diag(PrevNS->getLocation(), diag::note_previous_definition);
   *IsInline = PrevNS->isInline();
@@ -6830,7 +6876,7 @@
 // Callback to only accept typo corrections that are namespaces.
 class NamespaceValidatorCCC : public CorrectionCandidateCallback {
 public:
-  bool ValidateCandidate(const TypoCorrection &candidate) LLVM_OVERRIDE {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     if (NamedDecl *ND = candidate.getCorrectionDecl())
       return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
     return false;
@@ -6886,7 +6932,7 @@
   UsingDirectiveDecl *UDir = 0;
   NestedNameSpecifier *Qualifier = 0;
   if (SS.isSet())
-    Qualifier = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+    Qualifier = SS.getScopeRep();
   
   // Lookup namespace name.
   LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName);
@@ -7130,13 +7176,7 @@
   if (FoundEquivalentDecl)
     return false;
 
-  if (Target->isFunctionOrFunctionTemplate()) {
-    FunctionDecl *FD;
-    if (isa<FunctionTemplateDecl>(Target))
-      FD = cast<FunctionTemplateDecl>(Target)->getTemplatedDecl();
-    else
-      FD = cast<FunctionDecl>(Target);
-
+  if (FunctionDecl *FD = Target->getAsFunction()) {
     NamedDecl *OldDecl = 0;
     switch (CheckOverload(0, FD, Previous, OldDecl, /*IsForUsingDecl*/ true)) {
     case Ovl_Overload:
@@ -7145,7 +7185,7 @@
     case Ovl_NonFunction:
       Diag(Using->getLocation(), diag::err_using_decl_conflict);
       break;
-      
+
     // We found a decl with the exact signature.
     case Ovl_Match:
       // If we're in a record, we want to hide the target, so we
@@ -7274,7 +7314,7 @@
       : HasTypenameKeyword(HasTypenameKeyword),
         IsInstantiation(IsInstantiation), RequireMember(RequireMember) {}
 
-  bool ValidateCandidate(const TypoCorrection &Candidate) LLVM_OVERRIDE {
+  bool ValidateCandidate(const TypoCorrection &Candidate) override {
     NamedDecl *ND = Candidate.getCorrectionDecl();
 
     // Keywords are not valid here.
@@ -7353,7 +7393,7 @@
     return 0;
 
   // Check for bad qualifiers.
-  if (CheckUsingDeclQualifier(UsingLoc, SS, IdentLoc))
+  if (CheckUsingDeclQualifier(UsingLoc, SS, NameInfo, IdentLoc))
     return 0;
 
   DeclContext *LookupContext = computeDeclContext(SS);
@@ -7534,8 +7574,7 @@
   if (!CurContext->getRedeclContext()->isRecord())
     return false;
 
-  NestedNameSpecifier *Qual
-    = static_cast<NestedNameSpecifier*>(SS.getScopeRep());
+  NestedNameSpecifier *Qual = SS.getScopeRep();
 
   for (LookupResult::iterator I = Prev.begin(), E = Prev.end(); I != E; ++I) {
     NamedDecl *D = *I;
@@ -7580,6 +7619,7 @@
 /// scope.  If an error is found, diagnoses it and returns true.
 bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
                                    const CXXScopeSpec &SS,
+                                   const DeclarationNameInfo &NameInfo,
                                    SourceLocation NameLoc) {
   DeclContext *NamedContext = computeDeclContext(SS);
 
@@ -7591,8 +7631,56 @@
     // If we weren't able to compute a valid scope, it must be a
     // dependent class scope.
     if (!NamedContext || NamedContext->isRecord()) {
+      auto *RD = dyn_cast<CXXRecordDecl>(NamedContext);
+      if (RD && RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), RD))
+        RD = 0;
+
       Diag(NameLoc, diag::err_using_decl_can_not_refer_to_class_member)
         << SS.getRange();
+
+      // If we have a complete, non-dependent source type, try to suggest a
+      // way to get the same effect.
+      if (!RD)
+        return true;
+
+      // Find what this using-declaration was referring to.
+      LookupResult R(*this, NameInfo, LookupOrdinaryName);
+      R.setHideTags(false);
+      R.suppressDiagnostics();
+      LookupQualifiedName(R, RD);
+
+      if (R.getAsSingle<TypeDecl>()) {
+        if (getLangOpts().CPlusPlus11) {
+          // Convert 'using X::Y;' to 'using Y = X::Y;'.
+          Diag(SS.getBeginLoc(), diag::note_using_decl_class_member_workaround)
+            << 0 // alias declaration
+            << FixItHint::CreateInsertion(SS.getBeginLoc(),
+                                          NameInfo.getName().getAsString() +
+                                              " = ");
+        } else {
+          // Convert 'using X::Y;' to 'typedef X::Y Y;'.
+          SourceLocation InsertLoc =
+              PP.getLocForEndOfToken(NameInfo.getLocEnd());
+          Diag(InsertLoc, diag::note_using_decl_class_member_workaround)
+            << 1 // typedef declaration
+            << FixItHint::CreateReplacement(UsingLoc, "typedef")
+            << FixItHint::CreateInsertion(
+                   InsertLoc, " " + NameInfo.getName().getAsString());
+        }
+      } else if (R.getAsSingle<VarDecl>()) {
+        // Don't provide a fixit outside C++11 mode; we don't want to suggest
+        // repeating the type of the static data member here.
+        FixItHint FixIt;
+        if (getLangOpts().CPlusPlus11) {
+          // Convert 'using X::Y;' to 'auto &Y = X::Y;'.
+          FixIt = FixItHint::CreateReplacement(
+              UsingLoc, "auto &" + NameInfo.getName().getAsString() + " = ");
+        }
+
+        Diag(UsingLoc, diag::note_using_decl_class_member_workaround)
+          << 2 // reference declaration
+          << FixIt;
+      }
       return true;
     }
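For reference, an illustrative spelling (names hypothetical) of the three workarounds the new note suggests when a using-declaration outside class scope names a class member:

    struct X {
      typedef int type;
      static int value;
    };
    int X::value;

    // using X::type;          // error: cannot refer to a class member
    // using X::value;

    using type = X::type;      // C++11 alias-declaration fixit
    typedef X::type type_t;    // pre-C++11 typedef fixit (the real fixit
                               // reuses the original name)
    auto &value = X::value;    // C++11 fixit for a static data member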
 
@@ -7618,7 +7706,7 @@
     // but we don't have that level of source info.
     Diag(SS.getRange().getBegin(),
          diag::err_using_decl_nested_name_specifier_is_not_class)
-      << (NestedNameSpecifier*) SS.getScopeRep() << SS.getRange();
+      << SS.getScopeRep() << SS.getRange();
     return true;
   }
 
@@ -7643,7 +7731,7 @@
 
       Diag(SS.getRange().getBegin(),
            diag::err_using_decl_nested_name_specifier_is_not_base_class)
-        << (NestedNameSpecifier*) SS.getScopeRep()
+        << SS.getScopeRep()
         << cast<CXXRecordDecl>(CurContext)
         << SS.getRange();
       return true;
@@ -7703,7 +7791,7 @@
 
   Diag(SS.getRange().getBegin(),
        diag::err_using_decl_nested_name_specifier_is_not_base_class)
-    << (NestedNameSpecifier*) SS.getScopeRep()
+    << SS.getScopeRep()
     << cast<CXXRecordDecl>(CurContext)
     << SS.getRange();
 
@@ -7923,40 +8011,34 @@
     return ExceptSpec;
 
   // Direct base-class constructors.
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
-                                       BEnd = ClassDecl->bases_end();
-       B != BEnd; ++B) {
-    if (B->isVirtual()) // Handled below.
+  for (const auto &B : ClassDecl->bases()) {
+    if (B.isVirtual()) // Handled below.
       continue;
     
-    if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+    if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
       CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
       CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
       // If this is a deleted function, add it anyway. This might be conformant
       // with the standard. This might not. I'm not sure. It might not matter.
       if (Constructor)
-        ExceptSpec.CalledDecl(B->getLocStart(), Constructor);
+        ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
     }
   }
 
   // Virtual base-class constructors.
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
-                                       BEnd = ClassDecl->vbases_end();
-       B != BEnd; ++B) {
-    if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+  for (const auto &B : ClassDecl->vbases()) {
+    if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
       CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
       CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
       // If this is a deleted function, add it anyway. This might be conformant
       // with the standard. This might not. I'm not sure. It might not matter.
       if (Constructor)
-        ExceptSpec.CalledDecl(B->getLocStart(), Constructor);
+        ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
     }
   }
 
   // Field constructors.
-  for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
-                               FEnd = ClassDecl->field_end();
-       F != FEnd; ++F) {
+  for (const auto *F : ClassDecl->fields()) {
     if (F->hasInClassInitializer()) {
       if (Expr *E = F->getInClassInitializer())
         ExceptSpec.CalledExpr(E);
@@ -8012,40 +8094,34 @@
   ExceptSpec.CalledDecl(CD->getLocStart(), InheritedCD);
 
   // Direct base-class constructors.
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
-                                       BEnd = ClassDecl->bases_end();
-       B != BEnd; ++B) {
-    if (B->isVirtual()) // Handled below.
+  for (const auto &B : ClassDecl->bases()) {
+    if (B.isVirtual()) // Handled below.
       continue;
 
-    if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+    if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
       CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
       if (BaseClassDecl == InheritedDecl)
         continue;
       CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
       if (Constructor)
-        ExceptSpec.CalledDecl(B->getLocStart(), Constructor);
+        ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
     }
   }
 
   // Virtual base-class constructors.
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
-                                       BEnd = ClassDecl->vbases_end();
-       B != BEnd; ++B) {
-    if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+  for (const auto &B : ClassDecl->vbases()) {
+    if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
       CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
       if (BaseClassDecl == InheritedDecl)
         continue;
       CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
       if (Constructor)
-        ExceptSpec.CalledDecl(B->getLocStart(), Constructor);
+        ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
     }
   }
 
   // Field constructors.
-  for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
-                               FEnd = ClassDecl->field_end();
-       F != FEnd; ++F) {
+  for (const auto *F : ClassDecl->fields()) {
     if (F->hasInClassInitializer()) {
       if (Expr *E = F->getInClassInitializer())
         ExceptSpec.CalledExpr(E);
@@ -8256,10 +8332,8 @@
 
   /// Process all constructors for a class.
   void visitAll(const CXXRecordDecl *RD, VisitFn Callback) {
-    for (CXXRecordDecl::ctor_iterator CtorIt = RD->ctor_begin(),
-                                      CtorE = RD->ctor_end();
-         CtorIt != CtorE; ++CtorIt)
-      (this->*Callback)(*CtorIt);
+    for (const auto *Ctor : RD->ctors())
+      (this->*Callback)(Ctor);
     for (CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl>
              I(RD->decls_begin()), E(RD->decls_end());
          I != E; ++I) {
@@ -8278,7 +8352,7 @@
   void inherit(const CXXConstructorDecl *Ctor) {
     const FunctionProtoType *CtorType =
         Ctor->getType()->castAs<FunctionProtoType>();
-    ArrayRef<QualType> ArgTypes(CtorType->getArgTypes());
+    ArrayRef<QualType> ArgTypes(CtorType->getParamTypes());
     FunctionProtoType::ExtProtoInfo EPI = CtorType->getExtProtoInfo();
 
     SourceLocation UsingLoc = getUsingLoc(Ctor->getParent());
@@ -8306,7 +8380,7 @@
       do
         declareCtor(UsingLoc, Ctor,
                     SemaRef.Context.getFunctionType(
-                        Ctor->getResultType(), ArgTypes.slice(0, Params), EPI));
+                        Ctor->getReturnType(), ArgTypes.slice(0, Params), EPI));
       while (Params > MinParams &&
              Ctor->getParamDecl(--Params)->hasDefaultArg());
     }
@@ -8420,21 +8494,21 @@
     FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
     EPI.ExceptionSpecType = EST_Unevaluated;
     EPI.ExceptionSpecDecl = DerivedCtor;
-    DerivedCtor->setType(Context.getFunctionType(FPT->getResultType(),
-                                                 FPT->getArgTypes(), EPI));
+    DerivedCtor->setType(Context.getFunctionType(FPT->getReturnType(),
+                                                 FPT->getParamTypes(), EPI));
 
     // Build the parameter declarations.
     SmallVector<ParmVarDecl *, 16> ParamDecls;
-    for (unsigned I = 0, N = FPT->getNumArgs(); I != N; ++I) {
+    for (unsigned I = 0, N = FPT->getNumParams(); I != N; ++I) {
       TypeSourceInfo *TInfo =
-          Context.getTrivialTypeSourceInfo(FPT->getArgType(I), UsingLoc);
+          Context.getTrivialTypeSourceInfo(FPT->getParamType(I), UsingLoc);
       ParmVarDecl *PD = ParmVarDecl::Create(
           Context, DerivedCtor, UsingLoc, UsingLoc, /*IdentifierInfo=*/0,
-          FPT->getArgType(I), TInfo, SC_None, /*DefaultArg=*/0);
+          FPT->getParamType(I), TInfo, SC_None, /*DefaultArg=*/0);
       PD->setScopeInfo(0, I);
       PD->setImplicit();
       ParamDecls.push_back(PD);
-      ProtoLoc.setArg(I, PD);
+      ProtoLoc.setParam(I, PD);
     }
 
     // Set up the new constructor.
@@ -8475,11 +8549,9 @@
 
   // Find base classes from which we might inherit constructors.
   SmallVector<CXXRecordDecl*, 4> InheritedBases;
-  for (CXXRecordDecl::base_class_iterator BaseIt = ClassDecl->bases_begin(),
-                                          BaseE = ClassDecl->bases_end();
-       BaseIt != BaseE; ++BaseIt)
-    if (BaseIt->getInheritConstructors())
-      InheritedBases.push_back(BaseIt->getType()->getAsCXXRecordDecl());
+  for (const auto &BaseIt : ClassDecl->bases())
+    if (BaseIt.getInheritConstructors())
+      InheritedBases.push_back(BaseIt.getType()->getAsCXXRecordDecl());
 
   // Go no further if we're not inheriting any constructors.
   if (InheritedBases.empty())
@@ -8532,30 +8604,24 @@
     return ExceptSpec;
 
   // Direct base-class destructors.
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
-                                       BEnd = ClassDecl->bases_end();
-       B != BEnd; ++B) {
-    if (B->isVirtual()) // Handled below.
+  for (const auto &B : ClassDecl->bases()) {
+    if (B.isVirtual()) // Handled below.
       continue;
     
-    if (const RecordType *BaseType = B->getType()->getAs<RecordType>())
-      ExceptSpec.CalledDecl(B->getLocStart(),
+    if (const RecordType *BaseType = B.getType()->getAs<RecordType>())
+      ExceptSpec.CalledDecl(B.getLocStart(),
                    LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl())));
   }
 
   // Virtual base-class destructors.
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
-                                       BEnd = ClassDecl->vbases_end();
-       B != BEnd; ++B) {
-    if (const RecordType *BaseType = B->getType()->getAs<RecordType>())
-      ExceptSpec.CalledDecl(B->getLocStart(),
+  for (const auto &B : ClassDecl->vbases()) {
+    if (const RecordType *BaseType = B.getType()->getAs<RecordType>())
+      ExceptSpec.CalledDecl(B.getLocStart(),
                   LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl())));
   }
 
   // Field destructors.
-  for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
-                               FEnd = ClassDecl->field_end();
-       F != FEnd; ++F) {
+  for (const auto *F : ClassDecl->fields()) {
     if (const RecordType *RecordTy
         = Context.getBaseElementType(F->getType())->getAs<RecordType>())
       ExceptSpec.CalledDecl(F->getLocation(),
@@ -8719,7 +8785,7 @@
   QualType VarType;
 
 public:
-  virtual Expr *build(Sema &S, SourceLocation Loc) const LLVM_OVERRIDE {
+  virtual Expr *build(Sema &S, SourceLocation Loc) const override {
     return assertNotNull(S.BuildDeclRefExpr(Var, VarType, VK_LValue, Loc).take());
   }
 
@@ -8729,7 +8795,7 @@
 
 class ThisBuilder: public ExprBuilder {
 public:
-  virtual Expr *build(Sema &S, SourceLocation Loc) const LLVM_OVERRIDE {
+  virtual Expr *build(Sema &S, SourceLocation Loc) const override {
     return assertNotNull(S.ActOnCXXThis(Loc).takeAs<Expr>());
   }
 };
@@ -8741,7 +8807,7 @@
   const CXXCastPath &Path;
 
 public:
-  virtual Expr *build(Sema &S, SourceLocation Loc) const LLVM_OVERRIDE {
+  virtual Expr *build(Sema &S, SourceLocation Loc) const override {
     return assertNotNull(S.ImpCastExprToType(Builder.build(S, Loc), Type,
                                              CK_UncheckedDerivedToBase, Kind,
                                              &Path).take());
@@ -8756,7 +8822,7 @@
   const ExprBuilder &Builder;
 
 public:
-  virtual Expr *build(Sema &S, SourceLocation Loc) const LLVM_OVERRIDE {
+  virtual Expr *build(Sema &S, SourceLocation Loc) const override {
     return assertNotNull(
         S.CreateBuiltinUnaryOp(Loc, UO_Deref, Builder.build(S, Loc)).take());
   }
@@ -8772,7 +8838,7 @@
   LookupResult &MemberLookup;
 
 public:
-  virtual Expr *build(Sema &S, SourceLocation Loc) const LLVM_OVERRIDE {
+  virtual Expr *build(Sema &S, SourceLocation Loc) const override {
     return assertNotNull(S.BuildMemberReferenceExpr(
         Builder.build(S, Loc), Type, Loc, IsArrow, SS, SourceLocation(), 0,
         MemberLookup, 0).take());
@@ -8788,7 +8854,7 @@
   const ExprBuilder &Builder;
 
 public:
-  virtual Expr *build(Sema &S, SourceLocation Loc) const LLVM_OVERRIDE {
+  virtual Expr *build(Sema &S, SourceLocation Loc) const override {
     return assertNotNull(CastForMoving(S, Builder.build(S, Loc)));
   }
 
@@ -8799,7 +8865,7 @@
   const ExprBuilder &Builder;
 
 public:
-  virtual Expr *build(Sema &S, SourceLocation Loc) const LLVM_OVERRIDE {
+  virtual Expr *build(Sema &S, SourceLocation Loc) const override {
     return assertNotNull(
         S.DefaultLvalueConversion(Builder.build(S, Loc)).take());
   }
@@ -8812,8 +8878,7 @@
   const ExprBuilder &Index;
 
 public:
-  virtual Expr *build(Sema &S, SourceLocation Loc) const
-      LLVM_OVERRIDE {
+  virtual Expr *build(Sema &S, SourceLocation Loc) const override {
     return assertNotNull(S.CreateBuiltinArraySubscriptExpr(
         Base.build(S, Loc), Loc, Index.build(S, Loc), Loc).take());
   }
@@ -9127,8 +9192,9 @@
     return ExceptSpec;
 
   const FunctionProtoType *T = MD->getType()->castAs<FunctionProtoType>();
-  assert(T->getNumArgs() == 1 && "not a copy assignment op");
-  unsigned ArgQuals = T->getArgType(0).getNonReferenceType().getCVRQualifiers();
+  assert(T->getNumParams() == 1 && "not a copy assignment op");
+  unsigned ArgQuals =
+      T->getParamType(0).getNonReferenceType().getCVRQualifiers();
 
   // C++ [except.spec]p14:
   //   An implicitly declared special member function (Clause 12) shall have an
@@ -9140,33 +9206,26 @@
   // Based on a similar decision made for constness in C++0x, we're erring on
   // the side of assuming such calls to be made regardless of whether they
   // actually happen.
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
-                                       BaseEnd = ClassDecl->bases_end();
-       Base != BaseEnd; ++Base) {
-    if (Base->isVirtual())
+  for (const auto &Base : ClassDecl->bases()) {
+    if (Base.isVirtual())
       continue;
 
     CXXRecordDecl *BaseClassDecl
-      = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
     if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl,
                                                             ArgQuals, false, 0))
-      ExceptSpec.CalledDecl(Base->getLocStart(), CopyAssign);
+      ExceptSpec.CalledDecl(Base.getLocStart(), CopyAssign);
   }
 
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
-                                       BaseEnd = ClassDecl->vbases_end();
-       Base != BaseEnd; ++Base) {
+  for (const auto &Base : ClassDecl->vbases()) {
     CXXRecordDecl *BaseClassDecl
-      = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
     if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl,
                                                             ArgQuals, false, 0))
-      ExceptSpec.CalledDecl(Base->getLocStart(), CopyAssign);
+      ExceptSpec.CalledDecl(Base.getLocStart(), CopyAssign);
   }
 
-  for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
-                                  FieldEnd = ClassDecl->field_end();
-       Field != FieldEnd;
-       ++Field) {
+  for (const auto *Field : ClassDecl->fields()) {
     QualType FieldType = Context.getBaseElementType(Field->getType());
     if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
       if (CXXMethodDecl *CopyAssign =
@@ -9263,24 +9322,22 @@
     UserDeclaredOperation = RD->getDestructor();
   } else if (!isa<CXXConstructorDecl>(CopyOp) &&
              RD->hasUserDeclaredCopyConstructor() &&
-             !S.getLangOpts().MicrosoftMode) {
+             !S.getLangOpts().MSVCCompat) {
     // Find any user-declared copy constructor.
-    for (CXXRecordDecl::ctor_iterator I = RD->ctor_begin(),
-                                      E = RD->ctor_end(); I != E; ++I) {
+    for (auto *I : RD->ctors()) {
       if (I->isCopyConstructor()) {
-        UserDeclaredOperation = *I;
+        UserDeclaredOperation = I;
         break;
       }
     }
     assert(UserDeclaredOperation);
   } else if (isa<CXXConstructorDecl>(CopyOp) &&
              RD->hasUserDeclaredCopyAssignment() &&
-             !S.getLangOpts().MicrosoftMode) {
+             !S.getLangOpts().MSVCCompat) {
     // Find any user-declared move assignment operator.
-    for (CXXRecordDecl::method_iterator I = RD->method_begin(),
-                                        E = RD->method_end(); I != E; ++I) {
+    for (auto *I : RD->methods()) {
       if (I->isCopyAssignmentOperator()) {
-        UserDeclaredOperation = *I;
+        UserDeclaredOperation = I;
         break;
       }
     }
@@ -9359,18 +9416,17 @@
   
   // Assign base classes.
   bool Invalid = false;
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
-       E = ClassDecl->bases_end(); Base != E; ++Base) {
+  for (auto &Base : ClassDecl->bases()) {
     // Form the assignment:
     //   static_cast<Base*>(this)->Base::operator=(static_cast<Base&>(other));
-    QualType BaseType = Base->getType().getUnqualifiedType();
+    QualType BaseType = Base.getType().getUnqualifiedType();
     if (!BaseType->isRecordType()) {
       Invalid = true;
       continue;
     }
 
     CXXCastPath BasePath;
-    BasePath.push_back(Base);
+    BasePath.push_back(&Base);
 
     // Construct the "from" expression, which is an implicit cast to the
     // appropriately-qualified base type.
@@ -9401,9 +9457,7 @@
   }
   
   // Assign non-static members.
-  for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
-                                  FieldEnd = ClassDecl->field_end(); 
-       Field != FieldEnd; ++Field) {
+  for (auto *Field : ClassDecl->fields()) {
     if (Field->isUnnamedBitfield())
       continue;
 
@@ -9450,7 +9504,7 @@
     CXXScopeSpec SS; // Intentionally empty
     LookupResult MemberLookup(*this, Field->getDeclName(), Loc,
                               LookupMemberName);
-    MemberLookup.addDecl(*Field);
+    MemberLookup.addDecl(Field);
     MemberLookup.resolveKind();
 
     MemberBuilder From(OtherRef, OtherRefType, /*IsArrow=*/false, MemberLookup);
@@ -9530,33 +9584,26 @@
   // actually happen.
   // Note that a move constructor is not implicitly declared when there are
   // virtual bases, but it can still be user-declared and explicitly defaulted.
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
-                                       BaseEnd = ClassDecl->bases_end();
-       Base != BaseEnd; ++Base) {
-    if (Base->isVirtual())
+  for (const auto &Base : ClassDecl->bases()) {
+    if (Base.isVirtual())
       continue;
 
     CXXRecordDecl *BaseClassDecl
-      = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
     if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
                                                            0, false, 0))
-      ExceptSpec.CalledDecl(Base->getLocStart(), MoveAssign);
+      ExceptSpec.CalledDecl(Base.getLocStart(), MoveAssign);
   }
 
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
-                                       BaseEnd = ClassDecl->vbases_end();
-       Base != BaseEnd; ++Base) {
+  for (const auto &Base : ClassDecl->vbases()) {
     CXXRecordDecl *BaseClassDecl
-      = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
     if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
                                                            0, false, 0))
-      ExceptSpec.CalledDecl(Base->getLocStart(), MoveAssign);
+      ExceptSpec.CalledDecl(Base.getLocStart(), MoveAssign);
   }
 
-  for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
-                                  FieldEnd = ClassDecl->field_end();
-       Field != FieldEnd;
-       ++Field) {
+  for (const auto *Field : ClassDecl->fields()) {
     QualType FieldType = Context.getBaseElementType(Field->getType());
     if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
       if (CXXMethodDecl *MoveAssign =
@@ -9654,10 +9701,8 @@
   typedef llvm::DenseMap<CXXRecordDecl*, CXXBaseSpecifier*> VBaseMap;
   VBaseMap VBases;
 
-  for (CXXRecordDecl::base_class_iterator BI = Class->bases_begin(),
-                                          BE = Class->bases_end();
-       BI != BE; ++BI) {
-    Worklist.push_back(&*BI);
+  for (auto &BI : Class->bases()) {
+    Worklist.push_back(&BI);
     while (!Worklist.empty()) {
       CXXBaseSpecifier *BaseSpec = Worklist.pop_back_val();
       CXXRecordDecl *Base = BaseSpec->getType()->getAsCXXRecordDecl();
@@ -9689,19 +9734,19 @@
         // only happens in one base, we'll diagnose it when synthesizing
         // that base class's move assignment operator.)
         CXXBaseSpecifier *&Existing =
-            VBases.insert(std::make_pair(Base->getCanonicalDecl(), BI))
+            VBases.insert(std::make_pair(Base->getCanonicalDecl(), &BI))
                 .first->second;
-        if (Existing && Existing != BI) {
+        if (Existing && Existing != &BI) {
           S.Diag(CurrentLocation, diag::warn_vbase_moved_multiple_times)
             << Class << Base;
           S.Diag(Existing->getLocStart(), diag::note_vbase_moved_here)
             << (Base->getCanonicalDecl() ==
                 Existing->getType()->getAsCXXRecordDecl()->getCanonicalDecl())
             << Base << Existing->getType() << Existing->getSourceRange();
-          S.Diag(BI->getLocStart(), diag::note_vbase_moved_here)
+          S.Diag(BI.getLocStart(), diag::note_vbase_moved_here)
             << (Base->getCanonicalDecl() ==
-                BI->getType()->getAsCXXRecordDecl()->getCanonicalDecl())
-            << Base << BI->getType() << BaseSpec->getSourceRange();
+                BI.getType()->getAsCXXRecordDecl()->getCanonicalDecl())
+            << Base << BI.getType() << BaseSpec->getSourceRange();
 
           // Only diagnose each vbase once.
           Existing = 0;
@@ -9714,10 +9759,8 @@
           continue;
 
         // We're going to move the base classes of Base. Add them to the list.
-        for (CXXRecordDecl::base_class_iterator BI = Base->bases_begin(),
-                                                BE = Base->bases_end();
-             BI != BE; ++BI)
-          Worklist.push_back(&*BI);
+        for (auto &BI : Base->bases())
+          Worklist.push_back(&BI);
       }
     }
   }
@@ -9779,8 +9822,7 @@
 
   // Assign base classes.
   bool Invalid = false;
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
-       E = ClassDecl->bases_end(); Base != E; ++Base) {
+  for (auto &Base : ClassDecl->bases()) {
     // C++11 [class.copy]p28:
     //   It is unspecified whether subobjects representing virtual base classes
     //   are assigned more than once by the implicitly-defined copy assignment
@@ -9791,14 +9833,14 @@
 
     // Form the assignment:
     //   static_cast<Base*>(this)->Base::operator=(static_cast<Base&&>(other));
-    QualType BaseType = Base->getType().getUnqualifiedType();
+    QualType BaseType = Base.getType().getUnqualifiedType();
     if (!BaseType->isRecordType()) {
       Invalid = true;
       continue;
     }
 
     CXXCastPath BasePath;
-    BasePath.push_back(Base);
+    BasePath.push_back(&Base);
 
     // Construct the "from" expression, which is an implicit cast to the
     // appropriately-qualified base type.
@@ -9830,9 +9872,7 @@
   }
 
   // Assign non-static members.
-  for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
-                                  FieldEnd = ClassDecl->field_end(); 
-       Field != FieldEnd; ++Field) {
+  for (auto *Field : ClassDecl->fields()) {
     if (Field->isUnnamedBitfield())
       continue;
 
@@ -9878,7 +9918,7 @@
     // Build references to the field in the object we're copying from and to.
     LookupResult MemberLookup(*this, Field->getDeclName(), Loc,
                               LookupMemberName);
-    MemberLookup.addDecl(*Field);
+    MemberLookup.addDecl(Field);
     MemberLookup.resolveKind();
     MemberBuilder From(MoveOther, OtherRefType,
                        /*IsArrow=*/false, MemberLookup);
@@ -9951,40 +9991,31 @@
     return ExceptSpec;
 
   const FunctionProtoType *T = MD->getType()->castAs<FunctionProtoType>();
-  assert(T->getNumArgs() >= 1 && "not a copy ctor");
-  unsigned Quals = T->getArgType(0).getNonReferenceType().getCVRQualifiers();
+  assert(T->getNumParams() >= 1 && "not a copy ctor");
+  unsigned Quals = T->getParamType(0).getNonReferenceType().getCVRQualifiers();
 
   // C++ [except.spec]p14:
   //   An implicitly declared special member function (Clause 12) shall have an 
   //   exception-specification. [...]
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
-                                       BaseEnd = ClassDecl->bases_end();
-       Base != BaseEnd; 
-       ++Base) {
+  for (const auto &Base : ClassDecl->bases()) {
     // Virtual bases are handled below.
-    if (Base->isVirtual())
+    if (Base.isVirtual())
       continue;
     
     CXXRecordDecl *BaseClassDecl
-      = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
     if (CXXConstructorDecl *CopyConstructor =
           LookupCopyingConstructor(BaseClassDecl, Quals))
-      ExceptSpec.CalledDecl(Base->getLocStart(), CopyConstructor);
+      ExceptSpec.CalledDecl(Base.getLocStart(), CopyConstructor);
   }
-  for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
-                                       BaseEnd = ClassDecl->vbases_end();
-       Base != BaseEnd; 
-       ++Base) {
+  for (const auto &Base : ClassDecl->vbases()) {
     CXXRecordDecl *BaseClassDecl
-      = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
     if (CXXConstructorDecl *CopyConstructor =
           LookupCopyingConstructor(BaseClassDecl, Quals))
-      ExceptSpec.CalledDecl(Base->getLocStart(), CopyConstructor);
+      ExceptSpec.CalledDecl(Base.getLocStart(), CopyConstructor);
   }
-  for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
-                                  FieldEnd = ClassDecl->field_end();
-       Field != FieldEnd;
-       ++Field) {
+  for (const auto *Field : ClassDecl->fields()) {
     QualType FieldType = Context.getBaseElementType(Field->getType());
     if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
       if (CXXConstructorDecl *CopyConstructor =
@@ -10117,42 +10148,36 @@
     return ExceptSpec;
 
   // Direct base-class constructors.
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
-                                       BEnd = ClassDecl->bases_end();
-       B != BEnd; ++B) {
-    if (B->isVirtual()) // Handled below.
+  for (const auto &B : ClassDecl->bases()) {
+    if (B.isVirtual()) // Handled below.
       continue;
     
-    if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+    if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
       CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
       CXXConstructorDecl *Constructor =
           LookupMovingConstructor(BaseClassDecl, 0);
       // If this is a deleted function, add it anyway. This might be conformant
       // with the standard. This might not. I'm not sure. It might not matter.
       if (Constructor)
-        ExceptSpec.CalledDecl(B->getLocStart(), Constructor);
+        ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
     }
   }
 
   // Virtual base-class constructors.
-  for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
-                                       BEnd = ClassDecl->vbases_end();
-       B != BEnd; ++B) {
-    if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+  for (const auto &B : ClassDecl->vbases()) {
+    if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
       CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
       CXXConstructorDecl *Constructor =
           LookupMovingConstructor(BaseClassDecl, 0);
       // If this is a deleted function, add it anyway. This might be conformant
       // with the standard. This might not. I'm not sure. It might not matter.
       if (Constructor)
-        ExceptSpec.CalledDecl(B->getLocStart(), Constructor);
+        ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
     }
   }
 
   // Field constructors.
-  for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
-                               FEnd = ClassDecl->field_end();
-       F != FEnd; ++F) {
+  for (const auto *F : ClassDecl->fields()) {
     QualType FieldType = Context.getBaseElementType(F->getType());
     if (CXXRecordDecl *FieldRecDecl = FieldType->getAsCXXRecordDecl()) {
       CXXConstructorDecl *Constructor =
@@ -10309,7 +10334,7 @@
   SynthesizedFunctionScope Scope(*this, Conv);
   DiagnosticErrorTrap Trap(Diags);
    
-  // Retreive the static invoker...
+  // Retrieve the static invoker...
   CXXMethodDecl *Invoker = Lambda->getLambdaStaticInvoker();
   // ... and get the corresponding specialization for a generic lambda.
   if (Lambda->isGenericLambda()) {
@@ -10495,6 +10520,7 @@
                         << VD->getType());
   DiagnoseUseOfDecl(Destructor, VD->getLocation());
 
+  if (Destructor->isTrivial()) return;
   if (!VD->hasGlobalStorage()) return;
 
   // Emit warning for non-trivial dtor in global scope (a real global,
@@ -10525,11 +10551,11 @@
   const FunctionProtoType *Proto 
     = Constructor->getType()->getAs<FunctionProtoType>();
   assert(Proto && "Constructor without a prototype?");
-  unsigned NumArgsInProto = Proto->getNumArgs();
-  
+  unsigned NumParams = Proto->getNumParams();
+
   // If too few arguments are available, we'll fill in the rest with defaults.
-  if (NumArgs < NumArgsInProto)
-    ConvertedArgs.reserve(NumArgsInProto);
+  if (NumArgs < NumParams)
+    ConvertedArgs.reserve(NumParams);
   else
     ConvertedArgs.reserve(NumArgs);
 
@@ -10580,8 +10606,8 @@
                             CanQualType ExpectedFirstParamType,
                             unsigned DependentParamTypeDiag,
                             unsigned InvalidParamTypeDiag) {
-  QualType ResultType = 
-    FnDecl->getType()->getAs<FunctionType>()->getResultType();
+  QualType ResultType =
+      FnDecl->getType()->getAs<FunctionType>()->getReturnType();
 
   // Check that the result type is not dependent.
   if (ResultType->isDependentType())
@@ -10706,10 +10732,8 @@
                   diag::err_operator_overload_static) << FnDecl->getDeclName();
   } else {
     bool ClassOrEnumParam = false;
-    for (FunctionDecl::param_iterator Param = FnDecl->param_begin(),
-                                   ParamEnd = FnDecl->param_end();
-         Param != ParamEnd; ++Param) {
-      QualType ParamType = (*Param)->getType().getNonReferenceType();
+    for (auto Param : FnDecl->params()) {
+      QualType ParamType = Param->getType().getNonReferenceType();
       if (ParamType->isDependentType() || ParamType->isRecordType() ||
           ParamType->isEnumeralType()) {
         ClassOrEnumParam = true;
@@ -10730,12 +10754,11 @@
   // Only the function-call operator allows default arguments
   // (C++ [over.call]p1).
   if (Op != OO_Call) {
-    for (FunctionDecl::param_iterator Param = FnDecl->param_begin();
-         Param != FnDecl->param_end(); ++Param) {
-      if ((*Param)->hasDefaultArg())
-        return Diag((*Param)->getLocation(),
+    for (auto Param : FnDecl->params()) {
+      if (Param->hasDefaultArg())
+        return Diag(Param->getLocation(),
                     diag::err_operator_overload_default_arg)
-          << FnDecl->getDeclName() << (*Param)->getDefaultArgRange();
+          << FnDecl->getDeclName() << Param->getDefaultArgRange();
     }
   }
 
@@ -10802,11 +10825,10 @@
   //   increment operator ++ for objects of that type.
   if ((Op == OO_PlusPlus || Op == OO_MinusMinus) && NumParams == 2) {
     ParmVarDecl *LastParam = FnDecl->getParamDecl(FnDecl->getNumParams() - 1);
-    bool ParamIsInt = false;
-    if (const BuiltinType *BT = LastParam->getType()->getAs<BuiltinType>())
-      ParamIsInt = BT->getKind() == BuiltinType::Int;
+    QualType ParamType = LastParam->getType();
 
-    if (!ParamIsInt)
+    if (!ParamType->isSpecificBuiltinType(BuiltinType::Int) &&
+        !ParamType->isDependentType())
       return Diag(LastParam->getLocation(),
                   diag::err_operator_overload_post_incdec_must_be_int)
         << LastParam->getType() << (Op == OO_MinusMinus);
@@ -10939,13 +10961,11 @@
 
   // A parameter-declaration-clause containing a default argument is not
   // equivalent to any of the permitted forms.
-  for (FunctionDecl::param_iterator Param = FnDecl->param_begin(),
-                                    ParamEnd = FnDecl->param_end();
-       Param != ParamEnd; ++Param) {
-    if ((*Param)->hasDefaultArg()) {
-      Diag((*Param)->getDefaultArgRange().getBegin(),
+  for (auto Param : FnDecl->params()) {
+    if (Param->hasDefaultArg()) {
+      Diag(Param->getDefaultArgRange().getBegin(),
            diag::err_literal_operator_default_argument)
-        << (*Param)->getDefaultArgRange();
+        << Param->getDefaultArgRange();
       break;
     }
   }
@@ -10965,29 +10985,36 @@
 
 /// ActOnStartLinkageSpecification - Parsed the beginning of a C++
 /// linkage specification, including the language and (if present)
-/// the '{'. ExternLoc is the location of the 'extern', LangLoc is
-/// the location of the language string literal, which is provided
-/// by Lang/StrSize. LBraceLoc, if valid, provides the location of
+/// the '{'. ExternLoc is the location of the 'extern', Lang is the
+/// language string literal. LBraceLoc, if valid, provides the location of
 /// the '{' brace. Otherwise, this linkage specification does not
 /// have any braces.
 Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
-                                           SourceLocation LangLoc,
-                                           StringRef Lang,
+                                           Expr *LangStr,
                                            SourceLocation LBraceLoc) {
+  StringLiteral *Lit = cast<StringLiteral>(LangStr);
+  if (!Lit->isAscii()) {
+    Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_not_ascii)
+      << LangStr->getSourceRange();
+    return 0;
+  }
+
+  StringRef Lang = Lit->getString();
   LinkageSpecDecl::LanguageIDs Language;
-  if (Lang == "\"C\"")
+  if (Lang == "C")
     Language = LinkageSpecDecl::lang_c;
-  else if (Lang == "\"C++\"")
+  else if (Lang == "C++")
     Language = LinkageSpecDecl::lang_cxx;
   else {
-    Diag(LangLoc, diag::err_bad_language);
+    Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_unknown)
+      << LangStr->getSourceRange();
     return 0;
   }
 
   // FIXME: Add all the various semantics of linkage specifications
 
-  LinkageSpecDecl *D = LinkageSpecDecl::Create(Context, CurContext,
-                                               ExternLoc, LangLoc, Language,
+  LinkageSpecDecl *D = LinkageSpecDecl::Create(Context, CurContext, ExternLoc,
+                                               LangStr->getExprLoc(), Language,
                                                LBraceLoc.isValid());
   CurContext->addDecl(D);
   PushDeclContext(S, D);
@@ -11001,13 +11028,11 @@
 Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
                                             Decl *LinkageSpec,
                                             SourceLocation RBraceLoc) {
-  if (LinkageSpec) {
-    if (RBraceLoc.isValid()) {
-      LinkageSpecDecl* LSDecl = cast<LinkageSpecDecl>(LinkageSpec);
-      LSDecl->setRBraceLoc(RBraceLoc);
-    }
-    PopDeclContext();
+  if (RBraceLoc.isValid()) {
+    LinkageSpecDecl* LSDecl = cast<LinkageSpecDecl>(LinkageSpec);
+    LSDecl->setRBraceLoc(RBraceLoc);
   }
+  PopDeclContext();
   return LinkageSpec;
 }
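
For reference, a minimal C++ translation unit (my own illustration, not part of the patch; the diagnostic names are the ones introduced in the hunk above) showing what the reworked linkage-specification handling now accepts and rejects, given that the language string arrives as a string-literal expression:

extern "C" void c_func(void);   // LinkageSpecDecl::lang_c
extern "C++" void cxx_func();   // LinkageSpecDecl::lang_cxx
extern "D" void d_func();       // rejected: err_language_linkage_spec_unknown
extern L"C" void wide_func();   // rejected: err_language_linkage_spec_not_ascii (non-ASCII literal kind)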
 
@@ -11374,14 +11399,15 @@
       bool Owned = false;
       bool IsDependent = false;
       return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc,
-                      Attr, AS_public, 
+                      Attr, AS_public,
                       /*ModulePrivateLoc=*/SourceLocation(),
-                      MultiTemplateParamsArg(), Owned, IsDependent, 
+                      MultiTemplateParamsArg(), Owned, IsDependent,
                       /*ScopedEnumKWLoc=*/SourceLocation(),
                       /*ScopedEnumUsesClassTag=*/false,
-                      /*UnderlyingType=*/TypeResult());          
+                      /*UnderlyingType=*/TypeResult(),
+                      /*IsTypeSpecifier=*/false);
     }
-    
+
     NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
     ElaboratedTypeKeyword Keyword
       = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
@@ -11841,10 +11867,13 @@
   if (const FunctionDecl *Prev = Fn->getPreviousDecl()) {
     // Don't consider the implicit declaration we generate for explicit
     // specializations. FIXME: Do not generate these implicit declarations.
-    if ((Prev->getTemplateSpecializationKind() != TSK_ExplicitSpecialization
-        || Prev->getPreviousDecl()) && !Prev->isDefined()) {
+    if ((Prev->getTemplateSpecializationKind() != TSK_ExplicitSpecialization ||
+         Prev->getPreviousDecl()) &&
+        !Prev->isDefined()) {
       Diag(DelLoc, diag::err_deleted_decl_not_first);
-      Diag(Prev->getLocation(), diag::note_previous_declaration);
+      Diag(Prev->getLocation().isInvalid() ? DelLoc : Prev->getLocation(),
+           Prev->isImplicit() ? diag::note_previous_implicit_declaration
+                              : diag::note_previous_declaration);
     }
     // If the declaration wasn't the first, we delete the function anyway for
     // recovery.
@@ -11871,6 +11900,11 @@
     }
   }
 
+  // C++11 [basic.start.main]p3:
+  //   A program that defines main as deleted [...] is ill-formed.
+  if (Fn->isMain())
+    Diag(DelLoc, diag::err_deleted_main);
+
   Fn->setDeletedAsWritten();
 }
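
As a quick illustration (my own example, not taken from the patch), the new Fn->isMain() check makes a deleted definition of main ill-formed, matching C++11 [basic.start.main]p3:

// Deliberately ill-formed: now diagnosed via err_deleted_main.
int main() = delete;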
 
@@ -11977,6 +12011,13 @@
   if (NewCC == OldCC)
     return false;
 
+  // If the calling conventions mismatch because the new function is static,
+  // suppress the calling convention mismatch error; the error about static
+  // function override (err_static_overrides_virtual from
+  // Sema::CheckFunctionDeclaration) is more clear.
+  if (New->getStorageClass() == SC_Static)
+    return false;
+
   Diag(New->getLocation(),
        diag::err_conflicting_overriding_cc_attributes)
     << New->getDeclName() << New->getType() << Old->getType();
@@ -11986,8 +12027,8 @@
 
 bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
                                              const CXXMethodDecl *Old) {
-  QualType NewTy = New->getType()->getAs<FunctionType>()->getResultType();
-  QualType OldTy = Old->getType()->getAs<FunctionType>()->getResultType();
+  QualType NewTy = New->getType()->getAs<FunctionType>()->getReturnType();
+  QualType OldTy = Old->getType()->getAs<FunctionType>()->getReturnType();
 
   if (Context.hasSameType(NewTy, OldTy) ||
       NewTy->isDependentType() || OldTy->isDependentType())
@@ -12120,11 +12161,13 @@
   // If there is no declaration, there was an error parsing it.
   if (D == 0 || D->isInvalidDecl()) return;
 
-  // We should only get called for declarations with scope specifiers, like:
-  //   int foo::bar;
-  assert(D->isOutOfLine());
-  EnterDeclaratorContext(S, D->getDeclContext());
-  
+  // We will always have a nested name specifier here, but this declaration
+  // might not be out of line if the specifier names the current namespace:
+  //   extern int n;
+  //   int ::n = 0;
+  if (D->isOutOfLine())
+    EnterDeclaratorContext(S, D->getDeclContext());
+
   // If we are parsing the initializer for a static data member, push a
   // new expression evaluation context that is associated with this static
   // data member.
@@ -12139,10 +12182,10 @@
   if (D == 0 || D->isInvalidDecl()) return;
 
   if (isStaticDataMember(D))
-    PopExpressionEvaluationContext();  
+    PopExpressionEvaluationContext();
 
-  assert(D->isOutOfLine());
-  ExitDeclaratorContext(S);
+  if (D->isOutOfLine())
+    ExitDeclaratorContext(S);
 }
 
 /// ActOnCXXConditionDeclarationExpr - Parsed a condition declaration of a
@@ -12216,6 +12259,18 @@
       // Otherwise, we can early exit.
       return;
     }
+  } else {
+    // The Microsoft ABI requires that we perform the destructor body
+    // checks (i.e. operator delete() lookup) when the vtable is marked used, as
+    // the deleting destructor is emitted with the vtable, not with the
+    // destructor definition as in the Itanium ABI.
+    // If it has a definition, we do the check at that point instead.
+    if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+        Class->hasUserDeclaredDestructor() &&
+        !Class->getDestructor()->isDefined() &&
+        !Class->getDestructor()->isDeleted()) {
+      CheckDestructor(Class->getDestructor());
+    }
   }
 
   // Local classes need to have their virtual members marked
@@ -12267,11 +12322,9 @@
       bool IsExplicitInstantiationDeclaration
         = Class->getTemplateSpecializationKind()
                                       == TSK_ExplicitInstantiationDeclaration;
-      for (TagDecl::redecl_iterator R = Class->redecls_begin(),
-                                 REnd = Class->redecls_end();
-           R != REnd; ++R) {
+      for (auto R : Class->redecls()) {
         TemplateSpecializationKind TSK
-          = cast<CXXRecordDecl>(*R)->getTemplateSpecializationKind();
+          = cast<CXXRecordDecl>(R)->getTemplateSpecializationKind();
         if (TSK == TSK_ExplicitInstantiationDeclaration)
           IsExplicitInstantiationDeclaration = true;
         else if (TSK == TSK_ExplicitInstantiationDefinition) {
@@ -12320,10 +12373,9 @@
 
 void Sema::MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                                  const CXXRecordDecl *RD) {
-  for (CXXRecordDecl::method_iterator I = RD->method_begin(),
-                                      E = RD->method_end(); I != E; ++I)
-    if ((*I)->isVirtual() && !(*I)->isPure())
-      ResolveExceptionSpec(Loc, (*I)->getType()->castAs<FunctionProtoType>());
+  for (const auto *I : RD->methods())
+    if (I->isVirtual() && !I->isPure())
+      ResolveExceptionSpec(Loc, I->getType()->castAs<FunctionProtoType>());
 }
 
 void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
@@ -12351,10 +12403,9 @@
   if (RD->getNumVBases() == 0)
     return;
 
-  for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
-           e = RD->bases_end(); i != e; ++i) {
+  for (const auto &I : RD->bases()) {
     const CXXRecordDecl *Base =
-        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+        cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
     if (Base->getNumVBases() == 0)
       continue;
     MarkVirtualMembersReferenced(Loc, Base);
@@ -12534,7 +12585,7 @@
   
   // If the return type came after the cv-qualifier-seq, check it now.
   if (Proto->hasTrailingReturn() &&
-      !Finder.TraverseTypeLoc(ProtoTL.getResultLoc()))
+      !Finder.TraverseTypeLoc(ProtoTL.getReturnLoc()))
     return true;
 
   // Check the exception specification.
@@ -12571,10 +12622,8 @@
       return true;
     
   case EST_Dynamic:
-    for (FunctionProtoType::exception_iterator E = Proto->exception_begin(),
-         EEnd = Proto->exception_end();
-         E != EEnd; ++E) {
-      if (!Finder.TraverseType(*E))
+    for (const auto &E : Proto->exceptions()) {
+      if (!Finder.TraverseType(E))
         return true;
     }
     break;
@@ -12587,45 +12636,36 @@
   FindCXXThisExpr Finder(*this);
 
   // Check attributes.
-  for (Decl::attr_iterator A = Method->attr_begin(), AEnd = Method->attr_end();
-       A != AEnd; ++A) {
+  for (const auto *A : Method->attrs()) {
     // FIXME: This should be emitted by tblgen.
     Expr *Arg = 0;
     ArrayRef<Expr *> Args;
-    if (GuardedByAttr *G = dyn_cast<GuardedByAttr>(*A))
+    if (const auto *G = dyn_cast<GuardedByAttr>(A))
       Arg = G->getArg();
-    else if (PtGuardedByAttr *G = dyn_cast<PtGuardedByAttr>(*A))
+    else if (const auto *G = dyn_cast<PtGuardedByAttr>(A))
       Arg = G->getArg();
-    else if (AcquiredAfterAttr *AA = dyn_cast<AcquiredAfterAttr>(*A))
+    else if (const auto *AA = dyn_cast<AcquiredAfterAttr>(A))
       Args = ArrayRef<Expr *>(AA->args_begin(), AA->args_size());
-    else if (AcquiredBeforeAttr *AB = dyn_cast<AcquiredBeforeAttr>(*A))
+    else if (const auto *AB = dyn_cast<AcquiredBeforeAttr>(A))
       Args = ArrayRef<Expr *>(AB->args_begin(), AB->args_size());
-    else if (ExclusiveLockFunctionAttr *ELF 
-               = dyn_cast<ExclusiveLockFunctionAttr>(*A))
-      Args = ArrayRef<Expr *>(ELF->args_begin(), ELF->args_size());
-    else if (SharedLockFunctionAttr *SLF 
-               = dyn_cast<SharedLockFunctionAttr>(*A))
-      Args = ArrayRef<Expr *>(SLF->args_begin(), SLF->args_size());
-    else if (ExclusiveTrylockFunctionAttr *ETLF
-               = dyn_cast<ExclusiveTrylockFunctionAttr>(*A)) {
+    else if (const auto *ETLF = dyn_cast<ExclusiveTrylockFunctionAttr>(A)) {
       Arg = ETLF->getSuccessValue();
       Args = ArrayRef<Expr *>(ETLF->args_begin(), ETLF->args_size());
-    } else if (SharedTrylockFunctionAttr *STLF
-                 = dyn_cast<SharedTrylockFunctionAttr>(*A)) {
+    } else if (const auto *STLF = dyn_cast<SharedTrylockFunctionAttr>(A)) {
       Arg = STLF->getSuccessValue();
       Args = ArrayRef<Expr *>(STLF->args_begin(), STLF->args_size());
-    } else if (UnlockFunctionAttr *UF = dyn_cast<UnlockFunctionAttr>(*A))
-      Args = ArrayRef<Expr *>(UF->args_begin(), UF->args_size());
-    else if (LockReturnedAttr *LR = dyn_cast<LockReturnedAttr>(*A))
+    } else if (const auto *LR = dyn_cast<LockReturnedAttr>(A))
       Arg = LR->getArg();
-    else if (LocksExcludedAttr *LE = dyn_cast<LocksExcludedAttr>(*A))
+    else if (const auto *LE = dyn_cast<LocksExcludedAttr>(A))
       Args = ArrayRef<Expr *>(LE->args_begin(), LE->args_size());
-    else if (ExclusiveLocksRequiredAttr *ELR 
-               = dyn_cast<ExclusiveLocksRequiredAttr>(*A))
-      Args = ArrayRef<Expr *>(ELR->args_begin(), ELR->args_size());
-    else if (SharedLocksRequiredAttr *SLR 
-               = dyn_cast<SharedLocksRequiredAttr>(*A))
-      Args = ArrayRef<Expr *>(SLR->args_begin(), SLR->args_size());
+    else if (const auto *RC = dyn_cast<RequiresCapabilityAttr>(A))
+      Args = ArrayRef<Expr *>(RC->args_begin(), RC->args_size());
+    else if (const auto *AC = dyn_cast<AcquireCapabilityAttr>(A))
+      Args = ArrayRef<Expr *>(AC->args_begin(), AC->args_size());
+    else if (const auto *AC = dyn_cast<TryAcquireCapabilityAttr>(A))
+      Args = ArrayRef<Expr *>(AC->args_begin(), AC->args_size());
+    else if (const auto *RC = dyn_cast<ReleaseCapabilityAttr>(A))
+      Args = ArrayRef<Expr *>(RC->args_begin(), RC->args_size());
 
     if (Arg && !Finder.TraverseStmt(Arg))
       return true;
@@ -12801,11 +12841,9 @@
     PrevDecl = 0;
 
   SourceLocation TSSL = D.getLocStart();
-  MSPropertyDecl *NewPD;
   const AttributeList::PropertyData &Data = MSPropertyAttr->getPropertyData();
-  NewPD = new (Context) MSPropertyDecl(Record, Loc,
-                                       II, T, TInfo, TSSL,
-                                       Data.GetterId, Data.SetterId);
+  MSPropertyDecl *NewPD = MSPropertyDecl::Create(
+      Context, Record, Loc, II, T, TInfo, TSSL, Data.GetterId, Data.SetterId);
   ProcessDeclAttributes(TUScope, NewPD, D);
   NewPD->setAccess(AS);
 
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index f44fb32..d59dd8b 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -15,6 +15,7 @@
 #include "clang/AST/ASTConsumer.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DataRecursiveASTVisitor.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/Expr.h"
 #include "clang/AST/ExprObjC.h"
@@ -48,8 +49,8 @@
 
   // We ignore protocols here.  Should we?  What about Class?
 
-  const ObjCObjectType *result = method->getResultType()
-    ->castAs<ObjCObjectPointerType>()->getObjectType();
+  const ObjCObjectType *result =
+      method->getReturnType()->castAs<ObjCObjectPointerType>()->getObjectType();
 
   if (result->isObjCId()) {
     return false;
@@ -97,8 +98,9 @@
   // If we're in a system header, and this is not a call, just make
   // the method unusable.
   if (receiverTypeIfCall.isNull() && getSourceManager().isInSystemHeader(loc)) {
-    method->addAttr(new (Context) UnavailableAttr(loc, Context,
-                "init method returns a type unrelated to its receiver type"));
+    method->addAttr(UnavailableAttr::CreateImplicit(Context,
+                "init method returns a type unrelated to its receiver type",
+                loc));
     return true;
   }
 
@@ -116,10 +118,10 @@
     // implies a related result type, and the original (overridden) method has
     // a suitable return type, but the new (overriding) method does not have
     // a suitable return type.
-    QualType ResultType = NewMethod->getResultType();
+    QualType ResultType = NewMethod->getReturnType();
     SourceRange ResultTypeRange;
-    if (const TypeSourceInfo *ResultTypeInfo 
-                                        = NewMethod->getResultTypeSourceInfo())
+    if (const TypeSourceInfo *ResultTypeInfo =
+            NewMethod->getReturnTypeSourceInfo())
       ResultTypeRange = ResultTypeInfo->getTypeLoc().getSourceRange();
     
     // Figure out which class this method is part of, if any.
@@ -207,19 +209,19 @@
     return false;
 
   case OMF_dealloc:
-    if (!Context.hasSameType(method->getResultType(), Context.VoidTy)) {
+    if (!Context.hasSameType(method->getReturnType(), Context.VoidTy)) {
       SourceRange ResultTypeRange;
-      if (const TypeSourceInfo *ResultTypeInfo
-          = method->getResultTypeSourceInfo())
+      if (const TypeSourceInfo *ResultTypeInfo =
+              method->getReturnTypeSourceInfo())
         ResultTypeRange = ResultTypeInfo->getTypeLoc().getSourceRange();
       if (ResultTypeRange.isInvalid())
-        Diag(method->getLocation(), diag::error_dealloc_bad_result_type) 
-          << method->getResultType() 
-          << FixItHint::CreateInsertion(method->getSelectorLoc(0), "(void)");
+        Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
+            << method->getReturnType()
+            << FixItHint::CreateInsertion(method->getSelectorLoc(0), "(void)");
       else
-        Diag(method->getLocation(), diag::error_dealloc_bad_result_type) 
-          << method->getResultType() 
-          << FixItHint::CreateReplacement(ResultTypeRange, "void");
+        Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
+            << method->getReturnType()
+            << FixItHint::CreateReplacement(ResultTypeRange, "void");
       return true;
     }
     return false;
@@ -229,8 +231,7 @@
     if (checkInitMethod(method, QualType()))
       return true;
 
-    method->addAttr(new (Context) NSConsumesSelfAttr(SourceLocation(),
-                                                     Context));
+    method->addAttr(NSConsumesSelfAttr::CreateImplicit(Context));
 
     // Don't add a second copy of this attribute, but otherwise don't
     // let it be suppressed.
@@ -249,8 +250,7 @@
     break;
   }
 
-  method->addAttr(new (Context) NSReturnsRetainedAttr(SourceLocation(),
-                                                      Context));
+  method->addAttr(NSReturnsRetainedAttr::CreateImplicit(Context));
   return false;
 }
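
The same pattern appears in the UnavailableAttr and NSConsumesSelfAttr hunks above: compiler-synthesized attributes now go through the tablegen-generated CreateImplicit() factory instead of placement-new with an explicit SourceLocation. A minimal sketch (the helper itself is my assumption; only the CreateImplicit call is taken from the patch):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"

// Hypothetical helper: annotate a method as returning a +1 object, once.
static void markReturnsRetained(clang::ASTContext &Ctx, clang::ObjCMethodDecl *MD) {
  if (!MD->hasAttr<clang::NSReturnsRetainedAttr>())
    MD->addAttr(clang::NSReturnsRetainedAttr::CreateImplicit(Ctx)); // implicit: no written source location
}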
 
@@ -329,17 +329,15 @@
                            /*CheckParameterNames=*/false);
 
   // Introduce all of the other parameters into this scope.
-  for (ObjCMethodDecl::param_iterator PI = MDecl->param_begin(),
-       E = MDecl->param_end(); PI != E; ++PI) {
-    ParmVarDecl *Param = (*PI);
+  for (auto *Param : MDecl->params()) {
     if (!Param->isInvalidDecl() &&
         getLangOpts().ObjCAutoRefCount &&
         !HasExplicitOwnershipAttr(*this, Param))
       Diag(Param->getLocation(), diag::warn_arc_strong_pointer_objc_pointer) <<
             Param->getType();
     
-    if ((*PI)->getIdentifier())
-      PushOnScopeChains(*PI, FnBodyScope);
+    if (Param->getIdentifier())
+      PushOnScopeChains(Param, FnBodyScope);
   }
 
   // In ARC, disallow definition of retain/release/autorelease/retainCount
@@ -381,8 +379,13 @@
       ObjCImplDecl *ImplDeclOfMethodDecl = 0;
       if (ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(ContDeclOfMethodDecl))
         ImplDeclOfMethodDecl = OID->getImplementation();
-      else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContDeclOfMethodDecl))
-        ImplDeclOfMethodDecl = CD->getImplementation();
+      else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContDeclOfMethodDecl)) {
+        if (CD->IsClassExtension()) {
+          if (ObjCInterfaceDecl *OID = CD->getClassInterface())
+            ImplDeclOfMethodDecl = OID->getImplementation();
+        } else
+            ImplDeclOfMethodDecl = CD->getImplementation();
+      }
       // No need to issue deprecated warning if deprecated method in class/category
       // is being implemented in its own implementation (no overriding is involved).
       if (!ImplDeclOfMethodDecl || ImplDeclOfMethodDecl != ImplDeclOfMethodDef)
@@ -391,6 +394,17 @@
                                           MDecl->getLocation(), 0);
     }
 
+    if (MDecl->getMethodFamily() == OMF_init) {
+      if (MDecl->isDesignatedInitializerForTheInterface()) {
+        getCurFunction()->ObjCIsDesignatedInit = true;
+        getCurFunction()->ObjCWarnForNoDesignatedInitChain =
+            IC->getSuperClass() != 0;
+      } else if (IC->hasDesignatedInitializers()) {
+        getCurFunction()->ObjCIsSecondaryInit = true;
+        getCurFunction()->ObjCWarnForNoInitDelegation = true;
+      }
+    }
+
     // If this is "dealloc" or "finalize", set some bit here.
     // Then in ActOnSuperMessage() (SemaExprObjC), set it back to false.
     // Finally, in ActOnFinishFunctionBody() (SemaDecl), warn if flag is set.
@@ -428,7 +442,7 @@
   explicit ObjCInterfaceValidatorCCC(ObjCInterfaceDecl *IDecl)
       : CurrentIDecl(IDecl) {}
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     ObjCInterfaceDecl *ID = candidate.getCorrectionDeclAs<ObjCInterfaceDecl>();
     return ID && !declaresSameEntity(ID, CurrentIDecl);
   }
@@ -607,9 +621,8 @@
     QualType T = TDecl->getUnderlyingType();
     if (T->isObjCObjectType())
       if (const ObjCObjectType *OPT = T->getAs<ObjCObjectType>())
-        for (ObjCObjectType::qual_iterator I = OPT->qual_begin(),
-             E = OPT->qual_end(); I != E; ++I)
-          ProtocolRefs.push_back(*I);
+        for (auto *I : OPT->quals())
+          ProtocolRefs.push_back(I);
   }
 }
 
@@ -751,6 +764,21 @@
   return ActOnObjCContainerStartDefinition(PDecl);
 }
 
+static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
+                                          ObjCProtocolDecl *&UndefinedProtocol) {
+  if (!PDecl->hasDefinition() || PDecl->getDefinition()->isHidden()) {
+    UndefinedProtocol = PDecl;
+    return true;
+  }
+  
+  for (auto *PI : PDecl->protocols())
+    if (NestedProtocolHasNoDefinition(PI, UndefinedProtocol)) {
+      UndefinedProtocol = PI;
+      return true;
+    }
+  return false;
+}
+
 /// FindProtocolDeclaration - This routine looks up protocols and
 /// issues an error if they are not declared. It returns list of
 /// protocol declarations in its 'Protocols' argument.
@@ -786,10 +814,15 @@
     // If this is a forward declaration and we are supposed to warn in this
     // case, do it.
     // FIXME: Recover nicely in the hidden case.
+    ObjCProtocolDecl *UndefinedProtocol;
+    
     if (WarnOnDeclarations &&
-        (!PDecl->hasDefinition() || PDecl->getDefinition()->isHidden()))
+        NestedProtocolHasNoDefinition(PDecl, UndefinedProtocol)) {
       Diag(ProtocolId[i].second, diag::warn_undef_protocolref)
         << ProtocolId[i].first;
+      Diag(UndefinedProtocol->getLocation(), diag::note_protocol_decl_undefined)
+        << UndefinedProtocol;
+    }
     Protocols.push_back(PDecl);
   }
 }
@@ -803,19 +836,16 @@
     return;  // Possibly due to previous error
 
   llvm::DenseMap<Selector, const ObjCMethodDecl*> MethodMap;
-  for (ObjCInterfaceDecl::method_iterator i = ID->meth_begin(),
-       e =  ID->meth_end(); i != e; ++i) {
-    ObjCMethodDecl *MD = *i;
+  for (auto *MD : ID->methods())
     MethodMap[MD->getSelector()] = MD;
-  }
 
   if (MethodMap.empty())
     return;
-  for (ObjCCategoryDecl::method_iterator i = CAT->meth_begin(),
-       e =  CAT->meth_end(); i != e; ++i) {
-    ObjCMethodDecl *Method = *i;
+  for (const auto *Method : CAT->methods()) {
     const ObjCMethodDecl *&PrevMethod = MethodMap[Method->getSelector()];
-    if (PrevMethod && !MatchTwoMethodDeclarations(Method, PrevMethod)) {
+    if (PrevMethod &&
+        (PrevMethod->isInstanceMethod() == Method->isInstanceMethod()) &&
+        !MatchTwoMethodDeclarations(Method, PrevMethod)) {
       Diag(Method->getLocation(), diag::err_duplicate_method_decl)
             << Method->getDeclName();
       Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
@@ -1154,11 +1184,7 @@
         continue;
       }
       // Check class extensions (unnamed categories) for duplicate ivars.
-      for (ObjCInterfaceDecl::visible_extensions_iterator
-           Ext = IDecl->visible_extensions_begin(),
-           ExtEnd = IDecl->visible_extensions_end();
-         Ext != ExtEnd; ++Ext) {
-        ObjCCategoryDecl *CDecl = *Ext;
+      for (const auto *CDecl : IDecl->visible_extensions()) {
         if (const ObjCIvarDecl *ClsExtIvar = 
             CDecl->getIvarDecl(ImplIvar->getIdentifier())) {
           Diag(ImplIvar->getLocation(), diag::err_duplicate_ivar_declaration); 
@@ -1209,13 +1235,16 @@
   }
 
   if (numIvars > 0)
-    Diag(ivars[j]->getLocation(), diag::err_inconsistant_ivar_count);
+    Diag(ivars[j]->getLocation(), diag::err_inconsistent_ivar_count);
   else if (IVI != IVE)
-    Diag(IVI->getLocation(), diag::err_inconsistant_ivar_count);
+    Diag(IVI->getLocation(), diag::err_inconsistent_ivar_count);
 }
 
-void Sema::WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
-                               bool &IncompleteImpl, unsigned DiagID) {
+static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
+                                ObjCMethodDecl *method,
+                                bool &IncompleteImpl,
+                                unsigned DiagID,
+                                NamedDecl *NeededFor = 0) {
   // No point warning no definition of method which is 'unavailable'.
   switch (method->getAvailability()) {
   case AR_Available:
@@ -1233,13 +1262,17 @@
   // warning, but some users strongly voiced that they would prefer
   // separate warnings.  We will give that approach a try, as that
   // matches what we do with protocols.
-  
-  Diag(ImpLoc, DiagID) << method->getDeclName();
+  {
+    const Sema::SemaDiagnosticBuilder &B = S.Diag(ImpLoc, DiagID);
+    B << method;
+    if (NeededFor)
+      B << NeededFor;
+  }
 
   // Issue a note to the original declaration.
   SourceLocation MethodLoc = method->getLocStart();
   if (MethodLoc.isValid())
-    Diag(MethodLoc, diag::note_method_declared_at) << method;
+    S.Diag(MethodLoc, diag::note_method_declared_at) << method;
 }
 
 /// Determines if type B can be substituted for type A.  Returns true if we can
@@ -1323,21 +1356,21 @@
       (MethodDecl->getObjCDeclQualifier() !=
        MethodImpl->getObjCDeclQualifier())) {
     if (Warn) {
-        S.Diag(MethodImpl->getLocation(), 
-               (IsOverridingMode ? 
-                 diag::warn_conflicting_overriding_ret_type_modifiers 
-                 : diag::warn_conflicting_ret_type_modifiers))
+      S.Diag(MethodImpl->getLocation(),
+             (IsOverridingMode
+                  ? diag::warn_conflicting_overriding_ret_type_modifiers
+                  : diag::warn_conflicting_ret_type_modifiers))
           << MethodImpl->getDeclName()
-          << getTypeRange(MethodImpl->getResultTypeSourceInfo());
-        S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration)
-          << getTypeRange(MethodDecl->getResultTypeSourceInfo());
+          << getTypeRange(MethodImpl->getReturnTypeSourceInfo());
+      S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration)
+          << getTypeRange(MethodDecl->getReturnTypeSourceInfo());
     }
     else
       return false;
   }
-  
-  if (S.Context.hasSameUnqualifiedType(MethodImpl->getResultType(),
-                                       MethodDecl->getResultType()))
+
+  if (S.Context.hasSameUnqualifiedType(MethodImpl->getReturnType(),
+                                       MethodDecl->getReturnType()))
     return true;
   if (!Warn)
     return false;
@@ -1349,9 +1382,9 @@
   // Mismatches between ObjC pointers go into a different warning
   // category, and sometimes they're even completely whitelisted.
   if (const ObjCObjectPointerType *ImplPtrTy =
-        MethodImpl->getResultType()->getAs<ObjCObjectPointerType>()) {
+          MethodImpl->getReturnType()->getAs<ObjCObjectPointerType>()) {
     if (const ObjCObjectPointerType *IfacePtrTy =
-          MethodDecl->getResultType()->getAs<ObjCObjectPointerType>()) {
+            MethodDecl->getReturnType()->getAs<ObjCObjectPointerType>()) {
       // Allow non-matching return types as long as they don't violate
       // the principle of substitutability.  Specifically, we permit
       // return types that are subclasses of the declared return type,
@@ -1366,14 +1399,13 @@
   }
 
   S.Diag(MethodImpl->getLocation(), DiagID)
-    << MethodImpl->getDeclName()
-    << MethodDecl->getResultType()
-    << MethodImpl->getResultType()
-    << getTypeRange(MethodImpl->getResultTypeSourceInfo());
-  S.Diag(MethodDecl->getLocation(), 
-         IsOverridingMode ? diag::note_previous_declaration 
-                          : diag::note_previous_definition)
-    << getTypeRange(MethodDecl->getResultTypeSourceInfo());
+      << MethodImpl->getDeclName() << MethodDecl->getReturnType()
+      << MethodImpl->getReturnType()
+      << getTypeRange(MethodImpl->getReturnTypeSourceInfo());
+  S.Diag(MethodDecl->getLocation(), IsOverridingMode
+                                        ? diag::note_previous_declaration
+                                        : diag::note_previous_definition)
+      << getTypeRange(MethodDecl->getReturnTypeSourceInfo());
   return false;
 }
 
@@ -1505,7 +1537,7 @@
 
   // The only reason these methods don't fall within their families is
   // due to unusual result types.
-  if (unmatched->getResultType()->isObjCObjectPointerType()) {
+  if (unmatched->getReturnType()->isObjCObjectPointerType()) {
     reasonSelector = R_UnrelatedReturn;
   } else {
     reasonSelector = R_NonObjectReturn;
@@ -1615,14 +1647,41 @@
 /// we used an immutable set to keep the table then it wouldn't add significant
 /// memory cost and it would be handy for lookups.
 
+typedef llvm::DenseSet<IdentifierInfo*> ProtocolNameSet;
+typedef std::unique_ptr<ProtocolNameSet> LazyProtocolNameSet;
+
+static void findProtocolsWithExplicitImpls(const ObjCProtocolDecl *PDecl,
+                                           ProtocolNameSet &PNS) {
+  if (PDecl->hasAttr<ObjCExplicitProtocolImplAttr>())
+    PNS.insert(PDecl->getIdentifier());
+  for (const auto *PI : PDecl->protocols())
+    findProtocolsWithExplicitImpls(PI, PNS);
+}
+
+/// Recursively populates a set with the protocols conformed to anywhere in a
+/// class hierarchy that carry the
+/// 'objc_protocol_requires_explicit_implementation' attribute.
+static void findProtocolsWithExplicitImpls(const ObjCInterfaceDecl *Super,
+                                           ProtocolNameSet &PNS) {
+  if (!Super)
+    return;
+
+  for (const auto *I : Super->all_referenced_protocols())
+    findProtocolsWithExplicitImpls(I, PNS);
+
+  findProtocolsWithExplicitImpls(Super->getSuperClass(), PNS);
+}
+
 /// CheckProtocolMethodDefs - This routine checks unimplemented methods
 /// Declared in protocol, and those referenced by it.
-void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
-                                   ObjCProtocolDecl *PDecl,
-                                   bool& IncompleteImpl,
-                                   const SelectorSet &InsMap,
-                                   const SelectorSet &ClsMap,
-                                   ObjCContainerDecl *CDecl) {
+static void CheckProtocolMethodDefs(Sema &S,
+                                    SourceLocation ImpLoc,
+                                    ObjCProtocolDecl *PDecl,
+                                    bool& IncompleteImpl,
+                                    const Sema::SelectorSet &InsMap,
+                                    const Sema::SelectorSet &ClsMap,
+                                    ObjCContainerDecl *CDecl,
+                                    LazyProtocolNameSet &ProtocolsExplictImpl) {
   ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
   ObjCInterfaceDecl *IDecl = C ? C->getClassInterface() 
                                : dyn_cast<ObjCInterfaceDecl>(CDecl);
@@ -1630,7 +1689,33 @@
   
   ObjCInterfaceDecl *Super = IDecl->getSuperClass();
   ObjCInterfaceDecl *NSIDecl = 0;
-  if (getLangOpts().ObjCRuntime.isNeXTFamily()) {
+
+  // If this protocol is marked 'objc_protocol_requires_explicit_implementation'
+  // then we should check if any class in the super class hierarchy also
+  // conforms to this protocol, either directly or via protocol inheritance.
+  // If so, we can skip checking this protocol completely because we
+  // know that a parent class already satisfies this protocol.
+  //
+  // Note: we could generalize this logic for all protocols, and only apply
+  // the limit on walking the super class chain to the specially marked
+  // protocols.  That may be a worthwhile optimization, but for now the
+  // change is restricted to 'objc_protocol_requires_explicit_implementation'
+  // protocols so it can be evaluated in a controlled way.
+  if (PDecl->hasAttr<ObjCExplicitProtocolImplAttr>()) {
+    if (!ProtocolsExplictImpl) {
+      ProtocolsExplictImpl.reset(new ProtocolNameSet);
+      findProtocolsWithExplicitImpls(Super, *ProtocolsExplictImpl);
+    }
+    if (ProtocolsExplictImpl->find(PDecl->getIdentifier()) !=
+        ProtocolsExplictImpl->end())
+      return;
+
+    // If no super class conforms to the protocol, we should not search
+    // for methods in the super class to implicitly satisfy the protocol.
+    Super = NULL;
+  }
+
+  if (S.getLangOpts().ObjCRuntime.isNeXTFamily()) {
     // check to see if class implements forwardInvocation method and objects
     // of this class are derived from 'NSProxy' so that to forward requests
     // from one object to another.
@@ -1638,12 +1723,12 @@
     // implemented in the class, we should not issue "Method definition not
     // found" warnings.
     // FIXME: Use a general GetUnarySelector method for this.
-    IdentifierInfo* II = &Context.Idents.get("forwardInvocation");
-    Selector fISelector = Context.Selectors.getSelector(1, &II);
+    IdentifierInfo* II = &S.Context.Idents.get("forwardInvocation");
+    Selector fISelector = S.Context.Selectors.getSelector(1, &II);
     if (InsMap.count(fISelector))
       // Is IDecl derived from 'NSProxy'? If so, no instance methods
       // need be implemented in the implementation.
-      NSIDecl = IDecl->lookupInheritedClass(&Context.Idents.get("NSProxy"));
+      NSIDecl = IDecl->lookupInheritedClass(&S.Context.Idents.get("NSProxy"));
   }
 
   // If this is a forward protocol declaration, get its definition.
@@ -1658,13 +1743,15 @@
 
   // check unimplemented instance methods.
   if (!NSIDecl)
-    for (ObjCProtocolDecl::instmeth_iterator I = PDecl->instmeth_begin(),
-         E = PDecl->instmeth_end(); I != E; ++I) {
-      ObjCMethodDecl *method = *I;
+    for (auto *method : PDecl->instance_methods()) {
       if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
           !method->isPropertyAccessor() &&
           !InsMap.count(method->getSelector()) &&
-          (!Super || !Super->lookupInstanceMethod(method->getSelector()))) {
+          (!Super || !Super->lookupMethod(method->getSelector(),
+                                          true /* instance */,
+                                          false /* shallowCategoryLookup */,
+                                          true /* followSuper */,
+                                          NULL /* category */))) {
             // If a method is not implemented in the category implementation but
             // has been declared in its primary class, superclass,
             // or in one of their protocols, no need to issue the warning. 
@@ -1675,44 +1762,47 @@
             // have been synthesized due to a property declared in the class which
             // uses the protocol.
             if (ObjCMethodDecl *MethodInClass =
-                  IDecl->lookupInstanceMethod(method->getSelector(), 
-                                              true /*shallowCategoryLookup*/))
+                  IDecl->lookupMethod(method->getSelector(),
+                                      true /* instance */,
+                                      true /* shallowCategoryLookup */,
+                                      false /* followSuper */))
               if (C || MethodInClass->isPropertyAccessor())
                 continue;
             unsigned DIAG = diag::warn_unimplemented_protocol_method;
-            if (Diags.getDiagnosticLevel(DIAG, ImpLoc)
+            if (S.Diags.getDiagnosticLevel(DIAG, ImpLoc)
                 != DiagnosticsEngine::Ignored) {
-              WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
-              Diag(CDecl->getLocation(), diag::note_required_for_protocol_at)
-                << PDecl->getDeclName();
+              WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG,
+                                  PDecl);
             }
           }
     }
   // check unimplemented class methods
-  for (ObjCProtocolDecl::classmeth_iterator
-         I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
-       I != E; ++I) {
-    ObjCMethodDecl *method = *I;
+  for (auto *method : PDecl->class_methods()) {
     if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
         !ClsMap.count(method->getSelector()) &&
-        (!Super || !Super->lookupClassMethod(method->getSelector()))) {
+        (!Super || !Super->lookupMethod(method->getSelector(),
+                                        false /* class method */,
+                                        false /* shallowCategoryLookup */,
+                                        true  /* followSuper */,
+                                        NULL /* category */))) {
       // See above comment for instance method lookups.
-      if (C && IDecl->lookupClassMethod(method->getSelector(), 
-                                        true /*shallowCategoryLookup*/))
+      if (C && IDecl->lookupMethod(method->getSelector(),
+                                   false /* class */,
+                                   true /* shallowCategoryLookup */,
+                                   false /* followSuper */))
         continue;
+
       unsigned DIAG = diag::warn_unimplemented_protocol_method;
-      if (Diags.getDiagnosticLevel(DIAG, ImpLoc) !=
+      if (S.Diags.getDiagnosticLevel(DIAG, ImpLoc) !=
             DiagnosticsEngine::Ignored) {
-        WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
-        Diag(IDecl->getLocation(), diag::note_required_for_protocol_at) <<
-          PDecl->getDeclName();
+        WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG, PDecl);
       }
     }
   }
   // Check on this protocol's referenced protocols, recursively.
-  for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
-       E = PDecl->protocol_end(); PI != E; ++PI)
-    CheckProtocolMethodDefs(ImpLoc, *PI, IncompleteImpl, InsMap, ClsMap, CDecl);
+  for (auto *PI : PDecl->protocols())
+    CheckProtocolMethodDefs(S, ImpLoc, PI, IncompleteImpl, InsMap, ClsMap,
+                            CDecl, ProtocolsExplictImpl);
 }
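
The ProtocolsExplictImpl set threaded through the calls above is built lazily: it stays empty until the first protocol carrying 'objc_protocol_requires_explicit_implementation' is reached, and is then reused for every protocol visited afterwards. A minimal sketch of that pattern, using a hypothetical helper name that is not part of the patch:

static bool superConformsTo(const ObjCProtocolDecl *PDecl,
                            const ObjCInterfaceDecl *Super,
                            LazyProtocolNameSet &Cache) {
  if (!Cache) {
    // Build the name set once, on the first query that needs it.
    Cache.reset(new ProtocolNameSet);
    findProtocolsWithExplicitImpls(Super, *Cache);
  }
  // Subsequent queries are a cheap DenseSet lookup.
  return Cache->count(PDecl->getIdentifier()) != 0;
}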
 
 /// MatchAllMethodDeclarations - Check methods declared in interface
@@ -1729,56 +1819,50 @@
                                       bool WarnCategoryMethodImpl) {
   // Check and see if instance methods in class interface have been
   // implemented in the implementation class. If so, their types match.
-  for (ObjCInterfaceDecl::instmeth_iterator I = CDecl->instmeth_begin(),
-       E = CDecl->instmeth_end(); I != E; ++I) {
-    if (!InsMapSeen.insert((*I)->getSelector()))
+  for (auto *I : CDecl->instance_methods()) {
+    if (!InsMapSeen.insert(I->getSelector()))
       continue;
-    if (!(*I)->isPropertyAccessor() &&
-        !InsMap.count((*I)->getSelector())) {
+    if (!I->isPropertyAccessor() &&
+        !InsMap.count(I->getSelector())) {
       if (ImmediateClass)
-        WarnUndefinedMethod(IMPDecl->getLocation(), *I, IncompleteImpl,
+        WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl,
                             diag::warn_undef_method_impl);
       continue;
     } else {
       ObjCMethodDecl *ImpMethodDecl =
-        IMPDecl->getInstanceMethod((*I)->getSelector());
-      assert(CDecl->getInstanceMethod((*I)->getSelector()) &&
+        IMPDecl->getInstanceMethod(I->getSelector());
+      assert(CDecl->getInstanceMethod(I->getSelector()) &&
              "Expected to find the method through lookup as well");
-      ObjCMethodDecl *MethodDecl = *I;
       // ImpMethodDecl may be null as in a @dynamic property.
       if (ImpMethodDecl) {
         if (!WarnCategoryMethodImpl)
-          WarnConflictingTypedMethods(ImpMethodDecl, MethodDecl,
+          WarnConflictingTypedMethods(ImpMethodDecl, I,
                                       isa<ObjCProtocolDecl>(CDecl));
-        else if (!MethodDecl->isPropertyAccessor())
-          WarnExactTypedMethods(ImpMethodDecl, MethodDecl,
-                                isa<ObjCProtocolDecl>(CDecl));
+        else if (!I->isPropertyAccessor())
+          WarnExactTypedMethods(ImpMethodDecl, I, isa<ObjCProtocolDecl>(CDecl));
       }
     }
   }
 
   // Check and see if class methods in class interface have been
   // implemented in the implementation class. If so, their types match.
-  for (ObjCInterfaceDecl::classmeth_iterator I = CDecl->classmeth_begin(),
-                                             E = CDecl->classmeth_end();
-       I != E; ++I) {
-    if (!ClsMapSeen.insert((*I)->getSelector()))
+  for (auto *I : CDecl->class_methods()) {
+    if (!ClsMapSeen.insert(I->getSelector()))
       continue;
-    if (!ClsMap.count((*I)->getSelector())) {
+    if (!ClsMap.count(I->getSelector())) {
       if (ImmediateClass)
-        WarnUndefinedMethod(IMPDecl->getLocation(), *I, IncompleteImpl,
+        WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl,
                             diag::warn_undef_method_impl);
     } else {
       ObjCMethodDecl *ImpMethodDecl =
-        IMPDecl->getClassMethod((*I)->getSelector());
-      assert(CDecl->getClassMethod((*I)->getSelector()) &&
+        IMPDecl->getClassMethod(I->getSelector());
+      assert(CDecl->getClassMethod(I->getSelector()) &&
              "Expected to find the method through lookup as well");
-      ObjCMethodDecl *MethodDecl = *I;
       if (!WarnCategoryMethodImpl)
-        WarnConflictingTypedMethods(ImpMethodDecl, MethodDecl, 
+        WarnConflictingTypedMethods(ImpMethodDecl, I, 
                                     isa<ObjCProtocolDecl>(CDecl));
       else
-        WarnExactTypedMethods(ImpMethodDecl, MethodDecl,
+        WarnExactTypedMethods(ImpMethodDecl, I,
                               isa<ObjCProtocolDecl>(CDecl));
     }
   }
@@ -1786,10 +1870,9 @@
   if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl> (CDecl)) {
     // Also, check for methods declared in protocols inherited by
     // this protocol.
-    for (ObjCProtocolDecl::protocol_iterator
-          PI = PD->protocol_begin(), E = PD->protocol_end(); PI != E; ++PI)
+    for (auto *PI : PD->protocols())
       MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
-                                 IMPDecl, (*PI), IncompleteImpl, false,
+                                 IMPDecl, PI, IncompleteImpl, false,
                                  WarnCategoryMethodImpl);
   }
   
@@ -1798,35 +1881,24 @@
     // i.e. when WarnCategoryMethodImpl is false, check declarations in class
     // extension; as well as those in categories.
     if (!WarnCategoryMethodImpl) {
-      for (ObjCInterfaceDecl::visible_categories_iterator
-             Cat = I->visible_categories_begin(),
-           CatEnd = I->visible_categories_end();
-           Cat != CatEnd; ++Cat) {
+      for (auto *Cat : I->visible_categories())
         MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
-                                   IMPDecl, *Cat, IncompleteImpl, false,
+                                   IMPDecl, Cat, IncompleteImpl, false,
                                    WarnCategoryMethodImpl);
-      }
     } else {
       // Also methods in class extensions need be looked at next.
-      for (ObjCInterfaceDecl::visible_extensions_iterator
-             Ext = I->visible_extensions_begin(),
-             ExtEnd = I->visible_extensions_end();
-           Ext != ExtEnd; ++Ext) {
+      for (auto *Ext : I->visible_extensions())
         MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
-                                   IMPDecl, *Ext, IncompleteImpl, false,
+                                   IMPDecl, Ext, IncompleteImpl, false,
                                    WarnCategoryMethodImpl);
-      }
     }
 
     // Check for any implementation of methods declared in protocols.
-    for (ObjCInterfaceDecl::all_protocol_iterator
-          PI = I->all_referenced_protocol_begin(),
-          E = I->all_referenced_protocol_end(); PI != E; ++PI)
+    for (auto *PI : I->all_referenced_protocols())
       MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
-                                 IMPDecl,
-                                 (*PI), IncompleteImpl, false, 
+                                 IMPDecl, PI, IncompleteImpl, false,
                                  WarnCategoryMethodImpl);
-    
+
     // FIXME. For now, we are not checking for exact match of methods
     // in category implementation and its primary class's super class. 
     if (!WarnCategoryMethodImpl && I->getSuperClass())
@@ -1841,20 +1913,6 @@
 /// warns each time an exact match is found. 
 void Sema::CheckCategoryVsClassMethodMatches(
                                   ObjCCategoryImplDecl *CatIMPDecl) {
-  SelectorSet InsMap, ClsMap;
-  
-  for (ObjCImplementationDecl::instmeth_iterator
-       I = CatIMPDecl->instmeth_begin(), 
-       E = CatIMPDecl->instmeth_end(); I!=E; ++I)
-    InsMap.insert((*I)->getSelector());
-  
-  for (ObjCImplementationDecl::classmeth_iterator
-       I = CatIMPDecl->classmeth_begin(),
-       E = CatIMPDecl->classmeth_end(); I != E; ++I)
-    ClsMap.insert((*I)->getSelector());
-  if (InsMap.empty() && ClsMap.empty())
-    return;
-  
   // Get category's primary class.
   ObjCCategoryDecl *CatDecl = CatIMPDecl->getCategoryDecl();
   if (!CatDecl)
@@ -1862,6 +1920,28 @@
   ObjCInterfaceDecl *IDecl = CatDecl->getClassInterface();
   if (!IDecl)
     return;
+  ObjCInterfaceDecl *SuperIDecl = IDecl->getSuperClass();
+  SelectorSet InsMap, ClsMap;
+  
+  for (const auto *I : CatIMPDecl->instance_methods()) {
+    Selector Sel = I->getSelector();
+    // When checking for methods implemented in the category, skip over
+    // those declared in the category class's super class, because the
+    // super class must implement the method.
+    if (SuperIDecl && SuperIDecl->lookupMethod(Sel, true))
+      continue;
+    InsMap.insert(Sel);
+  }
+  
+  for (const auto *I : CatIMPDecl->class_methods()) {
+    Selector Sel = I->getSelector();
+    if (SuperIDecl && SuperIDecl->lookupMethod(Sel, false))
+      continue;
+    ClsMap.insert(Sel);
+  }
+  if (InsMap.empty() && ClsMap.empty())
+    return;
+  
   SelectorSet InsMapSeen, ClsMapSeen;
   bool IncompleteImpl = false;
   MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
@@ -1876,24 +1956,22 @@
   SelectorSet InsMap;
   // Check and see if instance methods in class interface have been
   // implemented in the implementation class.
-  for (ObjCImplementationDecl::instmeth_iterator
-         I = IMPDecl->instmeth_begin(), E = IMPDecl->instmeth_end(); I!=E; ++I)
-    InsMap.insert((*I)->getSelector());
+  for (const auto *I : IMPDecl->instance_methods())
+    InsMap.insert(I->getSelector());
 
   // Check and see if properties declared in the interface have either 1)
   // an implementation or 2) there is a @synthesize/@dynamic implementation
   // of the property in the @implementation.
-  if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl))
-    if  (!(LangOpts.ObjCDefaultSynthProperties &&
-           LangOpts.ObjCRuntime.isNonFragile()) ||
-         IDecl->isObjCRequiresPropertyDefs())
-      DiagnoseUnimplementedProperties(S, IMPDecl, CDecl);
-      
+  if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+    bool SynthesizeProperties = LangOpts.ObjCDefaultSynthProperties &&
+                                LangOpts.ObjCRuntime.isNonFragile() &&
+                                !IDecl->isObjCRequiresPropertyDefs();
+    DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, SynthesizeProperties);
+  }
+
   SelectorSet ClsMap;
-  for (ObjCImplementationDecl::classmeth_iterator
-       I = IMPDecl->classmeth_begin(),
-       E = IMPDecl->classmeth_end(); I != E; ++I)
-    ClsMap.insert((*I)->getSelector());
+  for (const auto *I : IMPDecl->class_methods())
+    ClsMap.insert(I->getSelector());
 
   // Check for type conflict of methods declared in a class/protocol and
   // its implementation; if any.
@@ -1913,28 +1991,25 @@
   // Check and see if class methods in class interface have been
   // implemented in the implementation class.
 
+  LazyProtocolNameSet ExplicitImplProtocols;
+
   if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
-    for (ObjCInterfaceDecl::all_protocol_iterator
-          PI = I->all_referenced_protocol_begin(),
-          E = I->all_referenced_protocol_end(); PI != E; ++PI)
-      CheckProtocolMethodDefs(IMPDecl->getLocation(), *PI, IncompleteImpl,
-                              InsMap, ClsMap, I);
+    for (auto *PI : I->all_referenced_protocols())
+      CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), PI, IncompleteImpl,
+                              InsMap, ClsMap, I, ExplicitImplProtocols);
     // Check class extensions (unnamed categories)
-    for (ObjCInterfaceDecl::visible_extensions_iterator
-           Ext = I->visible_extensions_begin(),
-           ExtEnd = I->visible_extensions_end();
-         Ext != ExtEnd; ++Ext) {
-      ImplMethodsVsClassMethods(S, IMPDecl, *Ext, IncompleteImpl);
-    }
+    for (auto *Ext : I->visible_extensions())
+      ImplMethodsVsClassMethods(S, IMPDecl, Ext, IncompleteImpl);
   } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
     // For extended class, unimplemented methods in its protocols will
     // be reported in the primary class.
     if (!C->IsClassExtension()) {
-      for (ObjCCategoryDecl::protocol_iterator PI = C->protocol_begin(),
-           E = C->protocol_end(); PI != E; ++PI)
-        CheckProtocolMethodDefs(IMPDecl->getLocation(), *PI, IncompleteImpl,
-                                InsMap, ClsMap, CDecl);
-      DiagnoseUnimplementedProperties(S, IMPDecl, CDecl);
+      for (auto *P : C->protocols())
+        CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), P,
+                                IncompleteImpl, InsMap, ClsMap, CDecl,
+                                ExplicitImplProtocols);
+      DiagnoseUnimplementedProperties(S, IMPDecl, CDecl,
+                                      /* SynthesizeProperties */ false);
     } 
   } else
     llvm_unreachable("invalid ObjCContainerDecl type.");
@@ -2103,8 +2178,8 @@
 bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
                                       const ObjCMethodDecl *right,
                                       MethodMatchStrategy strategy) {
-  if (!matchTypes(Context, strategy,
-                  left->getResultType(), right->getResultType()))
+  if (!matchTypes(Context, strategy, left->getReturnType(),
+                  right->getReturnType()))
     return false;
 
   // If either is hidden, it is not considered to match.
@@ -2233,7 +2308,7 @@
 
   // Don't complain about mismatches for -length if the method we
   // chose has an integral result type.
-  return (chosen->getResultType()->isIntegerType());
+  return (chosen->getReturnType()->isIntegerType());
 }
 
 ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
@@ -2327,11 +2402,15 @@
     return 0;
 
   GlobalMethods &Methods = Pos->second;
-
-  if (Methods.first.Method && Methods.first.Method->isDefined())
-    return Methods.first.Method;
-  if (Methods.second.Method && Methods.second.Method->isDefined())
-    return Methods.second.Method;
+  for (const ObjCMethodList *Method = &Methods.first; Method;
+       Method = Method->getNext())
+    if (Method->Method && Method->Method->isDefined())
+      return Method->Method;
+  
+  for (const ObjCMethodList *Method = &Methods.second; Method;
+       Method = Method->getNext())
+    if (Method->Method && Method->Method->isDefined())
+      return Method->Method;
   return 0;
 }
 
@@ -2423,51 +2502,6 @@
   return (SelectedMethods.size() == 1) ? SelectedMethods[0] : NULL;
 }
 
-static void
-HelperToDiagnoseMismatchedMethodsInGlobalPool(Sema &S,
-                                              ObjCMethodList &MethList) {
-  ObjCMethodList *M = &MethList;
-  ObjCMethodDecl *TargetMethod = M->Method;
-  while (TargetMethod &&
-         isa<ObjCImplDecl>(TargetMethod->getDeclContext())) {
-    M = M->getNext();
-    TargetMethod = M ? M->Method : 0;
-  }
-  if (!TargetMethod)
-    return;
-  bool FirstTime = true;
-  for (M = M->getNext(); M; M=M->getNext()) {
-    ObjCMethodDecl *MatchingMethodDecl = M->Method;
-    if (isa<ObjCImplDecl>(MatchingMethodDecl->getDeclContext()))
-      continue;
-    if (!S.MatchTwoMethodDeclarations(TargetMethod,
-                                      MatchingMethodDecl, Sema::MMS_loose)) {
-      if (FirstTime) {
-        FirstTime = false;
-        S.Diag(TargetMethod->getLocation(), diag::warning_multiple_selectors)
-        << TargetMethod->getSelector();
-      }
-      S.Diag(MatchingMethodDecl->getLocation(), diag::note_also_found);
-    }
-  }
-}
-
-void Sema::DiagnoseMismatchedMethodsInGlobalPool() {
-  unsigned DIAG = diag::warning_multiple_selectors;
-  if (Diags.getDiagnosticLevel(DIAG, SourceLocation())
-      == DiagnosticsEngine::Ignored)
-    return;
-  for (GlobalMethodPool::iterator b = MethodPool.begin(),
-       e = MethodPool.end(); b != e; b++) {
-    // first, instance methods
-    ObjCMethodList &InstMethList = b->second.first;
-    HelperToDiagnoseMismatchedMethodsInGlobalPool(*this, InstMethList);
-    // second, class methods
-    ObjCMethodList &ClsMethList = b->second.second;
-    HelperToDiagnoseMismatchedMethodsInGlobalPool(*this, ClsMethList);
-  }
-}
-
 /// DiagnoseDuplicateIvars -
 /// Check for duplicate ivars in the entire class at the start of 
 /// \@implementation. This becomes necessary because class extension can
@@ -2475,9 +2509,7 @@
 /// class's \@implementation is seen.
 void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, 
                                   ObjCInterfaceDecl *SID) {
-  for (ObjCInterfaceDecl::ivar_iterator IVI = ID->ivar_begin(),
-       IVE = ID->ivar_end(); IVI != IVE; ++IVI) {
-    ObjCIvarDecl* Ivar = *IVI;
+  for (auto *Ivar : ID->ivars()) {
     if (Ivar->isInvalidDecl())
       continue;
     if (IdentifierInfo *II = Ivar->getIdentifier()) {
@@ -2603,10 +2635,8 @@
       // ProcessPropertyDecl is responsible for diagnosing conflicts with any
       // user-defined setter/getter. It also synthesizes setter/getter methods
       // and adds them to the DeclContext and global method pools.
-      for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(),
-                                            E = CDecl->prop_end();
-           I != E; ++I)
-        ProcessPropertyDecl(*I, CDecl);
+      for (auto *I : CDecl->properties())
+        ProcessPropertyDecl(I, CDecl);
     CDecl->setAtEndRange(AtEnd);
   }
   if (ObjCImplementationDecl *IC=dyn_cast<ObjCImplementationDecl>(ClassDecl)) {
@@ -2617,13 +2647,8 @@
       // of the other class extensions. Mark them as synthesized as
       // property will be synthesized when property with same name is
       // seen in the @implementation.
-      for (ObjCInterfaceDecl::visible_extensions_iterator
-             Ext = IDecl->visible_extensions_begin(),
-             ExtEnd = IDecl->visible_extensions_end();
-           Ext != ExtEnd; ++Ext) {
-        for (ObjCContainerDecl::prop_iterator I = Ext->prop_begin(),
-             E = Ext->prop_end(); I != E; ++I) {
-          ObjCPropertyDecl *Property = *I;
+      for (const auto *Ext : IDecl->visible_extensions()) {
+        for (const auto *Property : Ext->properties()) {
           // Skip over properties declared @dynamic
           if (const ObjCPropertyImplDecl *PIDecl
               = IC->FindPropertyImplDecl(Property->getIdentifier()))
@@ -2631,10 +2656,7 @@
                   == ObjCPropertyImplDecl::Dynamic)
               continue;
 
-          for (ObjCInterfaceDecl::visible_extensions_iterator
-                 Ext = IDecl->visible_extensions_begin(),
-                 ExtEnd = IDecl->visible_extensions_end();
-               Ext != ExtEnd; ++Ext) {
+          for (const auto *Ext : IDecl->visible_extensions()) {
             if (ObjCMethodDecl *GetterMethod
                   = Ext->getInstanceMethod(Property->getGetterName()))
               GetterMethod->setPropertyAccessor(true);
@@ -2648,7 +2670,10 @@
       ImplMethodsVsClassMethods(S, IC, IDecl);
       AtomicPropertySetterGetterRules(IC, IDecl);
       DiagnoseOwningPropertyGetterSynthesis(IC);
-  
+      DiagnoseUnusedBackingIvarInAccessor(S, IC);
+      if (IDecl->hasDesignatedInitializers())
+        DiagnoseMissingDesignatedInitOverrides(IC, IDecl);
+
       bool HasRootClassAttr = IDecl->hasAttr<ObjCRootClassAttr>();
       if (IDecl->getSuperClass() == NULL) {
         // This class has no superclass, so check that it has been marked with
@@ -2729,72 +2754,14 @@
   return (Decl::ObjCDeclQualifier) (unsigned) PQTVal;
 }
 
-static inline
-unsigned countAlignAttr(const AttrVec &A) {
-  unsigned count=0;
-  for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i)
-    if ((*i)->getKind() == attr::Aligned)
-      ++count;
-  return count;
-}
-
-static inline
-bool containsInvalidMethodImplAttribute(ObjCMethodDecl *IMD,
-                                        const AttrVec &A) {
-  // If method is only declared in implementation (private method),
-  // No need to issue any diagnostics on method definition with attributes.
-  if (!IMD)
-    return false;
-  
-  // method declared in interface has no attribute. 
-  // But implementation has attributes. This is invalid.
-  // Except when implementation has 'Align' attribute which is
-  // immaterial to method declared in interface.
-  if (!IMD->hasAttrs())
-    return (A.size() > countAlignAttr(A));
-
-  const AttrVec &D = IMD->getAttrs();
-
-  unsigned countAlignOnImpl = countAlignAttr(A);
-  if (!countAlignOnImpl && (A.size() != D.size()))
-    return true;
-  else if (countAlignOnImpl) {
-    unsigned countAlignOnDecl = countAlignAttr(D);
-    if (countAlignOnDecl && (A.size() != D.size()))
-      return true;
-    else if (!countAlignOnDecl && 
-             ((A.size()-countAlignOnImpl) != D.size()))
-      return true;
-  }
-  
-  // attributes on method declaration and definition must match exactly.
-  // Note that we have at most a couple of attributes on methods, so this
-  // n*n search is good enough.
-  for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i) {
-    if ((*i)->getKind() == attr::Aligned)
-      continue;
-    bool match = false;
-    for (AttrVec::const_iterator i1 = D.begin(), e1 = D.end(); i1 != e1; ++i1) {
-      if ((*i)->getKind() == (*i1)->getKind()) {
-        match = true;
-        break;
-      }
-    }
-    if (!match)
-      return true;
-  }
-  
-  return false;
-}
-
 /// \brief Check whether the declared result type of the given Objective-C
 /// method declaration is compatible with the method's class.
 ///
 static Sema::ResultTypeCompatibilityKind 
 CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method,
                                     ObjCInterfaceDecl *CurrentClass) {
-  QualType ResultType = Method->getResultType();
-  
+  QualType ResultType = Method->getReturnType();
+
   // If an Objective-C method inherits its related result type, then its 
   // declared result type must be compatible with its own class type. The
   // declared result type is compatible if:
@@ -2928,12 +2895,8 @@
       return;
     
     //   - categories,
-    for (ObjCInterfaceDecl::known_categories_iterator
-           cat = iface->known_categories_begin(),
-           catEnd = iface->known_categories_end();
-         cat != catEnd; ++cat) {
-      search(*cat);
-    }
+    for (auto *Cat : iface->known_categories())
+      search(Cat);
 
     //   - the super class, and
     if (ObjCInterfaceDecl *super = iface->getSuperClass())
@@ -3102,9 +3065,9 @@
   QualType resultDeclType;
 
   bool HasRelatedResultType = false;
-  TypeSourceInfo *ResultTInfo = 0;
+  TypeSourceInfo *ReturnTInfo = 0;
   if (ReturnType) {
-    resultDeclType = GetTypeFromParser(ReturnType, &ResultTInfo);
+    resultDeclType = GetTypeFromParser(ReturnType, &ReturnTInfo);
 
     if (CheckFunctionReturnType(resultDeclType, MethodLoc))
       return 0;
@@ -3116,18 +3079,14 @@
       << FixItHint::CreateInsertion(SelectorLocs.front(), "(id)");
   }
 
-  ObjCMethodDecl* ObjCMethod =
-    ObjCMethodDecl::Create(Context, MethodLoc, EndLoc, Sel,
-                           resultDeclType,
-                           ResultTInfo,
-                           CurContext,
-                           MethodType == tok::minus, isVariadic,
-                           /*isPropertyAccessor=*/false,
-                           /*isImplicitlyDeclared=*/false, /*isDefined=*/false,
-                           MethodDeclKind == tok::objc_optional 
-                             ? ObjCMethodDecl::Optional
-                             : ObjCMethodDecl::Required,
-                           HasRelatedResultType);
+  ObjCMethodDecl *ObjCMethod = ObjCMethodDecl::Create(
+      Context, MethodLoc, EndLoc, Sel, resultDeclType, ReturnTInfo, CurContext,
+      MethodType == tok::minus, isVariadic,
+      /*isPropertyAccessor=*/false,
+      /*isImplicitlyDeclared=*/false, /*isDefined=*/false,
+      MethodDeclKind == tok::objc_optional ? ObjCMethodDecl::Optional
+                                           : ObjCMethodDecl::Required,
+      HasRelatedResultType);
 
   SmallVector<ParmVarDecl*, 16> Params;
 
@@ -3221,17 +3180,15 @@
     if (IMD && IMD->hasAttr<ObjCRequiresSuperAttr>() &&
         !ObjCMethod->hasAttr<ObjCRequiresSuperAttr>()) {
       // merge the attribute into implementation.
-      ObjCMethod->addAttr(
-        new (Context) ObjCRequiresSuperAttr(ObjCMethod->getLocation(), Context));
+      ObjCMethod->addAttr(ObjCRequiresSuperAttr::CreateImplicit(Context,
+                                                   ObjCMethod->getLocation()));
     }
-    if (ObjCMethod->hasAttrs() &&
-        containsInvalidMethodImplAttribute(IMD, ObjCMethod->getAttrs())) {
-      SourceLocation MethodLoc = IMD->getLocation();
-      if (!getSourceManager().isInSystemHeader(MethodLoc)) {
-        Diag(EndLoc, diag::warn_attribute_method_def);
-        Diag(MethodLoc, diag::note_method_declared_at)
+    if (isa<ObjCCategoryImplDecl>(ImpDecl)) {
+      ObjCMethodFamily family = ObjCMethod->getSelector().getMethodFamily();
+      if (family == OMF_dealloc && IMD && IMD->isOverriding())
+        Diag(ObjCMethod->getLocation(), diag::warn_dealloc_in_category)
           << ObjCMethod->getDeclName();
-      }
     }
   } else {
     cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod);
@@ -3482,8 +3439,6 @@
       ReferencedSelectors[Sels[I].first] = Sels[I].second;
   }
   
-  DiagnoseMismatchedMethodsInGlobalPool();
-  
   // Warning will be issued only when selector table is
   // generated (which means there is at least one implementation
   // in the TU). This is to match gcc's behavior.
@@ -3503,39 +3458,93 @@
 ObjCIvarDecl *
 Sema::GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                      const ObjCPropertyDecl *&PDecl) const {
-  
+  if (Method->isClassMethod())
+    return 0;
   const ObjCInterfaceDecl *IDecl = Method->getClassInterface();
   if (!IDecl)
     return 0;
-  Method = IDecl->lookupMethod(Method->getSelector(), true);
+  Method = IDecl->lookupMethod(Method->getSelector(), /*isInstance=*/true,
+                               /*shallowCategoryLookup=*/false,
+                               /*followSuper=*/false);
   if (!Method || !Method->isPropertyAccessor())
     return 0;
-  if ((PDecl = Method->findPropertyDecl())) {
-    if (!PDecl->getDeclContext())
-      return 0;
-    // Make sure property belongs to accessor's class and not to
-    // one of its super classes.
-    if (const ObjCInterfaceDecl *CID =
-        dyn_cast<ObjCInterfaceDecl>(PDecl->getDeclContext()))
-      if (CID != IDecl)
-        return 0;
-    return PDecl->getPropertyIvarDecl();
-  }
+  if ((PDecl = Method->findPropertyDecl()))
+    if (ObjCIvarDecl *IV = PDecl->getPropertyIvarDecl()) {
+      // The property's backing ivar must belong to the property's class or
+      // be a private ivar in the class's implementation.
+      // FIXME: fix the const-ness issue.
+      IV = const_cast<ObjCInterfaceDecl *>(IDecl)->lookupInstanceVariable(
+                                                        IV->getIdentifier());
+      return IV;
+    }
   return 0;
 }
 
-void Sema::DiagnoseUnusedBackingIvarInAccessor(Scope *S) {
-  if (S->hasUnrecoverableErrorOccurred() || !S->isInObjcMethodScope())
+namespace {
+  /// Used by Sema::DiagnoseUnusedBackingIvarInAccessor to check if a property
+  /// accessor references the backing ivar.
+  class UnusedBackingIvarChecker :
+      public DataRecursiveASTVisitor<UnusedBackingIvarChecker> {
+  public:
+    Sema &S;
+    const ObjCMethodDecl *Method;
+    const ObjCIvarDecl *IvarD;
+    bool AccessedIvar;
+    bool InvokedSelfMethod;
+
+    UnusedBackingIvarChecker(Sema &S, const ObjCMethodDecl *Method,
+                             const ObjCIvarDecl *IvarD)
+      : S(S), Method(Method), IvarD(IvarD),
+        AccessedIvar(false), InvokedSelfMethod(false) {
+      assert(IvarD);
+    }
+
+    bool VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+      if (E->getDecl() == IvarD) {
+        AccessedIvar = true;
+        return false;
+      }
+      return true;
+    }
+
+    bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+      if (E->getReceiverKind() == ObjCMessageExpr::Instance &&
+          S.isSelfExpr(E->getInstanceReceiver(), Method)) {
+        InvokedSelfMethod = true;
+      }
+      return true;
+    }
+  };
+}
+
+void Sema::DiagnoseUnusedBackingIvarInAccessor(Scope *S,
+                                          const ObjCImplementationDecl *ImplD) {
+  if (S->hasUnrecoverableErrorOccurred())
     return;
-  
-  const ObjCMethodDecl *CurMethod = getCurMethodDecl();
-  if (!CurMethod)
-    return;
-  const ObjCPropertyDecl *PDecl;
-  const ObjCIvarDecl *IV = GetIvarBackingPropertyAccessor(CurMethod, PDecl);
-  if (IV && !IV->getBackingIvarReferencedInAccessor()) {
-    Diag(getCurMethodDecl()->getLocation(), diag::warn_unused_property_backing_ivar)
-    << IV->getDeclName();
-    Diag(PDecl->getLocation(), diag::note_property_declare);
+
+  for (const auto *CurMethod : ImplD->instance_methods()) {
+    unsigned DIAG = diag::warn_unused_property_backing_ivar;
+    SourceLocation Loc = CurMethod->getLocation();
+    if (Diags.getDiagnosticLevel(DIAG, Loc) == DiagnosticsEngine::Ignored)
+      continue;
+
+    const ObjCPropertyDecl *PDecl;
+    const ObjCIvarDecl *IV = GetIvarBackingPropertyAccessor(CurMethod, PDecl);
+    if (!IV)
+      continue;
+
+    UnusedBackingIvarChecker Checker(*this, CurMethod, IV);
+    Checker.TraverseStmt(CurMethod->getBody());
+    if (Checker.AccessedIvar)
+      continue;
+
+    // Do not issue this warning if the backing ivar is used somewhere and
+    // the accessor implementation makes a self call. This prevents false
+    // positives in cases where the ivar is accessed by another method that
+    // the accessor delegates to.
+    if (!IV->isReferenced() || !Checker.InvokedSelfMethod) {
+      Diag(Loc, DIAG) << IV;
+      Diag(PDecl->getLocation(), diag::note_property_declare);
+    }
   }
 }
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index 3e8f324..59fd85e 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTMutationListener.h"
 #include "clang/AST/CXXInheritance.h"
 #include "clang/AST/Expr.h"
 #include "clang/AST/ExprCXX.h"
@@ -132,6 +133,25 @@
   return SourceDecl->getType()->castAs<FunctionProtoType>();
 }
 
+void Sema::UpdateExceptionSpec(FunctionDecl *FD,
+                               const FunctionProtoType::ExtProtoInfo &EPI) {
+  const FunctionProtoType *Proto = FD->getType()->castAs<FunctionProtoType>();
+
+  // Overwrite the exception spec and rebuild the function type.
+  FunctionProtoType::ExtProtoInfo NewEPI = Proto->getExtProtoInfo();
+  NewEPI.ExceptionSpecType = EPI.ExceptionSpecType;
+  NewEPI.NumExceptions = EPI.NumExceptions;
+  NewEPI.Exceptions = EPI.Exceptions;
+  NewEPI.NoexceptExpr = EPI.NoexceptExpr;
+  FD->setType(Context.getFunctionType(Proto->getReturnType(),
+                                      Proto->getParamTypes(), NewEPI));
+
+  // If we've fully resolved the exception specification, notify listeners.
+  if (!isUnresolvedExceptionSpec(EPI.ExceptionSpecType))
+    if (auto *Listener = getASTMutationListener())
+      Listener->ResolvedExceptionSpec(FD);
+}
+
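
Callers that have just resolved a delayed exception specification are expected to hand the result back through this hook; a rough usage sketch, given a Sema &S and a FunctionDecl *FD (the particular EST value is only an example, and the real call sites are elsewhere in the tree):

FunctionProtoType::ExtProtoInfo EPI;
EPI.ExceptionSpecType = EST_BasicNoexcept; // e.g. the spec resolved to noexcept
S.UpdateExceptionSpec(FD, EPI);            // rebuilds FD's type, notifies listeners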
 /// Determine whether a function has an implicitly-generated exception
 /// specification.
 static bool hasImplicitExceptionSpec(FunctionDecl *Decl) {
@@ -140,10 +160,13 @@
       Decl->getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
     return false;
 
-  // If the user didn't declare the function, its exception specification must
-  // be implicit.
+  // For a function that the user didn't declare:
+  //  - if this is a destructor, its exception specification is implicit.
+  //  - if this is 'operator delete' or 'operator delete[]', the exception
+  //    specification is as-if an explicit exception specification was given
+  //    (per [basic.stc.dynamic]p2).
   if (!Decl->getTypeSourceInfo())
-    return true;
+    return isa<CXXDestructorDecl>(Decl);
 
   const FunctionProtoType *Ty =
     Decl->getTypeSourceInfo()->getType()->getAs<FunctionProtoType>();
@@ -155,9 +178,13 @@
   bool IsOperatorNew = OO == OO_New || OO == OO_Array_New;
   bool MissingExceptionSpecification = false;
   bool MissingEmptyExceptionSpecification = false;
+
   unsigned DiagID = diag::err_mismatched_exception_spec;
-  if (getLangOpts().MicrosoftExt)
+  bool ReturnValueOnError = true;
+  if (getLangOpts().MicrosoftExt) {
     DiagID = diag::warn_mismatched_exception_spec; 
+    ReturnValueOnError = false;
+  }
 
   // Check the types as written: they must match before any exception
   // specification adjustment is applied.
@@ -182,9 +209,9 @@
   }
 
   // The failure was something other than a missing exception
-  // specification; return an error.
+  // specification; return an error, except in MS mode where this is a warning.
   if (!MissingExceptionSpecification)
-    return true;
+    return ReturnValueOnError;
 
   const FunctionProtoType *NewProto =
     New->getType()->castAs<FunctionProtoType>();
@@ -203,8 +230,8 @@
       Old->isExternC()) {
     FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
     EPI.ExceptionSpecType = EST_DynamicNone;
-    QualType NewType = Context.getFunctionType(NewProto->getResultType(),
-                                               NewProto->getArgTypes(), EPI);
+    QualType NewType = Context.getFunctionType(NewProto->getReturnType(),
+                                               NewProto->getParamTypes(), EPI);
     New->setType(NewType);
     return false;
   }
@@ -224,8 +251,8 @@
 
   // Update the type of the function with the appropriate exception
   // specification.
-  QualType NewType = Context.getFunctionType(NewProto->getResultType(),
-                                             NewProto->getArgTypes(), EPI);
+  QualType NewType = Context.getFunctionType(NewProto->getReturnType(),
+                                             NewProto->getParamTypes(), EPI);
   New->setType(NewType);
 
   // Warn about the lack of exception specification.
@@ -239,16 +266,13 @@
   case EST_Dynamic: {
     OS << "throw(";
     bool OnFirstException = true;
-    for (FunctionProtoType::exception_iterator E = OldProto->exception_begin(),
-                                            EEnd = OldProto->exception_end();
-         E != EEnd;
-         ++E) {
+    for (const auto &E : OldProto->exceptions()) {
       if (OnFirstException)
         OnFirstException = false;
       else
         OS << ", ";
       
-      OS << E->getAsString(getPrintingPolicy());
+      OS << E.getAsString(getPrintingPolicy());
     }
     OS << ")";
     break;
@@ -302,10 +326,14 @@
     const FunctionProtoType *New, SourceLocation NewLoc) {
   unsigned DiagID = diag::err_mismatched_exception_spec;
   if (getLangOpts().MicrosoftExt)
-    DiagID = diag::warn_mismatched_exception_spec; 
-  return CheckEquivalentExceptionSpec(PDiag(DiagID),
-                                      PDiag(diag::note_previous_declaration),
-                                      Old, OldLoc, New, NewLoc);
+    DiagID = diag::warn_mismatched_exception_spec;
+  bool Result = CheckEquivalentExceptionSpec(PDiag(DiagID),
+      PDiag(diag::note_previous_declaration), Old, OldLoc, New, NewLoc);
+
+  // In Microsoft mode, mismatching exception specifications just cause a
+  // warning.
+  if (getLangOpts().MicrosoftExt)
+    return false;
+  return Result;
 }
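
With this change, a redeclaration with a mismatched dynamic exception specification is only warned about and then accepted when Microsoft extensions are enabled, while other modes still reject it; for example (illustrative user code, not from the patch):

void f() throw(int);
void f() throw();  // MS extensions: warn_mismatched_exception_spec only; otherwise an error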
 
 /// CheckEquivalentExceptionSpec - Check if the two types have compatible
@@ -488,13 +516,11 @@
   // Both have a dynamic exception spec. Collect the first set, then compare
   // to the second.
   llvm::SmallPtrSet<CanQualType, 8> OldTypes, NewTypes;
-  for (FunctionProtoType::exception_iterator I = Old->exception_begin(),
-       E = Old->exception_end(); I != E; ++I)
-    OldTypes.insert(Context.getCanonicalType(*I).getUnqualifiedType());
+  for (const auto &I : Old->exceptions())
+    OldTypes.insert(Context.getCanonicalType(I).getUnqualifiedType());
 
-  for (FunctionProtoType::exception_iterator I = New->exception_begin(),
-       E = New->exception_end(); I != E && Success; ++I) {
-    CanQualType TypePtr = Context.getCanonicalType(*I).getUnqualifiedType();
+  for (const auto &I : New->exceptions()) {
+    CanQualType TypePtr = Context.getCanonicalType(I).getUnqualifiedType();
     if(OldTypes.count(TypePtr))
       NewTypes.insert(TypePtr);
     else
@@ -602,10 +628,9 @@
          "Exception spec subset: non-dynamic case slipped through.");
 
   // Neither contains everything or nothing. Do a proper comparison.
-  for (FunctionProtoType::exception_iterator SubI = Subset->exception_begin(),
-       SubE = Subset->exception_end(); SubI != SubE; ++SubI) {
+  for (const auto &SubI : Subset->exceptions()) {
     // Take one type from the subset.
-    QualType CanonicalSubT = Context.getCanonicalType(*SubI);
+    QualType CanonicalSubT = Context.getCanonicalType(SubI);
     // Unwrap pointers and references so that we can do checks within a class
     // hierarchy. Don't unwrap member pointers; they don't have hierarchy
     // conversions on the pointee.
@@ -624,10 +649,8 @@
 
     bool Contained = false;
     // Make sure it's in the superset.
-    for (FunctionProtoType::exception_iterator SuperI =
-           Superset->exception_begin(), SuperE = Superset->exception_end();
-         SuperI != SuperE; ++SuperI) {
-      QualType CanonicalSuperT = Context.getCanonicalType(*SuperI);
+    for (const auto &SuperI : Superset->exceptions()) {
+      QualType CanonicalSuperT = Context.getCanonicalType(SuperI);
       // SubT must be SuperT or derived from it, or pointer or reference to
       // such types.
       if (const ReferenceType *RefTy = CanonicalSuperT->getAs<ReferenceType>())
@@ -711,23 +734,21 @@
     const FunctionProtoType *Target, SourceLocation TargetLoc,
     const FunctionProtoType *Source, SourceLocation SourceLoc)
 {
-  if (CheckSpecForTypesEquivalent(*this,
-                           PDiag(diag::err_deep_exception_specs_differ) << 0, 
-                                  PDiag(),
-                                  Target->getResultType(), TargetLoc,
-                                  Source->getResultType(), SourceLoc))
+  if (CheckSpecForTypesEquivalent(
+          *this, PDiag(diag::err_deep_exception_specs_differ) << 0, PDiag(),
+          Target->getReturnType(), TargetLoc, Source->getReturnType(),
+          SourceLoc))
     return true;
 
   // We shouldn't even be testing this unless the arguments are otherwise
   // compatible.
-  assert(Target->getNumArgs() == Source->getNumArgs() &&
+  assert(Target->getNumParams() == Source->getNumParams() &&
          "Functions have different argument counts.");
-  for (unsigned i = 0, E = Target->getNumArgs(); i != E; ++i) {
-    if (CheckSpecForTypesEquivalent(*this,
-                           PDiag(diag::err_deep_exception_specs_differ) << 1, 
-                                    PDiag(),
-                                    Target->getArgType(i), TargetLoc,
-                                    Source->getArgType(i), SourceLoc))
+  for (unsigned i = 0, E = Target->getNumParams(); i != E; ++i) {
+    if (CheckSpecForTypesEquivalent(
+            *this, PDiag(diag::err_deep_exception_specs_differ) << 1, PDiag(),
+            Target->getParamType(i), TargetLoc, Source->getParamType(i),
+            SourceLoc))
       return true;
   }
   return false;
@@ -1063,7 +1084,6 @@
   case Expr::AddrLabelExprClass:
   case Expr::ArrayTypeTraitExprClass:
   case Expr::AtomicExprClass:
-  case Expr::BinaryTypeTraitExprClass:
   case Expr::TypeTraitExprClass:
   case Expr::CXXBoolLiteralExprClass:
   case Expr::CXXNoexceptExprClass:
@@ -1086,7 +1106,6 @@
   case Expr::PredefinedExprClass:
   case Expr::SizeOfPackExprClass:
   case Expr::StringLiteralClass:
-  case Expr::UnaryTypeTraitExprClass:
     // These expressions can never throw.
     return CT_Cannot;
 
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index e1f65f4..7894682 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -59,8 +59,8 @@
 
     // If the function has a deduced return type, and we can't deduce it,
     // then we can't use it either.
-    if (getLangOpts().CPlusPlus1y && FD->getResultType()->isUndeducedType() &&
-        DeduceReturnType(FD, SourceLocation(), /*Diagnose*/false))
+    if (getLangOpts().CPlusPlus1y && FD->getReturnType()->isUndeducedType() &&
+        DeduceReturnType(FD, SourceLocation(), /*Diagnose*/ false))
       return false;
   }
 
@@ -111,32 +111,17 @@
       break;
             
     case AR_Deprecated:
-      S.EmitDeprecationWarning(D, Message, Loc, UnknownObjCClass, ObjCPDecl);
+      if (S.getCurContextAvailability() != AR_Deprecated)
+        S.EmitAvailabilityWarning(Sema::AD_Deprecation,
+                                  D, Message, Loc, UnknownObjCClass, ObjCPDecl);
       break;
-            
+
     case AR_Unavailable:
-      if (S.getCurContextAvailability() != AR_Unavailable) {
-        if (Message.empty()) {
-          if (!UnknownObjCClass) {
-            S.Diag(Loc, diag::err_unavailable) << D->getDeclName();
-            if (ObjCPDecl)
-              S.Diag(ObjCPDecl->getLocation(), diag::note_property_attribute)
-                << ObjCPDecl->getDeclName() << 1;
-          }
-          else
-            S.Diag(Loc, diag::warn_unavailable_fwdclass_message) 
-              << D->getDeclName();
-        }
-        else
-          S.Diag(Loc, diag::err_unavailable_message) 
-            << D->getDeclName() << Message;
-        S.Diag(D->getLocation(), diag::note_unavailable_here)
-                  << isa<FunctionDecl>(D) << false;
-        if (ObjCPDecl)
-          S.Diag(ObjCPDecl->getLocation(), diag::note_property_attribute)
-          << ObjCPDecl->getDeclName() << 1;
-      }
+      if (S.getCurContextAvailability() != AR_Unavailable)
+        S.EmitAvailabilityWarning(Sema::AD_Unavailable,
+                                  D, Message, Loc, UnknownObjCClass, ObjCPDecl);
       break;
+
     }
     return Result;
 }
@@ -176,16 +161,14 @@
     }
   }
 
-  Diag(Decl->getLocation(), diag::note_unavailable_here)
-    << 1 << true;
+  Diag(Decl->getLocation(), diag::note_availability_specified_here)
+    << Decl << true;
 }
 
 /// \brief Determine whether a FunctionDecl was ever declared with an
 /// explicit storage class.
 static bool hasAnyExplicitStorageClass(const FunctionDecl *D) {
-  for (FunctionDecl::redecl_iterator I = D->redecls_begin(),
-                                     E = D->redecls_end();
-       I != E; ++I) {
+  for (auto I : D->redecls()) {
     if (I->getStorageClass() != SC_None)
       return true;
   }
@@ -279,13 +262,18 @@
       SmallVectorImpl<PartialDiagnosticAt> &Suppressed = Pos->second;
       for (unsigned I = 0, N = Suppressed.size(); I != N; ++I)
         Diag(Suppressed[I].first, Suppressed[I].second);
-      
+
       // Clear out the list of suppressed diagnostics, so that we don't emit
       // them again for this specialization. However, we don't obsolete this
       // entry from the table, because we want to avoid ever emitting these
       // diagnostics again.
       Suppressed.clear();
     }
+
+    // C++ [basic.start.main]p3:
+    //   The function 'main' shall not be used within a program.
+    if (cast<FunctionDecl>(D)->isMain())
+      Diag(Loc, diag::ext_main_used);
   }
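
The new check fires whenever 'main' is named in an expression context, which ISO C++ disallows and Clang now diagnoses through ext_main_used; for example (hypothetical user code):

int main();
void g() {
  int (*Entry)() = &main;  // use of 'main' in an expression: ext_main_used
  int Rc = main();         // likewise diagnosed
  (void)Entry; (void)Rc;
}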
 
   // See if this is an auto-typed variable whose initializer we are parsing.
@@ -305,7 +293,7 @@
 
     // If the function has a deduced return type, and we can't deduce it,
     // then we can't use it either.
-    if (getLangOpts().CPlusPlus1y && FD->getResultType()->isUndeducedType() &&
+    if (getLangOpts().CPlusPlus1y && FD->getReturnType()->isUndeducedType() &&
         DeduceReturnType(FD, Loc))
       return true;
   }
@@ -367,7 +355,7 @@
     }
 
     if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fn)) {
-      numFormalParams = proto->getNumArgs();
+      numFormalParams = proto->getNumParams();
     } else {
       numFormalParams = 0;
     }
@@ -444,10 +432,16 @@
   QualType Ty = E->getType();
   assert(!Ty.isNull() && "DefaultFunctionArrayConversion - missing type");
 
-  if (Ty->isFunctionType())
+  if (Ty->isFunctionType()) {
+    // If we are here, we are not calling a function but taking
+    // its address (which is not allowed in OpenCL v1.0 s6.8.a.3).
+    if (getLangOpts().OpenCL) {
+      Diag(E->getExprLoc(), diag::err_opencl_taking_function_address);
+      return ExprError();
+    }
     E = ImpCastExprToType(E, Context.getPointerType(Ty),
                           CK_FunctionToPointerDecay).take();
-  else if (Ty->isArrayType()) {
+  } else if (Ty->isArrayType()) {
     // In C90 mode, arrays only promote to pointers if the array expression is
     // an lvalue.  The relevant legalese is C90 6.2.2.1p3: "an lvalue that has
     // type 'array of type' is converted to an expression that has type 'pointer
@@ -643,6 +637,24 @@
   return Res;
 }
 
+/// CallExprUnaryConversions - a special case of a unary conversion
+/// performed on a function designator of a call expression.
+ExprResult Sema::CallExprUnaryConversions(Expr *E) {
+  QualType Ty = E->getType();
+  ExprResult Res = E;
+  // Only apply the function-to-pointer decay to a function type, not to a
+  // pointer-to-function type.
+  if (Ty->isFunctionType()) {
+    Res = ImpCastExprToType(E, Context.getPointerType(Ty),
+                            CK_FunctionToPointerDecay).take();
+    if (Res.isInvalid())
+      return ExprError();
+  }
+  Res = DefaultLvalueConversion(Res.take());
+  if (Res.isInvalid())
+    return ExprError();
+  return Owned(Res.take());
+}
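
For the callee of a call the effect is: a function lvalue gets the function-to-pointer decay followed by the lvalue conversion, while an expression that is already a function pointer only gets the lvalue conversion. Roughly (example code is mine, not from the patch):

void g();
void (*pg)() = g;
void h() {
  g();   // callee has function type: decay, then the lvalue conversion
  pg();  // callee already has pointer type: only the lvalue conversion applies
}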
 
 /// UsualUnaryConversions - Performs various conversions that are common to most
 /// operators (C99 6.3). The conversions of array and function types are
@@ -792,14 +804,20 @@
 
   // Complain about passing non-POD types through varargs.
   switch (VAK) {
-  case VAK_Valid:
-    break;
-
   case VAK_ValidInCXX11:
     DiagRuntimeBehavior(
         E->getLocStart(), 0,
         PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
-          << E->getType() << CT);
+          << Ty << CT);
+    // Fall through.
+  case VAK_Valid:
+    if (Ty->isRecordType()) {
+      // This is unlikely to be what the user intended. If the class has a
+      // 'c_str' member function, the user probably meant to call that.
+      DiagRuntimeBehavior(E->getLocStart(), 0,
+                          PDiag(diag::warn_pass_class_arg_to_vararg)
+                            << Ty << CT << hasCStrMethod(E) << ".c_str()");
+    }
     break;
 
   case VAK_Undefined:
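
The record-type branch above is aimed at code like the following, where a class object is passed through an ellipsis and a c_str() call was probably intended (example code is mine, not from the patch):

#include <cstdio>
#include <string>

void logName(const std::string &Name) {
  std::printf("%s\n", Name);          // class type through '...': warned, suggests Name.c_str()
  std::printf("%s\n", Name.c_str());  // what was probably meant
}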
@@ -1643,7 +1661,7 @@
         NameInfo.getLoc(), Ty, VK, FoundD, TemplateArgs);
   } else {
     assert(!TemplateArgs && "No template arguments for non-variable"
-                            " template specialization referrences");
+                            " template specialization references");
     E = DeclRefExpr::Create(
         Context,
         SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(),
@@ -1748,7 +1766,7 @@
         // Give a code modification hint to insert 'this->'.
         // TODO: fixit for inserting 'Base<T>::' in the other cases.
         // Actually quite difficult!
-        if (getLangOpts().MicrosoftMode)
+        if (getLangOpts().MSVCCompat)
           diagnostic = diag::warn_found_via_dependent_bases_lookup;
         if (isInstance) {
           Diag(R.getNameLoc(), diagnostic) << Name
@@ -1814,7 +1832,7 @@
     // function definition declared at class scope then we must set
     // DC to the lexical parent to be able to search into the parent
     // class.
-    if (getLangOpts().MicrosoftMode && isa<FunctionDecl>(DC) &&
+    if (getLangOpts().MSVCCompat && isa<FunctionDecl>(DC) &&
         cast<FunctionDecl>(DC)->getFriendObjectKind() &&
         DC->getLexicalParent()->isRecord())
       DC = DC->getLexicalParent();
@@ -2026,17 +2044,17 @@
     if (R.empty()) {
       // In Microsoft mode, if we are inside a template class member function
       // whose parent class has dependent base classes, and we can't resolve
-      // an identifier, then assume the identifier is a member of a dependent
-      // base class.  The goal is to postpone name lookup to instantiation time
-      // to be able to search into the type dependent base classes.
+      // an unqualified identifier, then assume the identifier is a member of a
+      // dependent base class.  The goal is to postpone name lookup to
+      // instantiation time to be able to search into the type dependent base
+      // classes.
       // FIXME: If we want 100% compatibility with MSVC, we will have to delay all
       // unqualified name lookup.  Any name lookup during template parsing means
       // clang might find something that MSVC doesn't.  For now, we only handle
       // the common case of members of a dependent base class.
-      if (getLangOpts().MicrosoftMode) {
+      if (SS.isEmpty() && getLangOpts().MSVCCompat) {
         CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CurContext);
         if (MD && MD->isInstance() && MD->getParent()->hasAnyDependentBases()) {
-          assert(SS.isEmpty() && "qualifiers should be already handled");
           QualType ThisType = MD->getThisType(Context);
           // Since the 'this' expression is synthesized, we don't need to
           // perform the double-lookup check.
@@ -2263,15 +2281,7 @@
         return ExprError();
 
       MarkAnyDeclReferenced(Loc, IV, true);
-      if (!IV->getBackingIvarReferencedInAccessor()) {
-        // Mark this ivar 'referenced' in this method, if it is a backing ivar
-        // of a property and current method is one of its property accessor.
-        const ObjCPropertyDecl *PDecl;
-        const ObjCIvarDecl *BIV = GetIvarBackingPropertyAccessor(CurMethod, PDecl);
-        if (BIV && BIV == IV)
-          IV->setBackingIvarReferencedInAccessor(true);
-      }
-      
+
       ObjCMethodFamily MF = CurMethod->getMethodFamily();
       if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize &&
           !IvarBacksCurrentMethodAccessor(IFace, CurMethod, IV))
@@ -2755,7 +2765,7 @@
 
       // If we're referring to a function with an __unknown_anytype
       // result type, make the entire expression __unknown_anytype.
-      if (fty->getResultType() == Context.UnknownAnyTy) {
+      if (fty->getReturnType() == Context.UnknownAnyTy) {
         type = Context.UnknownAnyTy;
         valueKind = VK_RValue;
         break;
@@ -2774,7 +2784,7 @@
       // type.
       if (!cast<FunctionDecl>(VD)->hasPrototype() &&
           isa<FunctionProtoType>(fty))
-        type = Context.getFunctionNoProtoType(fty->getResultType(),
+        type = Context.getFunctionNoProtoType(fty->getReturnType(),
                                               fty->getExtInfo());
 
       // Functions are r-values in C.
@@ -2792,7 +2802,7 @@
       // This should only be possible with a type written directly.
       if (const FunctionProtoType *proto
             = dyn_cast<FunctionProtoType>(VD->getType()))
-        if (proto->getResultType() == Context.UnknownAnyTy) {
+        if (proto->getReturnType() == Context.UnknownAnyTy) {
           type = Context.UnknownAnyTy;
           valueKind = VK_RValue;
           break;
@@ -3200,7 +3210,7 @@
       // If we still couldn't decide a type, we probably have something that
       // does not fit in a signed long long, but has no U suffix.
       if (Ty.isNull()) {
-        Diag(Tok.getLocation(), diag::warn_integer_too_large_for_signed);
+        Diag(Tok.getLocation(), diag::ext_integer_too_large_for_signed);
         Ty = Context.UnsignedLongLongTy;
         Width = Context.getTargetInfo().getLongLongWidth();
       }
@@ -3259,9 +3269,12 @@
     return false;
   }
 
-  // Allow sizeof(void)/alignof(void) as an extension.
+  // Allow sizeof(void)/alignof(void) as an extension, unless in OpenCL, where
+  // this is an error (OpenCL v1.1 s6.3.k).
   if (T->isVoidType()) {
-    S.Diag(Loc, diag::ext_sizeof_alignof_void_type) << TraitKind << ArgRange;
+    unsigned DiagID = S.LangOpts.OpenCL ? diag::err_opencl_sizeof_alignof_type
+                                        : diag::ext_sizeof_alignof_void_type;
+    S.Diag(Loc, DiagID) << TraitKind << ArgRange;
     return false;
   }
 
@@ -3302,7 +3315,7 @@
                                              << ICE->getSubExpr()->getType();
 }
 
-/// \brief Check the constrains on expression operands to unary type expression
+/// \brief Check the constraints on expression operands to unary type expression
 /// and type traits.
 ///
 /// Completes any types necessary and validates the constraints on the operand
@@ -3460,7 +3473,7 @@
   // delayed parsing --- except for trailing return types in C++11.
   // And if an id-expression referring to a field occurs in a
   // context that lacks a 'this' value, it's ill-formed --- except,
-  // agian, in C++11, where such references are allowed in an
+  // again, in C++11, where such references are allowed in an
   // unevaluated context.  So C++11 introduces some new complexity.
   //
   // For the record, since __alignof__ on expressions is a GCC
@@ -3965,7 +3978,7 @@
       : FunctionCallFilterCCC(SemaRef, NumArgs, HasExplicitTemplateArgs),
         FunctionName(FuncName) {}
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     if (!candidate.getCorrectionSpecifier() ||
         candidate.getCorrectionAsIdentifierInfo() != FunctionName) {
       return false;
@@ -4037,9 +4050,9 @@
 
   // C99 6.5.2.2p7 - the arguments are implicitly converted, as if by
   // assignment, to the types of the corresponding parameter, ...
-  unsigned NumArgsInProto = Proto->getNumArgs();
+  unsigned NumParams = Proto->getNumParams();
   bool Invalid = false;
-  unsigned MinArgs = FDecl ? FDecl->getMinRequiredArguments() : NumArgsInProto;
+  unsigned MinArgs = FDecl ? FDecl->getMinRequiredArguments() : NumParams;
   unsigned FnKind = Fn->getType()->isBlockPointerType()
                        ? 1 /* block */
                        : (IsExecConfig ? 3 /* kernel function (exec config) */
@@ -4047,7 +4060,7 @@
 
   // If too few arguments are available (and we don't have default
   // arguments for the remaining parameters), don't make the call.
-  if (Args.size() < NumArgsInProto) {
+  if (Args.size() < NumParams) {
     if (Args.size() < MinArgs) {
       MemberExpr *ME = dyn_cast<MemberExpr>(Fn);
       TypoCorrection TC;
@@ -4057,25 +4070,24 @@
                                                        : Fn->getLocStart())),
                         Args))) {
         unsigned diag_id =
-            MinArgs == NumArgsInProto && !Proto->isVariadic()
+            MinArgs == NumParams && !Proto->isVariadic()
                 ? diag::err_typecheck_call_too_few_args_suggest
                 : diag::err_typecheck_call_too_few_args_at_least_suggest;
         diagnoseTypo(TC, PDiag(diag_id) << FnKind << MinArgs
                                         << static_cast<unsigned>(Args.size())
-                                        << Fn->getSourceRange());
+                                        << TC.getCorrectionRange());
       } else if (MinArgs == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName())
-        Diag(RParenLoc, MinArgs == NumArgsInProto && !Proto->isVariadic()
-                          ? diag::err_typecheck_call_too_few_args_one
-                          : diag::err_typecheck_call_too_few_args_at_least_one)
-          << FnKind
-          << FDecl->getParamDecl(0) << Fn->getSourceRange();
+        Diag(RParenLoc,
+             MinArgs == NumParams && !Proto->isVariadic()
+                 ? diag::err_typecheck_call_too_few_args_one
+                 : diag::err_typecheck_call_too_few_args_at_least_one)
+            << FnKind << FDecl->getParamDecl(0) << Fn->getSourceRange();
       else
-        Diag(RParenLoc, MinArgs == NumArgsInProto && !Proto->isVariadic()
-                          ? diag::err_typecheck_call_too_few_args
-                          : diag::err_typecheck_call_too_few_args_at_least)
-          << FnKind
-          << MinArgs << static_cast<unsigned>(Args.size())
-          << Fn->getSourceRange();
+        Diag(RParenLoc, MinArgs == NumParams && !Proto->isVariadic()
+                            ? diag::err_typecheck_call_too_few_args
+                            : diag::err_typecheck_call_too_few_args_at_least)
+            << FnKind << MinArgs << static_cast<unsigned>(Args.size())
+            << Fn->getSourceRange();
 
       // Emit the location of the prototype.
       if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
@@ -4084,46 +4096,46 @@
 
       return true;
     }
-    Call->setNumArgs(Context, NumArgsInProto);
+    Call->setNumArgs(Context, NumParams);
   }
 
   // If too many are passed and not variadic, error on the extras and drop
   // them.
-  if (Args.size() > NumArgsInProto) {
+  if (Args.size() > NumParams) {
     if (!Proto->isVariadic()) {
+      MemberExpr *ME = dyn_cast<MemberExpr>(Fn);
       TypoCorrection TC;
       if (FDecl && (TC = TryTypoCorrectionForCall(
                         *this, DeclarationNameInfo(FDecl->getDeclName(),
-                                                   Fn->getLocStart()),
+                                                   (ME ? ME->getMemberLoc()
+                                                       : Fn->getLocStart())),
                         Args))) {
         unsigned diag_id =
-            MinArgs == NumArgsInProto && !Proto->isVariadic()
+            MinArgs == NumParams && !Proto->isVariadic()
                 ? diag::err_typecheck_call_too_many_args_suggest
                 : diag::err_typecheck_call_too_many_args_at_most_suggest;
-        diagnoseTypo(TC, PDiag(diag_id) << FnKind << NumArgsInProto
+        diagnoseTypo(TC, PDiag(diag_id) << FnKind << NumParams
                                         << static_cast<unsigned>(Args.size())
-                                        << Fn->getSourceRange());
-      } else if (NumArgsInProto == 1 && FDecl &&
+                                        << TC.getCorrectionRange());
+      } else if (NumParams == 1 && FDecl &&
                  FDecl->getParamDecl(0)->getDeclName())
-        Diag(Args[NumArgsInProto]->getLocStart(),
-             MinArgs == NumArgsInProto
-               ? diag::err_typecheck_call_too_many_args_one
-               : diag::err_typecheck_call_too_many_args_at_most_one)
-          << FnKind
-          << FDecl->getParamDecl(0) << static_cast<unsigned>(Args.size())
-          << Fn->getSourceRange()
-          << SourceRange(Args[NumArgsInProto]->getLocStart(),
-                         Args.back()->getLocEnd());
+        Diag(Args[NumParams]->getLocStart(),
+             MinArgs == NumParams
+                 ? diag::err_typecheck_call_too_many_args_one
+                 : diag::err_typecheck_call_too_many_args_at_most_one)
+            << FnKind << FDecl->getParamDecl(0)
+            << static_cast<unsigned>(Args.size()) << Fn->getSourceRange()
+            << SourceRange(Args[NumParams]->getLocStart(),
+                           Args.back()->getLocEnd());
       else
-        Diag(Args[NumArgsInProto]->getLocStart(),
-             MinArgs == NumArgsInProto
-               ? diag::err_typecheck_call_too_many_args
-               : diag::err_typecheck_call_too_many_args_at_most)
-          << FnKind
-          << NumArgsInProto << static_cast<unsigned>(Args.size())
-          << Fn->getSourceRange()
-          << SourceRange(Args[NumArgsInProto]->getLocStart(),
-                         Args.back()->getLocEnd());
+        Diag(Args[NumParams]->getLocStart(),
+             MinArgs == NumParams
+                 ? diag::err_typecheck_call_too_many_args
+                 : diag::err_typecheck_call_too_many_args_at_most)
+            << FnKind << NumParams << static_cast<unsigned>(Args.size())
+            << Fn->getSourceRange()
+            << SourceRange(Args[NumParams]->getLocStart(),
+                           Args.back()->getLocEnd());
 
       // Emit the location of the prototype.
       if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
@@ -4131,7 +4143,7 @@
           << FDecl;
       
       // This deletes the extra arguments.
-      Call->setNumArgs(Context, NumArgsInProto);
+      Call->setNumArgs(Context, NumParams);
       return true;
     }
   }
@@ -4149,25 +4161,22 @@
   return false;
 }
 
-bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
-                                  FunctionDecl *FDecl,
+bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                                   const FunctionProtoType *Proto,
-                                  unsigned FirstProtoArg,
-                                  ArrayRef<Expr *> Args,
+                                  unsigned FirstParam, ArrayRef<Expr *> Args,
                                   SmallVectorImpl<Expr *> &AllArgs,
-                                  VariadicCallType CallType,
-                                  bool AllowExplicit,
+                                  VariadicCallType CallType, bool AllowExplicit,
                                   bool IsListInitialization) {
-  unsigned NumArgsInProto = Proto->getNumArgs();
+  unsigned NumParams = Proto->getNumParams();
   unsigned NumArgsToCheck = Args.size();
   bool Invalid = false;
-  if (Args.size() != NumArgsInProto)
+  if (Args.size() != NumParams)
     // Use default arguments for missing arguments
-    NumArgsToCheck = NumArgsInProto;
+    NumArgsToCheck = NumParams;
   unsigned ArgIx = 0;
   // Continue to check argument types (even if we have too few/many args).
-  for (unsigned i = FirstProtoArg; i != NumArgsToCheck; i++) {
-    QualType ProtoArgType = Proto->getArgType(i);
+  for (unsigned i = FirstParam; i != NumArgsToCheck; i++) {
+    QualType ProtoArgType = Proto->getParamType(i);
 
     Expr *Arg;
     ParmVarDecl *Param;
@@ -4195,11 +4204,12 @@
                (!Param || !Param->hasAttr<CFConsumedAttr>()))
         CFAudited = true;
 
-      InitializedEntity Entity = Param ?
-          InitializedEntity::InitializeParameter(Context, Param, ProtoArgType)
-        : InitializedEntity::InitializeParameter(Context, ProtoArgType,
-                                                 Proto->isArgConsumed(i));
-      
+      InitializedEntity Entity =
+          Param ? InitializedEntity::InitializeParameter(Context, Param,
+                                                         ProtoArgType)
+                : InitializedEntity::InitializeParameter(
+                      Context, ProtoArgType, Proto->isParamConsumed(i));
+
       // Remember that parameter belongs to a CF audited API.
       if (CFAudited)
         Entity.setParameterCFAudited();
@@ -4240,8 +4250,8 @@
   if (CallType != VariadicDoesNotApply) {
     // Assume that extern "C" functions with variadic arguments that
     // return __unknown_anytype aren't *really* variadic.
-    if (Proto->getResultType() == Context.UnknownAnyTy &&
-        FDecl && FDecl->isExternC()) {
+    if (Proto->getReturnType() == Context.UnknownAnyTy && FDecl &&
+        FDecl->isExternC()) {
       for (unsigned i = ArgIx, e = Args.size(); i != e; ++i) {
         QualType paramType; // ignored
         ExprResult arg = checkUnknownAnyArg(CallLoc, Args[i], paramType);
@@ -4494,6 +4504,21 @@
   else if (isa<MemberExpr>(NakedFn))
     NDecl = cast<MemberExpr>(NakedFn)->getMemberDecl();
 
+  if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(NDecl)) {
+    if (FD->hasAttr<EnableIfAttr>()) {
+      if (const EnableIfAttr *Attr = CheckEnableIf(FD, ArgExprs, true)) {
+        Diag(Fn->getLocStart(),
+             isa<CXXMethodDecl>(FD) ?
+                 diag::err_ovl_no_viable_member_function_in_call :
+                 diag::err_ovl_no_viable_function_in_call)
+          << FD << FD->getSourceRange();
+        Diag(FD->getLocation(),
+             diag::note_ovl_candidate_disabled_by_enable_if_attr)
+            << Attr->getCond()->getSourceRange() << Attr->getMessage();
+      }
+    }
+  }
+
   return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs, RParenLoc,
                                ExecConfig, IsExecConfig);
 }
@@ -4573,7 +4598,7 @@
     Result = ImpCastExprToType(Fn, Context.getPointerType(FDecl->getType()),
                                CK_BuiltinFnToFnPtr).take();
   } else {
-    Result = UsualUnaryConversions(Fn);
+    Result = CallExprUnaryConversions(Fn);
   }
   if (Result.isInvalid())
     return ExprError();
@@ -4629,7 +4654,7 @@
             << FDecl->getName() << Fn->getSourceRange());
 
       // CUDA: Kernel function must have 'void' return type
-      if (!FuncT->getResultType()->isVoidType())
+      if (!FuncT->getReturnType()->isVoidType())
         return ExprError(Diag(LParenLoc, diag::err_kern_type_not_void_return)
             << Fn->getType() << Fn->getSourceRange());
     } else {
@@ -4641,14 +4666,13 @@
   }
 
   // Check for a valid return type
-  if (CheckCallReturnType(FuncT->getResultType(),
-                          Fn->getLocStart(), TheCall,
+  if (CheckCallReturnType(FuncT->getReturnType(), Fn->getLocStart(), TheCall,
                           FDecl))
     return ExprError();
 
   // We know the result type of the call, set it.
   TheCall->setType(FuncT->getCallResultType(Context));
-  TheCall->setValueKind(Expr::getValueKindForType(FuncT->getResultType()));
+  TheCall->setValueKind(Expr::getValueKindForType(FuncT->getReturnType()));
 
   const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT);
   if (Proto) {
@@ -4679,11 +4703,9 @@
     for (unsigned i = 0, e = Args.size(); i != e; i++) {
       Expr *Arg = Args[i];
 
-      if (Proto && i < Proto->getNumArgs()) {
-        InitializedEntity Entity
-          = InitializedEntity::InitializeParameter(Context, 
-                                                   Proto->getArgType(i),
-                                                   Proto->isArgConsumed(i));
+      if (Proto && i < Proto->getNumParams()) {
+        InitializedEntity Entity = InitializedEntity::InitializeParameter(
+            Context, Proto->getParamType(i), Proto->isParamConsumed(i));
         ExprResult ArgE = PerformCopyInitialization(Entity,
                                                     SourceLocation(),
                                                     Owned(Arg));
@@ -4876,8 +4898,13 @@
   case Type::STK_BlockPointer:
   case Type::STK_ObjCObjectPointer:
     switch (DestTy->getScalarTypeKind()) {
-    case Type::STK_CPointer:
+    case Type::STK_CPointer: {
+      unsigned SrcAS = SrcTy->getPointeeType().getAddressSpace();
+      unsigned DestAS = DestTy->getPointeeType().getAddressSpace();
+      if (SrcAS != DestAS)
+        return CK_AddressSpaceConversion;
       return CK_BitCast;
+    }
     case Type::STK_BlockPointer:
       return (SrcKind == Type::STK_BlockPointer
                 ? CK_BitCast : CK_AnyPointerToBlockPointerCast);
@@ -5020,12 +5047,55 @@
   llvm_unreachable("Unhandled scalar cast");
 }
 
+static bool breakDownVectorType(QualType type, uint64_t &len,
+                                QualType &eltType) {
+  // Vectors are simple.
+  if (const VectorType *vecType = type->getAs<VectorType>()) {
+    len = vecType->getNumElements();
+    eltType = vecType->getElementType();
+    assert(eltType->isScalarType());
+    return true;
+  }
+  
+  // We allow lax conversion to and from non-vector types, but only if
+  // they're real types (i.e. non-complex, non-pointer scalar types).
+  if (!type->isRealType()) return false;
+  
+  len = 1;
+  eltType = type;
+  return true;
+}
+
+static bool VectorTypesMatch(Sema &S, QualType srcTy, QualType destTy) {
+  uint64_t srcLen, destLen;
+  QualType srcElt, destElt;
+  if (!breakDownVectorType(srcTy, srcLen, srcElt)) return false;
+  if (!breakDownVectorType(destTy, destLen, destElt)) return false;
+  
+  // ASTContext::getTypeSize will return the size rounded up to a
+  // power of 2, so instead of using that, we need to use the raw
+  // element size multiplied by the element count.
+  uint64_t srcEltSize = S.Context.getTypeSize(srcElt);
+  uint64_t destEltSize = S.Context.getTypeSize(destElt);
+  
+  return (srcLen * srcEltSize == destLen * destEltSize);
+}
+
+/// Is this a legal conversion between two known vector types?
+bool Sema::isLaxVectorConversion(QualType srcTy, QualType destTy) {
+  assert(destTy->isVectorType() || srcTy->isVectorType());
+  
+  if (!Context.getLangOpts().LaxVectorConversions)
+    return false;
+  return VectorTypesMatch(*this, srcTy, destTy);
+}
+
 bool Sema::CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                            CastKind &Kind) {
   assert(VectorTy->isVectorType() && "Not a vector type!");
 
   if (Ty->isVectorType() || Ty->isIntegerType()) {
-    if (Context.getTypeSize(VectorTy) != Context.getTypeSize(Ty))
+    if (!VectorTypesMatch(*this, Ty, VectorTy))
       return Diag(R.getBegin(),
                   Ty->isVectorType() ?
                   diag::err_invalid_conversion_between_vectors :
@@ -5051,7 +5121,7 @@
   // In OpenCL, casts between vectors of different types are not allowed.
   // (See OpenCL 6.2).
   if (SrcTy->isVectorType()) {
-    if (Context.getTypeSize(DestTy) != Context.getTypeSize(SrcTy)
+    if (!VectorTypesMatch(*this, SrcTy, DestTy)
         || (getLangOpts().OpenCL &&
             (DestTy.getCanonicalType() != SrcTy.getCanonicalType()))) {
       Diag(R.getBegin(),diag::err_invalid_conversion_between_ext_vectors)
@@ -5137,6 +5207,10 @@
     CastExpr = Result.take();
   }
 
+  if (getLangOpts().CPlusPlus && !castType->isVoidType() &&
+      !getSourceManager().isInSystemMacro(LParenLoc))
+    Diag(LParenLoc, diag::warn_old_style_cast) << CastExpr->getSourceRange();
+
   return BuildCStyleCastExpr(LParenLoc, castTInfo, RParenLoc, CastExpr);
 }
 
@@ -5971,15 +6045,16 @@
   // get the "pointed to" type (ignoring qualifiers at the top level)
   const Type *lhptee, *rhptee;
   Qualifiers lhq, rhq;
-  llvm::tie(lhptee, lhq) = cast<PointerType>(LHSType)->getPointeeType().split();
-  llvm::tie(rhptee, rhq) = cast<PointerType>(RHSType)->getPointeeType().split();
+  std::tie(lhptee, lhq) =
+      cast<PointerType>(LHSType)->getPointeeType().split().asPair();
+  std::tie(rhptee, rhq) =
+      cast<PointerType>(RHSType)->getPointeeType().split().asPair();
 
   Sema::AssignConvertType ConvTy = Sema::Compatible;
 
   // C99 6.5.16.1p1: The following citation is common to constraints
   // 3 & 4 (below). ...and the type *pointed to* by the left has all the
   // qualifiers of the type *pointed to* by the right;
-  Qualifiers lq;
 
   // As a special case, 'non-__weak A *' -> 'non-__weak const *' is okay.
   if (lhq.getObjCLifetime() != rhq.getObjCLifetime() &&
@@ -6257,8 +6332,7 @@
       // If we are allowing lax vector conversions, and LHS and RHS are both
       // vectors, the total size only needs to be the same. This is a bitcast;
       // no bits are changed but the result type is different.
-      if (getLangOpts().LaxVectorConversions &&
-          (Context.getTypeSize(LHSType) == Context.getTypeSize(RHSType))) {
+      if (isLaxVectorConversion(RHSType, LHSType)) {
         Kind = CK_BitCast;
         return IncompatibleVectors;
       }
@@ -6480,9 +6554,7 @@
   RecordDecl *UD = UT->getDecl();
   FieldDecl *InitField = 0;
   // It's compatible if the expression matches any of the fields.
-  for (RecordDecl::field_iterator it = UD->field_begin(),
-         itend = UD->field_end();
-       it != itend; ++it) {
+  for (auto *it : UD->fields()) {
     if (it->getType()->isPointerType()) {
       // If the transparent union contains a pointer type, we allow:
       // 1) void pointer
@@ -6490,7 +6562,7 @@
       if (RHSType->isPointerType())
         if (RHSType->castAs<PointerType>()->getPointeeType()->isVoidType()) {
           RHS = ImpCastExprToType(RHS.take(), it->getType(), CK_BitCast);
-          InitField = *it;
+          InitField = it;
           break;
         }
 
@@ -6498,7 +6570,7 @@
                                            Expr::NPC_ValueDependentIsNull)) {
         RHS = ImpCastExprToType(RHS.take(), it->getType(),
                                 CK_NullToPointer);
-        InitField = *it;
+        InitField = it;
         break;
       }
     }
@@ -6507,7 +6579,7 @@
     if (CheckAssignmentConstraints(it->getType(), RHS, Kind)
           == Compatible) {
       RHS = ImpCastExprToType(RHS.take(), it->getType(), Kind);
-      InitField = *it;
+      InitField = it;
       break;
     }
   }
@@ -6603,6 +6675,14 @@
     if (getLangOpts().ObjCAutoRefCount)
       CheckObjCARCConversion(SourceRange(), Ty, E, CCK_ImplicitConversion,
                              DiagnoseCFAudited);
+    if (getLangOpts().ObjC1 &&
+        (CheckObjCBridgeRelatedConversions(E->getLocStart(),
+                                          LHSType, E->getType(), E) ||
+         ConversionToObjCStringLiteralCheck(LHSType, E))) {
+      RHS = Owned(E);
+      return Compatible;
+    }
+    
     RHS = ImpCastExprToType(E, Ty, Kind);
   }
   return result;
@@ -6616,6 +6696,53 @@
   return QualType();
 }
 
+/// Try to convert a value of non-vector type to a vector type by converting
+/// the type to the element type of the vector and then performing a splat.
+/// If the language is OpenCL, we only use conversions that promote scalar
+/// rank; for C, Obj-C, and C++ we allow any real scalar conversion except
+/// for float->int.
+///
+/// \param scalar - if non-null, actually perform the conversions
+/// \return true if the operation fails (but without diagnosing the failure)
+static bool tryVectorConvertAndSplat(Sema &S, ExprResult *scalar,
+                                     QualType scalarTy,
+                                     QualType vectorEltTy,
+                                     QualType vectorTy) {
+  // The conversion to apply to the scalar before splatting it,
+  // if necessary.
+  CastKind scalarCast = CK_Invalid;
+  
+  if (vectorEltTy->isIntegralType(S.Context)) {
+    if (!scalarTy->isIntegralType(S.Context))
+      return true;
+    if (S.getLangOpts().OpenCL &&
+        S.Context.getIntegerTypeOrder(vectorEltTy, scalarTy) < 0)
+      return true;
+    scalarCast = CK_IntegralCast;
+  } else if (vectorEltTy->isRealFloatingType()) {
+    if (scalarTy->isRealFloatingType()) {
+      if (S.getLangOpts().OpenCL &&
+          S.Context.getFloatingTypeOrder(vectorEltTy, scalarTy) < 0)
+        return true;
+      scalarCast = CK_FloatingCast;
+    }
+    else if (scalarTy->isIntegralType(S.Context))
+      scalarCast = CK_IntegralToFloating;
+    else
+      return true;
+  } else {
+    return true;
+  }
+
+  // Adjust scalar if desired.
+  if (scalar) {
+    if (scalarCast != CK_Invalid)
+      *scalar = S.ImpCastExprToType(scalar->take(), vectorEltTy, scalarCast);
+    *scalar = S.ImpCastExprToType(scalar->take(), vectorTy, CK_VectorSplat);
+  }
+  return false;
+}
+
 QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, bool IsCompAssign) {
   if (!IsCompAssign) {
@@ -6629,19 +6756,21 @@
 
   // For conversion purposes, we ignore any qualifiers.
   // For example, "const float" and "float" are equivalent.
-  QualType LHSType =
-    Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType();
-  QualType RHSType =
-    Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType();
+  QualType LHSType = LHS.get()->getType().getUnqualifiedType();
+  QualType RHSType = RHS.get()->getType().getUnqualifiedType();
 
   // If the vector types are identical, return.
-  if (LHSType == RHSType)
+  if (Context.hasSameType(LHSType, RHSType))
     return LHSType;
 
-  // Handle the case of equivalent AltiVec and GCC vector types
-  if (LHSType->isVectorType() && RHSType->isVectorType() &&
+  const VectorType *LHSVecType = LHSType->getAs<VectorType>();
+  const VectorType *RHSVecType = RHSType->getAs<VectorType>();
+  assert(LHSVecType || RHSVecType);
+
+  // If we have compatible AltiVec and GCC vector types, use the AltiVec type.
+  if (LHSVecType && RHSVecType &&
       Context.areCompatibleVectorTypes(LHSType, RHSType)) {
-    if (LHSType->isExtVectorType()) {
+    if (isa<ExtVectorType>(LHSVecType)) {
       RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
       return LHSType;
     }
@@ -6651,62 +6780,43 @@
     return RHSType;
   }
 
-  if (getLangOpts().LaxVectorConversions &&
-      Context.getTypeSize(LHSType) == Context.getTypeSize(RHSType)) {
-    // If we are allowing lax vector conversions, and LHS and RHS are both
-    // vectors, the total size only needs to be the same. This is a
-    // bitcast; no bits are changed but the result type is different.
-    // FIXME: Should we really be allowing this?
-    RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
-    return LHSType;
+  // If there's an ext-vector type and a scalar, try to convert the scalar to
+  // the vector element type and splat.
+  if (!RHSVecType && isa<ExtVectorType>(LHSVecType)) {
+    if (!tryVectorConvertAndSplat(*this, &RHS, RHSType,
+                                  LHSVecType->getElementType(), LHSType))
+      return LHSType;
+  }
+  if (!LHSVecType && isa<ExtVectorType>(RHSVecType)) {
+    if (!tryVectorConvertAndSplat(*this, (IsCompAssign ? 0 : &LHS), LHSType,
+                                  RHSVecType->getElementType(), RHSType))
+      return RHSType;
   }
 
-  // Canonicalize the ExtVector to the LHS, remember if we swapped so we can
-  // swap back (so that we don't reverse the inputs to a subtract, for instance.
-  bool swapped = false;
-  if (RHSType->isExtVectorType() && !IsCompAssign) {
-    swapped = true;
-    std::swap(RHS, LHS);
-    std::swap(RHSType, LHSType);
+  // If we're allowing lax vector conversions, only the total (data) size
+  // needs to be the same.
+  // FIXME: Should we really be allowing this?
+  // FIXME: We really just pick the LHS type arbitrarily?
+  if (isLaxVectorConversion(RHSType, LHSType)) {
+    QualType resultType = LHSType;
+    RHS = ImpCastExprToType(RHS.take(), resultType, CK_BitCast);
+    return resultType;
   }
 
-  // Handle the case of an ext vector and scalar.
-  if (const ExtVectorType *LV = LHSType->getAs<ExtVectorType>()) {
-    QualType EltTy = LV->getElementType();
-    if (EltTy->isIntegralType(Context) && RHSType->isIntegralType(Context)) {
-      int order = Context.getIntegerTypeOrder(EltTy, RHSType);
-      if (order > 0)
-        RHS = ImpCastExprToType(RHS.take(), EltTy, CK_IntegralCast);
-      if (order >= 0) {
-        RHS = ImpCastExprToType(RHS.take(), LHSType, CK_VectorSplat);
-        if (swapped) std::swap(RHS, LHS);
-        return LHSType;
-      }
-    }
-    if (EltTy->isRealFloatingType() && RHSType->isScalarType()) {
-      if (RHSType->isRealFloatingType()) {
-        int order = Context.getFloatingTypeOrder(EltTy, RHSType);
-        if (order > 0)
-          RHS = ImpCastExprToType(RHS.take(), EltTy, CK_FloatingCast);
-        if (order >= 0) {
-          RHS = ImpCastExprToType(RHS.take(), LHSType, CK_VectorSplat);
-          if (swapped) std::swap(RHS, LHS);
-          return LHSType;
-        }
-      }
-      if (RHSType->isIntegralType(Context)) {
-        RHS = ImpCastExprToType(RHS.take(), EltTy, CK_IntegralToFloating);
-        RHS = ImpCastExprToType(RHS.take(), LHSType, CK_VectorSplat);
-        if (swapped) std::swap(RHS, LHS);
-        return LHSType;
-      }
-    }
+  // Okay, the expression is invalid.
+
+  // If there's a non-vector, non-real operand, diagnose that.
+  if ((!RHSVecType && !RHSType->isRealType()) ||
+      (!LHSVecType && !LHSType->isRealType())) {
+    Diag(Loc, diag::err_typecheck_vector_not_convertable_non_scalar)
+      << LHSType << RHSType
+      << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+    return QualType();
   }
 
-  // Vectors of different size or scalar and non-ext-vector are errors.
-  if (swapped) std::swap(RHS, LHS);
+  // Otherwise, use the generic diagnostic.
   Diag(Loc, diag::err_typecheck_vector_not_convertable)
-    << LHS.get()->getType() << RHS.get()->getType()
+    << LHSType << RHSType
     << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
   return QualType();
 }
@@ -7491,8 +7601,8 @@
   QualType T = Method->param_begin()[0]->getType();
   if (!T->isObjCObjectPointerType())
     return false;
-  
-  QualType R = Method->getResultType();
+
+  QualType R = Method->getReturnType();
   if (!R->isScalarType())
     return false;
 
@@ -7769,10 +7879,22 @@
       return ResultTy;
   }
 
-  bool LHSIsNull = LHS.get()->isNullPointerConstant(Context,
-                                              Expr::NPC_ValueDependentIsNull);
-  bool RHSIsNull = RHS.get()->isNullPointerConstant(Context,
-                                              Expr::NPC_ValueDependentIsNull);
+  const Expr::NullPointerConstantKind LHSNullKind =
+      LHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
+  const Expr::NullPointerConstantKind RHSNullKind =
+      RHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
+  bool LHSIsNull = LHSNullKind != Expr::NPCK_NotNull;
+  bool RHSIsNull = RHSNullKind != Expr::NPCK_NotNull;
+
+  if (!IsRelational && LHSIsNull != RHSIsNull) {
+    bool IsEquality = Opc == BO_EQ;
+    if (RHSIsNull)
+      DiagnoseAlwaysNonNullPointer(LHS.get(), RHSNullKind, IsEquality,
+                                   RHS.get()->getSourceRange());
+    else
+      DiagnoseAlwaysNonNullPointer(RHS.get(), LHSNullKind, IsEquality,
+                                   LHS.get()->getSourceRange());
+  }
 
   // All of the following pointer-related warnings are GCC extensions, except
   // when handling null pointer constants. 
@@ -7830,10 +7952,14 @@
       diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false);
     }
     if (LCanPointeeTy != RCanPointeeTy) {
+      unsigned AddrSpaceL = LCanPointeeTy.getAddressSpace();
+      unsigned AddrSpaceR = RCanPointeeTy.getAddressSpace();
+      CastKind Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion
+                                               : CK_BitCast;
       if (LHSIsNull && !RHSIsNull)
-        LHS = ImpCastExprToType(LHS.take(), RHSType, CK_BitCast);
+        LHS = ImpCastExprToType(LHS.take(), RHSType, Kind);
       else
-        RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
+        RHS = ImpCastExprToType(RHS.take(), LHSType, Kind);
     }
     return ResultTy;
   }
@@ -8730,6 +8856,12 @@
   // Make sure to ignore parentheses in subsequent checks
   Expr *op = OrigOp.get()->IgnoreParens();
 
+  // OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
+  if (LangOpts.OpenCL && op->getType()->isFunctionType()) {
+    Diag(op->getExprLoc(), diag::err_opencl_taking_function_address);
+    return QualType();
+  }
+
   if (getLangOpts().C99) {
     // Implement C99-only parts of addressof rules.
     if (UnaryOperator* uOp = dyn_cast<UnaryOperator>(op)) {
@@ -8793,8 +8925,11 @@
     if (isa<CXXDestructorDecl>(MD))
       Diag(OpLoc, diag::err_typecheck_addrof_dtor) << op->getSourceRange();
 
-    return Context.getMemberPointerType(op->getType(),
-              Context.getTypeDeclType(MD->getParent()).getTypePtr());
+    QualType MPTy = Context.getMemberPointerType(
+        op->getType(), Context.getTypeDeclType(MD->getParent()).getTypePtr());
+    if (Context.getTargetInfo().getCXXABI().isMicrosoft())
+      RequireCompleteType(OpLoc, MPTy, 0);
+    return MPTy;
   } else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) {
     // C99 6.5.3.2p1
     // The operand must be either an l-value or a function designator
@@ -8842,8 +8977,13 @@
 
           while (cast<RecordDecl>(Ctx)->isAnonymousStructOrUnion())
             Ctx = Ctx->getParent();
-          return Context.getMemberPointerType(op->getType(),
-                Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr());
+
+          QualType MPTy = Context.getMemberPointerType(
+              op->getType(),
+              Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr());
+          if (Context.getTargetInfo().getCXXABI().isMicrosoft())
+            RequireCompleteType(OpLoc, MPTy, 0);
+          return MPTy;
         }
       }
     } else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl))
@@ -9641,7 +9781,7 @@
 
     if (resultType->isDependentType())
       break;
-    if (resultType->isScalarType()) {
+    if (resultType->isScalarType() && !isScopedEnumerationType(resultType)) {
       // C99 6.5.3.3p1: ok, fallthrough;
       if (Context.getLangOpts().CPlusPlus) {
         // C++03 [expr.unary.op]p8, C++0x [expr.unary.op]p9:
@@ -9872,7 +10012,7 @@
   // example, it is not possible to goto into a stmt expression apparently.
   // More semantic analysis is needed.
 
-  // If there are sub stmts in the compound stmt, take the type of the last one
+  // If there are sub-stmts in the compound stmt, take the type of the last one
   // as the type of the stmtexpr.
   QualType Ty = Context.VoidTy;
   bool StmtExprMayBindToTemp = false;
@@ -10092,12 +10232,10 @@
     }
 
     if (IndirectMemberDecl) {
-      for (IndirectFieldDecl::chain_iterator FI =
-           IndirectMemberDecl->chain_begin(),
-           FEnd = IndirectMemberDecl->chain_end(); FI != FEnd; FI++) {
-        assert(isa<FieldDecl>(*FI));
+      for (auto *FI : IndirectMemberDecl->chain()) {
+        assert(isa<FieldDecl>(FI));
         Comps.push_back(OffsetOfNode(OC.LocStart,
-                                     cast<FieldDecl>(*FI), OC.LocEnd));
+                                     cast<FieldDecl>(FI), OC.LocEnd));
       }
     } else
       Comps.push_back(OffsetOfNode(OC.LocStart, MemberDecl, OC.LocEnd));
@@ -10241,7 +10379,7 @@
         ExplicitSignature.getLocalRangeEnd()) {
       // This would be much cheaper if we stored TypeLocs instead of
       // TypeSourceInfos.
-      TypeLoc Result = ExplicitSignature.getResultLoc();
+      TypeLoc Result = ExplicitSignature.getReturnLoc();
       unsigned Size = Result.getFullDataSize();
       Sig = Context.CreateTypeSourceInfo(Result.getType(), Size);
       Sig->getTypeLoc().initializeFullCopy(Result, Size);
@@ -10254,7 +10392,7 @@
   CurBlock->FunctionType = T;
 
   const FunctionType *Fn = T->getAs<FunctionType>();
-  QualType RetTy = Fn->getResultType();
+  QualType RetTy = Fn->getReturnType();
   bool isVariadic =
     (isa<FunctionProtoType>(Fn) && cast<FunctionProtoType>(Fn)->isVariadic());
 
@@ -10273,8 +10411,8 @@
   // Push block parameters from the declarator if we had them.
   SmallVector<ParmVarDecl*, 8> Params;
   if (ExplicitSignature) {
-    for (unsigned I = 0, E = ExplicitSignature.getNumArgs(); I != E; ++I) {
-      ParmVarDecl *Param = ExplicitSignature.getArg(I);
+    for (unsigned I = 0, E = ExplicitSignature.getNumParams(); I != E; ++I) {
+      ParmVarDecl *Param = ExplicitSignature.getParam(I);
       if (Param->getIdentifier() == 0 &&
           !Param->isImplicit() &&
           !Param->isInvalidDecl() &&
@@ -10286,12 +10424,9 @@
   // Fake up parameter variables if we have a typedef, like
   //   ^ fntype { ... }
   } else if (const FunctionProtoType *Fn = T->getAs<FunctionProtoType>()) {
-    for (FunctionProtoType::arg_type_iterator
-           I = Fn->arg_type_begin(), E = Fn->arg_type_end(); I != E; ++I) {
-      ParmVarDecl *Param =
-        BuildParmVarDeclForTypedef(CurBlock->TheDecl,
-                                   ParamInfo.getLocStart(),
-                                   *I);
+    for (const auto &I : Fn->param_types()) {
+      ParmVarDecl *Param = BuildParmVarDeclForTypedef(
+          CurBlock->TheDecl, ParamInfo.getLocStart(), I);
       Params.push_back(Param);
     }
   }
@@ -10308,15 +10443,14 @@
   ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo);
 
   // Put the parameter variables in scope.
-  for (BlockDecl::param_iterator AI = CurBlock->TheDecl->param_begin(),
-         E = CurBlock->TheDecl->param_end(); AI != E; ++AI) {
-    (*AI)->setOwningFunction(CurBlock->TheDecl);
+  for (auto AI : CurBlock->TheDecl->params()) {
+    AI->setOwningFunction(CurBlock->TheDecl);
 
     // If this has an identifier, add it to the scope stack.
-    if ((*AI)->getIdentifier()) {
-      CheckShadow(CurBlock->TheScope, *AI);
+    if (AI->getIdentifier()) {
+      CheckShadow(CurBlock->TheScope, AI);
 
-      PushOnScopeChains(*AI, CurBlock->TheScope);
+      PushOnScopeChains(AI, CurBlock->TheScope);
     }
   }
 }
@@ -10358,7 +10492,7 @@
   if (!BSI->ReturnType.isNull())
     RetTy = BSI->ReturnType;
 
-  bool NoReturn = BSI->TheDecl->getAttr<NoReturnAttr>();
+  bool NoReturn = BSI->TheDecl->hasAttr<NoReturnAttr>();
   QualType BlockTy;
 
   // Set the captured variables on the block.
@@ -10390,7 +10524,7 @@
 
     // Otherwise, if we don't need to change anything about the function type,
     // preserve its sugar structure.
-    } else if (FTy->getResultType() == RetTy &&
+    } else if (FTy->getReturnType() == RetTy &&
                (!NoReturn || FTy->getNoReturnAttr())) {
       BlockTy = BSI->FunctionType;
 
@@ -10400,7 +10534,7 @@
       FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
       EPI.TypeQuals = 0; // FIXME: silently?
       EPI.ExtInfo = Ext;
-      BlockTy = Context.getFunctionType(RetTy, FPT->getArgTypes(), EPI);
+      BlockTy = Context.getFunctionType(RetTy, FPT->getParamTypes(), EPI);
     }
 
   // If we don't have a function type, just build one from nothing.
@@ -10442,10 +10576,8 @@
 
     // It also gets a branch-protected scope if any of the captured
     // variables needs destruction.
-    for (BlockDecl::capture_const_iterator
-           ci = Result->getBlockDecl()->capture_begin(),
-           ce = Result->getBlockDecl()->capture_end(); ci != ce; ++ci) {
-      const VarDecl *var = ci->getVariable();
+    for (const auto &CI : Result->getBlockDecl()->captures()) {
+      const VarDecl *var = CI.getVariable();
       if (var->getType().isDestructedType() != QualType::DK_none) {
         getCurFunction()->setHasBranchProtectedScope();
         break;
@@ -10567,38 +10699,37 @@
   return Owned(new (Context) GNUNullExpr(Ty, TokenLoc));
 }
 
-static void MakeObjCStringLiteralFixItHint(Sema& SemaRef, QualType DstType,
-                                           Expr *SrcExpr, FixItHint &Hint,
-                                           bool &IsNSString) {
-  if (!SemaRef.getLangOpts().ObjC1)
-    return;
+bool
+Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp) {
+  if (!getLangOpts().ObjC1)
+    return false;
 
   const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
   if (!PT)
-    return;
+    return false;
 
-  // Check if the destination is of type 'id'.
   if (!PT->isObjCIdType()) {
     // Check if the destination is the 'NSString' interface.
     const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
     if (!ID || !ID->getIdentifier()->isStr("NSString"))
-      return;
-    IsNSString = true;
+      return false;
   }
-
+  
   // Ignore any parens, implicit casts (should only be
   // array-to-pointer decays), and not-so-opaque values.  The last is
   // important for making this trigger for property assignments.
-  SrcExpr = SrcExpr->IgnoreParenImpCasts();
+  Expr *SrcExpr = Exp->IgnoreParenImpCasts();
   if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(SrcExpr))
     if (OV->getSourceExpr())
       SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts();
 
   StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr);
   if (!SL || !SL->isAscii())
-    return;
-
-  Hint = FixItHint::CreateInsertion(SL->getLocStart(), "@");
+    return false;
+  Diag(SL->getLocStart(), diag::err_missing_atsign_prefix)
+    << FixItHint::CreateInsertion(SL->getLocStart(), "@");
+  Exp = BuildObjCStringLiteral(SL->getLocStart(), SL).take();
+  return true;
 }
 
 bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
@@ -10617,7 +10748,6 @@
   ConversionFixItGenerator ConvHints;
   bool MayHaveConvFixit = false;
   bool MayHaveFunctionDiff = false;
-  bool IsNSString = false;
 
   switch (ConvTy) {
   case Compatible:
@@ -10635,7 +10765,6 @@
     MayHaveConvFixit = true;
     break;
   case IncompatiblePointer:
-    MakeObjCStringLiteralFixItHint(*this, DstType, SrcExpr, Hint, IsNSString);
       DiagKind =
         (Action == AA_Passing_CFAudited ?
           diag::err_arc_typecheck_convert_incompatible_pointer :
@@ -10649,8 +10778,6 @@
       SrcType = SrcType.getUnqualifiedType();
       DstType = DstType.getUnqualifiedType();
     }
-    else if (IsNSString && !Hint.isNull())
-      DiagKind = diag::warn_missing_atsign_prefix;
     MayHaveConvFixit = true;
     break;
   case IncompatiblePointerSign:
@@ -10784,7 +10911,7 @@
                                                  llvm::APSInt *Result) {
   class SimpleICEDiagnoser : public VerifyICEDiagnoser {
   public:
-    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+    void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
       S.Diag(Loc, diag::err_expr_not_ice) << S.LangOpts.CPlusPlus << SR;
     }
   } Diagnoser;
@@ -10803,7 +10930,7 @@
     IDDiagnoser(unsigned DiagID)
       : VerifyICEDiagnoser(DiagID == 0), DiagID(DiagID) { }
     
-    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+    void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
       S.Diag(Loc, DiagID) << SR;
     }
   } Diagnoser(DiagID);
@@ -10835,40 +10962,40 @@
           : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false,
                                 Silent, true) {}
 
-      virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
-                                                   QualType T) {
+      SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+                                           QualType T) override {
         return S.Diag(Loc, diag::err_ice_not_integral) << T;
       }
 
-      virtual SemaDiagnosticBuilder diagnoseIncomplete(
-          Sema &S, SourceLocation Loc, QualType T) {
+      SemaDiagnosticBuilder diagnoseIncomplete(
+          Sema &S, SourceLocation Loc, QualType T) override {
         return S.Diag(Loc, diag::err_ice_incomplete_type) << T;
       }
 
-      virtual SemaDiagnosticBuilder diagnoseExplicitConv(
-          Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) {
+      SemaDiagnosticBuilder diagnoseExplicitConv(
+          Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
         return S.Diag(Loc, diag::err_ice_explicit_conversion) << T << ConvTy;
       }
 
-      virtual SemaDiagnosticBuilder noteExplicitConv(
-          Sema &S, CXXConversionDecl *Conv, QualType ConvTy) {
+      SemaDiagnosticBuilder noteExplicitConv(
+          Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
         return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here)
                  << ConvTy->isEnumeralType() << ConvTy;
       }
 
-      virtual SemaDiagnosticBuilder diagnoseAmbiguous(
-          Sema &S, SourceLocation Loc, QualType T) {
+      SemaDiagnosticBuilder diagnoseAmbiguous(
+          Sema &S, SourceLocation Loc, QualType T) override {
         return S.Diag(Loc, diag::err_ice_ambiguous_conversion) << T;
       }
 
-      virtual SemaDiagnosticBuilder noteAmbiguous(
-          Sema &S, CXXConversionDecl *Conv, QualType ConvTy) {
+      SemaDiagnosticBuilder noteAmbiguous(
+          Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
         return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here)
                  << ConvTy->isEnumeralType() << ConvTy;
       }
 
-      virtual SemaDiagnosticBuilder diagnoseConversion(
-          Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) {
+      SemaDiagnosticBuilder diagnoseConversion(
+          Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
         llvm_unreachable("conversion functions are permitted");
       }
     } ConvertDiagnoser(Diagnoser.Suppress);
@@ -11174,37 +11301,34 @@
 
   // Note that this declaration has been used.
   if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
+    Constructor = cast<CXXConstructorDecl>(Constructor->getFirstDecl());
     if (Constructor->isDefaulted() && !Constructor->isDeleted()) {
       if (Constructor->isDefaultConstructor()) {
         if (Constructor->isTrivial())
           return;
-        if (!Constructor->isUsed(false))
-          DefineImplicitDefaultConstructor(Loc, Constructor);
+        DefineImplicitDefaultConstructor(Loc, Constructor);
       } else if (Constructor->isCopyConstructor()) {
-        if (!Constructor->isUsed(false))
-          DefineImplicitCopyConstructor(Loc, Constructor);
+        DefineImplicitCopyConstructor(Loc, Constructor);
       } else if (Constructor->isMoveConstructor()) {
-        if (!Constructor->isUsed(false))
-          DefineImplicitMoveConstructor(Loc, Constructor);
+        DefineImplicitMoveConstructor(Loc, Constructor);
       }
     } else if (Constructor->getInheritedConstructor()) {
-      if (!Constructor->isUsed(false))
-        DefineInheritingConstructor(Loc, Constructor);
+      DefineInheritingConstructor(Loc, Constructor);
     }
 
     MarkVTableUsed(Loc, Constructor->getParent());
   } else if (CXXDestructorDecl *Destructor =
                  dyn_cast<CXXDestructorDecl>(Func)) {
-    if (Destructor->isDefaulted() && !Destructor->isDeleted() &&
-        !Destructor->isUsed(false))
+    Destructor = cast<CXXDestructorDecl>(Destructor->getFirstDecl());
+    if (Destructor->isDefaulted() && !Destructor->isDeleted())
       DefineImplicitDestructor(Loc, Destructor);
     if (Destructor->isVirtual())
       MarkVTableUsed(Loc, Destructor->getParent());
   } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(Func)) {
-    if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted() &&
-        MethodDecl->isOverloadedOperator() &&
+    if (MethodDecl->isOverloadedOperator() &&
         MethodDecl->getOverloadedOperator() == OO_Equal) {
-      if (!MethodDecl->isUsed(false)) {
+      MethodDecl = cast<CXXMethodDecl>(MethodDecl->getFirstDecl());
+      if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted()) {
         if (MethodDecl->isCopyAssignmentOperator())
           DefineImplicitCopyAssignment(Loc, MethodDecl);
         else
@@ -11212,7 +11336,8 @@
       }
     } else if (isa<CXXConversionDecl>(MethodDecl) &&
                MethodDecl->getParent()->isLambda()) {
-      CXXConversionDecl *Conversion = cast<CXXConversionDecl>(MethodDecl);
+      CXXConversionDecl *Conversion =
+          cast<CXXConversionDecl>(MethodDecl->getFirstDecl());
       if (Conversion->isLambdaToBlockPointerConversion())
         DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion);
       else
@@ -11276,10 +11401,9 @@
     }
   } else {
     // Walk redefinitions, as some of them may be instantiable.
-    for (FunctionDecl::redecl_iterator i(Func->redecls_begin()),
-         e(Func->redecls_end()); i != e; ++i) {
+    for (auto i : Func->redecls()) {
       if (!i->isUsed(false) && i->isImplicitlyInstantiable())
-        MarkFunctionReferenced(Loc, *i);
+        MarkFunctionReferenced(Loc, i);
     }
   }
 
@@ -11778,9 +11902,18 @@
       return false;
     }
 
-    if (S.RequireNonAbstractType(Loc, CaptureType,
-                                 diag::err_capture_of_abstract_type))
-      return false;
+    // Make sure that by-copy captures are of a complete and non-abstract type.
+    if (BuildAndDiagnose) {
+      if (!CaptureType->isDependentType() &&
+          S.RequireCompleteType(Loc, CaptureType,
+                                diag::err_capture_of_incomplete_type,
+                                Var->getDeclName()))
+        return false;
+
+      if (S.RequireNonAbstractType(Loc, CaptureType,
+                                   diag::err_capture_of_abstract_type))
+        return false;
+    }
   }
 
   // Capture this variable in the lambda.
@@ -12069,42 +12202,39 @@
          "Invalid Expr argument to DoMarkVarDeclReferenced");
   Var->setReferenced();
 
-  // If the context is not PotentiallyEvaluated and not Unevaluated 
-  // (i.e PotentiallyEvaluatedIfUsed) do not bother to consider variables 
-  // in this context for odr-use unless we are within a lambda.  
-  // If we don't know whether the context is potentially evaluated or not 
-  // (for e.g., if we're in a generic lambda), we want to add a potential 
-  // capture and eventually analyze for odr-use.
-  // We should also be able to analyze certain constructs in a non-generic 
-  // lambda setting for potential odr-use and capture violation:
-  // template<class T> void foo(T t) {
-  //    auto L = [](int i) { return t; };
-  // }
-  // 
+  // If the context is not potentially evaluated, this is not an odr-use and
+  // does not trigger instantiation.
   if (!IsPotentiallyEvaluatedContext(SemaRef)) {
+    if (SemaRef.isUnevaluatedContext())
+      return;
 
-    if (SemaRef.isUnevaluatedContext()) return;
+    // If we don't yet know whether this context is going to end up being an
+    // evaluated context, and we're referencing a variable from an enclosing
+    // scope, add a potential capture.
+    //
+    // FIXME: Is this necessary? These contexts are only used for default
+    // arguments, where local variables can't be used.
+    const bool RefersToEnclosingScope =
+        (SemaRef.CurContext != Var->getDeclContext() &&
+         Var->getDeclContext()->isFunctionOrMethod() &&
+         Var->hasLocalStorage());
+    if (!RefersToEnclosingScope)
+      return;
 
-    const bool refersToEnclosingScope =
-      (SemaRef.CurContext != Var->getDeclContext() &&
-           Var->getDeclContext()->isFunctionOrMethod());
-    if (!refersToEnclosingScope) return;
-    
     if (LambdaScopeInfo *const LSI = SemaRef.getCurLambda()) {
       // If a variable could potentially be odr-used, defer marking it so
-      // until we finish analyzing the full expression for any lvalue-to-rvalue 
+      // until we finish analyzing the full expression for any lvalue-to-rvalue
       // or discarded value conversions that would obviate odr-use.
       // Add it to the list of potential captures that will be analyzed
       // later (ActOnFinishFullExpr) for eventual capture and odr-use marking
       // unless the variable is a reference that was initialized by a constant
       // expression (this will never need to be captured or odr-used).
-      const bool IsConstantExpr = IsVariableNonDependentAndAConstantExpression(
-          Var, SemaRef.Context);
       assert(E && "Capture variable should be used in an expression.");
-      if (!IsConstantExpr || !Var->getType()->isReferenceType())
-        LSI->addPotentialCapture(E->IgnoreParens());      
-    } 
-    return;  
+      if (!Var->getType()->isReferenceType() ||
+          !IsVariableNonDependentAndAConstantExpression(Var, SemaRef.Context))
+        LSI->addPotentialCapture(E->IgnoreParens());
+    }
+    return;
   }
 
   VarTemplateSpecializationDecl *VarSpec =
@@ -12112,10 +12242,10 @@
   assert(!isa<VarTemplatePartialSpecializationDecl>(Var) &&
          "Can't instantiate a partial template specialization.");
 
-  // Implicit instantiation of static data members, static data member
-  // templates of class templates, and variable template specializations.
-  // Delay instantiations of variable templates, except for those
-  // that could be used in a constant expression.
+  // Perform implicit instantiation of static data members, static data member
+  // templates of class templates, and variable template specializations. Delay
+  // instantiations of variable templates, except for those that could be used
+  // in a constant expression.
   TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
   if (isTemplateInstantiation(TSK)) {
     bool TryInstantiating = TSK == TSK_ImplicitInstantiation;
@@ -12155,6 +12285,7 @@
       }
     }
   }
+
   // Per C++11 [basic.def.odr], a variable is odr-used "unless it satisfies
   // the requirements for appearing in a constant expression (5.19) and, if
   // it is an object, the lvalue-to-rvalue conversion (4.1)
@@ -12164,14 +12295,11 @@
   // C++03 depends on whether we get the C++03 version correct. The second
   // part does not apply to references, since they are not objects.
   if (E && IsVariableAConstantExpression(Var, SemaRef.Context)) {
-    // A reference initialized by a constant expression can never be 
+    // A reference initialized by a constant expression can never be
     // odr-used, so simply ignore it.
-    // But a non-reference might get odr-used if it doesn't undergo
-    // an lvalue-to-rvalue or is discarded, so track it.
     if (!Var->getType()->isReferenceType())
       SemaRef.MaybeODRUseExprs.insert(E);
-  }
-  else
+  } else
     MarkVarDeclODRUsed(Var, Loc, SemaRef, /*MaxFunctionScopeIndex ptr*/0);
 }
 
@@ -12447,8 +12575,8 @@
   public:
     CallReturnIncompleteDiagnoser(FunctionDecl *FD, CallExpr *CE)
       : FD(FD), CE(CE) { }
-    
-    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+
+    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
       if (!FD) {
         S.Diag(Loc, diag::err_call_incomplete_return)
           << T << CE->getSourceRange();
@@ -12825,7 +12953,7 @@
     // This is a hack, but it is far superior to moving the
     // corresponding target-specific code from IR-gen to Sema/AST.
 
-    ArrayRef<QualType> ParamTypes = Proto->getArgTypes();
+    ArrayRef<QualType> ParamTypes = Proto->getParamTypes();
     SmallVector<QualType, 8> ArgTypes;
     if (ParamTypes.empty() && Proto->isVariadic()) { // the special case
       ArgTypes.reserve(E->getNumArgs());
@@ -12882,8 +13010,8 @@
 
   // Rewrite the method result type if available.
   if (ObjCMethodDecl *Method = E->getMethodDecl()) {
-    assert(Method->getResultType() == S.Context.UnknownAnyTy);
-    Method->setResultType(DestType);
+    assert(Method->getReturnType() == S.Context.UnknownAnyTy);
+    Method->setReturnType(DestType);
   }
 
   // Change the type of the message.
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 07e4657..8b9c0e2 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -117,7 +117,7 @@
     SearchType = GetTypeFromParser(ObjectTypePtr);
 
   if (SS.isSet()) {
-    NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
+    NestedNameSpecifier *NNS = SS.getScopeRep();
 
     bool AlreadySearched = false;
     bool LookAtPrefix = true;
@@ -209,7 +209,8 @@
           Context.hasSameUnqualifiedType(T, SearchType)) {
         // We found our type!
 
-        return ParsedType::make(T);
+        return CreateParsedType(T,
+                                Context.getTrivialTypeSourceInfo(T, NameLoc));
       }
 
       if (!SearchType.isNull())
@@ -245,7 +246,9 @@
               = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
           if (Spec->getSpecializedTemplate()->getCanonicalDecl() ==
                 Template->getCanonicalDecl())
-            return ParsedType::make(MemberOfType);
+            return CreateParsedType(
+                MemberOfType,
+                Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
         }
 
         continue;
@@ -264,7 +267,9 @@
         // specialized.
         if (TemplateDecl *SpecTemplate = SpecName.getAsTemplateDecl()) {
           if (SpecTemplate->getCanonicalDecl() == Template->getCanonicalDecl())
-            return ParsedType::make(MemberOfType);
+            return CreateParsedType(
+                MemberOfType,
+                Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
 
           continue;
         }
@@ -275,7 +280,9 @@
                                     = SpecName.getAsDependentTemplateName()) {
           if (DepTemplate->isIdentifier() &&
               DepTemplate->getIdentifier() == Template->getIdentifier())
-            return ParsedType::make(MemberOfType);
+            return CreateParsedType(
+                MemberOfType,
+                Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
 
           continue;
         }
@@ -333,6 +340,34 @@
     return ParsedType();
 }
 
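+/// Check that a literal-operator-id's nested-name-specifier could actually
+/// contain a literal operator: per C++11 [over.literal]p2 these may only be
+/// declared at namespace scope, so class and dependent scopes are rejected
+/// (and diagnosed) early.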
+bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
+                                  const UnqualifiedId &Name) {
+  assert(Name.getKind() == UnqualifiedId::IK_LiteralOperatorId);
+
+  if (!SS.isValid())
+    return false;
+
+  switch (SS.getScopeRep()->getKind()) {
+  case NestedNameSpecifier::Identifier:
+  case NestedNameSpecifier::TypeSpec:
+  case NestedNameSpecifier::TypeSpecWithTemplate:
+    // Per C++11 [over.literal]p2, literal operators can only be declared at
+    // namespace scope. Therefore, this unqualified-id cannot name anything.
+    // Reject it early, because we have no AST representation for this in the
+    // case where the scope is dependent.
+    Diag(Name.getLocStart(), diag::err_literal_operator_id_outside_namespace)
+      << SS.getScopeRep();
+    return true;
+
+  case NestedNameSpecifier::Global:
+  case NestedNameSpecifier::Namespace:
+  case NestedNameSpecifier::NamespaceAlias:
+    return false;
+  }
+
+  llvm_unreachable("unknown nested name specifier kind");
+}
+
 /// \brief Build a C++ typeid expression with a type operand.
 ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
                                 SourceLocation TypeidLoc,
@@ -426,7 +461,7 @@
     CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
     // Microsoft's typeinfo doesn't have type_info in std but in the global
     // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
-    if (!CXXTypeInfoDecl && LangOpts.MicrosoftMode) {
+    if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
       LookupQualifiedName(R, Context.getTranslationUnitDecl());
       CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
     }
@@ -1153,14 +1188,15 @@
   else if (Initializer && isa<ImplicitValueInitExpr>(Initializer))
     HaveCompleteInit = true;
 
-  // C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+  // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
   if (TypeMayContainAuto && AllocType->isUndeducedType()) {
     if (initStyle == CXXNewExpr::NoInit || NumInits == 0)
       return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
                        << AllocType << TypeRange);
-    if (initStyle == CXXNewExpr::ListInit)
+    if (initStyle == CXXNewExpr::ListInit ||
+        (NumInits == 1 && isa<InitListExpr>(Inits[0])))
       return ExprError(Diag(Inits[0]->getLocStart(),
-                            diag::err_auto_new_requires_parens)
+                            diag::err_auto_new_list_init)
                        << AllocType << TypeRange);
     if (NumInits > 1) {
       Expr *FirstBad = Inits[1];
@@ -1225,9 +1261,8 @@
   if (ArraySize && !ArraySize->isTypeDependent()) {
     ExprResult ConvertedSize;
     if (getLangOpts().CPlusPlus1y) {
-      unsigned IntWidth = Context.getTargetInfo().getIntWidth();
-      assert(IntWidth && "Builtin type of size 0?");
-      llvm::APSInt Value(IntWidth);
+      assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
+
       ConvertedSize = PerformImplicitConversion(ArraySize, Context.getSizeType(),
 						AA_Converting);
 
@@ -1245,43 +1280,43 @@
         SizeConvertDiagnoser(Expr *ArraySize)
             : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
               ArraySize(ArraySize) {}
-  
-        virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
-                                                     QualType T) {
+
+        SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+                                             QualType T) override {
           return S.Diag(Loc, diag::err_array_size_not_integral)
                    << S.getLangOpts().CPlusPlus11 << T;
         }
-  
-        virtual SemaDiagnosticBuilder diagnoseIncomplete(
-            Sema &S, SourceLocation Loc, QualType T) {
+
+        SemaDiagnosticBuilder diagnoseIncomplete(
+            Sema &S, SourceLocation Loc, QualType T) override {
           return S.Diag(Loc, diag::err_array_size_incomplete_type)
                    << T << ArraySize->getSourceRange();
         }
-  
-        virtual SemaDiagnosticBuilder diagnoseExplicitConv(
-            Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) {
+
+        SemaDiagnosticBuilder diagnoseExplicitConv(
+            Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
           return S.Diag(Loc, diag::err_array_size_explicit_conversion) << T << ConvTy;
         }
-  
-        virtual SemaDiagnosticBuilder noteExplicitConv(
-            Sema &S, CXXConversionDecl *Conv, QualType ConvTy) {
+
+        SemaDiagnosticBuilder noteExplicitConv(
+            Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
           return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
                    << ConvTy->isEnumeralType() << ConvTy;
         }
-  
-        virtual SemaDiagnosticBuilder diagnoseAmbiguous(
-            Sema &S, SourceLocation Loc, QualType T) {
+
+        SemaDiagnosticBuilder diagnoseAmbiguous(
+            Sema &S, SourceLocation Loc, QualType T) override {
           return S.Diag(Loc, diag::err_array_size_ambiguous_conversion) << T;
         }
-  
-        virtual SemaDiagnosticBuilder noteAmbiguous(
-            Sema &S, CXXConversionDecl *Conv, QualType ConvTy) {
+
+        SemaDiagnosticBuilder noteAmbiguous(
+            Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
           return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
                    << ConvTy->isEnumeralType() << ConvTy;
         }
 
-        virtual SemaDiagnosticBuilder diagnoseConversion(
-            Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) {
+        SemaDiagnosticBuilder diagnoseConversion(
+            Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
           return S.Diag(Loc,
                         S.getLangOpts().CPlusPlus11
                           ? diag::warn_cxx98_compat_array_size_conversion
@@ -1623,7 +1658,7 @@
     // Didn't find a member overload. Look for a global one.
     DeclareGlobalNewDelete();
     DeclContext *TUDecl = Context.getTranslationUnitDecl();
-    bool FallbackEnabled = IsArray && Context.getLangOpts().MicrosoftMode;
+    bool FallbackEnabled = IsArray && Context.getLangOpts().MSVCCompat;
     if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl,
                                /*AllowMissing=*/FallbackEnabled, OperatorNew,
                                /*Diagnose=*/!FallbackEnabled)) {
@@ -1708,8 +1743,8 @@
 
       SmallVector<QualType, 4> ArgTypes;
       ArgTypes.push_back(Context.VoidPtrTy);
-      for (unsigned I = 1, N = Proto->getNumArgs(); I < N; ++I)
-        ArgTypes.push_back(Proto->getArgType(I));
+      for (unsigned I = 1, N = Proto->getNumParams(); I < N; ++I)
+        ArgTypes.push_back(Proto->getParamType(I));
 
       FunctionProtoType::ExtProtoInfo EPI;
       EPI.Variadic = Proto->isVariadic();
@@ -1763,7 +1798,7 @@
       else
         Matches.erase(Matches.begin() + 1);
       assert(Matches[0].second->getNumParams() == 2 &&
-             "found an unexpected uusal deallocation function");
+             "found an unexpected usual deallocation function");
     }
   }
 
@@ -2031,8 +2066,7 @@
         if (InitialParam1Type == Param1 &&
             (NumParams == 1 || InitialParam2Type == Param2)) {
           if (AddMallocAttr && !Func->hasAttr<MallocAttr>())
-            Func->addAttr(::new (Context) MallocAttr(SourceLocation(),
-                                                     Context));
+            Func->addAttr(MallocAttr::CreateImplicit(Context));
           // Make the function visible to name lookup, even if we found it in
           // an unimported module. It either is an implicitly-declared global
           // allocation function, or is suppressing that function.
@@ -2043,18 +2077,16 @@
     }
   }
 
+  FunctionProtoType::ExtProtoInfo EPI;
+
   QualType BadAllocType;
   bool HasBadAllocExceptionSpec
     = (Name.getCXXOverloadedOperator() == OO_New ||
        Name.getCXXOverloadedOperator() == OO_Array_New);
-  if (HasBadAllocExceptionSpec && !getLangOpts().CPlusPlus11) {
-    assert(StdBadAlloc && "Must have std::bad_alloc declared");
-    BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
-  }
-
-  FunctionProtoType::ExtProtoInfo EPI;
   if (HasBadAllocExceptionSpec) {
     if (!getLangOpts().CPlusPlus11) {
+      assert(StdBadAlloc && "Must have std::bad_alloc declared");
+      BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
       EPI.ExceptionSpecType = EST_Dynamic;
       EPI.NumExceptions = 1;
       EPI.Exceptions = &BadAllocType;
@@ -2075,14 +2107,16 @@
   Alloc->setImplicit();
 
   if (AddMallocAttr)
-    Alloc->addAttr(::new (Context) MallocAttr(SourceLocation(), Context));
+    Alloc->addAttr(MallocAttr::CreateImplicit(Context));
 
   ParmVarDecl *ParamDecls[2];
-  for (unsigned I = 0; I != NumParams; ++I)
+  for (unsigned I = 0; I != NumParams; ++I) {
     ParamDecls[I] = ParmVarDecl::Create(Context, Alloc, SourceLocation(),
                                         SourceLocation(), 0,
                                         Params[I], /*TInfo=*/0,
                                         SC_None, 0);
+    ParamDecls[I]->setImplicit();
+  }
   Alloc->setParams(ArrayRef<ParmVarDecl*>(ParamDecls, NumParams));
 
   // FIXME: Also add this declaration to the IdentifierResolver, but
@@ -2125,7 +2159,7 @@
     else
       Matches.erase(Matches.begin() + 1);
     assert(Matches[0]->getNumParams() == NumArgs &&
-           "found an unexpected uusal deallocation function");
+           "found an unexpected usual deallocation function");
   }
 
   assert(Matches.size() == 1 &&
@@ -2241,7 +2275,7 @@
     public:
       DeleteConverter() : ContextualImplicitConverter(false, true) {}
 
-      bool match(QualType ConvType) {
+      bool match(QualType ConvType) override {
         // FIXME: If we have an operator T* and an operator void*, we must pick
         // the operator T*.
         if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
@@ -2251,39 +2285,41 @@
       }
 
       SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
-                                            QualType T) {
+                                            QualType T) override {
         return S.Diag(Loc, diag::err_delete_operand) << T;
       }
 
       SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
-                                               QualType T) {
+                                               QualType T) override {
         return S.Diag(Loc, diag::err_delete_incomplete_class_type) << T;
       }
 
       SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
-                                                 QualType T, QualType ConvTy) {
+                                                 QualType T,
+                                                 QualType ConvTy) override {
         return S.Diag(Loc, diag::err_delete_explicit_conversion) << T << ConvTy;
       }
 
       SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
-                                             QualType ConvTy) {
+                                             QualType ConvTy) override {
         return S.Diag(Conv->getLocation(), diag::note_delete_conversion)
           << ConvTy;
       }
 
       SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
-                                              QualType T) {
+                                              QualType T) override {
         return S.Diag(Loc, diag::err_ambiguous_delete_operand) << T;
       }
 
       SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
-                                          QualType ConvTy) {
+                                          QualType ConvTy) override {
         return S.Diag(Conv->getLocation(), diag::note_delete_conversion)
           << ConvTy;
       }
 
       SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
-                                               QualType T, QualType ConvTy) {
+                                               QualType T,
+                                               QualType ConvTy) override {
         llvm_unreachable("conversion functions are permitted");
       }
     } Converter;
@@ -3032,9 +3068,12 @@
                              CK_NoOp, VK, /*BasePath=*/0, CCK).take();
 
     if (SCS.DeprecatedStringLiteralToCharPtr &&
-        !getLangOpts().WritableStrings)
-      Diag(From->getLocStart(), diag::warn_deprecated_string_literal_conversion)
+        !getLangOpts().WritableStrings) {
+      Diag(From->getLocStart(), getLangOpts().CPlusPlus11
+           ? diag::ext_deprecated_string_literal_conversion
+           : diag::warn_deprecated_string_literal_conversion)
         << ToType.getNonReferenceType();
+    }
 
     break;
   }
@@ -3055,26 +3094,13 @@
   return Owned(From);
 }
 
-ExprResult Sema::ActOnUnaryTypeTrait(UnaryTypeTrait UTT,
-                                     SourceLocation KWLoc,
-                                     ParsedType Ty,
-                                     SourceLocation RParen) {
-  TypeSourceInfo *TSInfo;
-  QualType T = GetTypeFromParser(Ty, &TSInfo);
-
-  if (!TSInfo)
-    TSInfo = Context.getTrivialTypeSourceInfo(T);
-  return BuildUnaryTypeTrait(UTT, KWLoc, TSInfo, RParen);
-}
-
 /// \brief Check the completeness of a type in a unary type trait.
 ///
 /// If the particular type trait requires a complete type, tries to complete
 /// it. If completing the type fails, a diagnostic is emitted and false
 /// returned. If completing the type succeeds or no completion was required,
 /// returns true.
-static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S,
-                                                UnaryTypeTrait UTT,
+static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
                                                 SourceLocation Loc,
                                                 QualType ArgTy) {
   // C++0x [meta.unary.prop]p3:
@@ -3087,6 +3113,7 @@
   // these class templates. We also try to follow any GCC documented behavior
   // in these expressions to ensure portability of standard libraries.
   switch (UTT) {
+  default: llvm_unreachable("not a UTT");
     // is_complete_type somewhat obviously cannot require a complete type.
   case UTT_IsCompleteType:
     // Fall-through
@@ -3139,6 +3166,8 @@
   case UTT_IsPolymorphic:
   case UTT_IsAbstract:
   case UTT_IsInterfaceClass:
+  case UTT_IsDestructible:
+  case UTT_IsNothrowDestructible:
     // Fall-through
 
   // These traits require a complete type.
@@ -3173,7 +3202,6 @@
     return !S.RequireCompleteType(
       Loc, ElTy, diag::err_incomplete_type_used_in_type_trait_expr);
   }
-  llvm_unreachable("Type trait not handled by switch");
 }
 
 static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
@@ -3203,7 +3231,7 @@
         const FunctionProtoType *CPT =
           Operator->getType()->getAs<FunctionProtoType>();
         CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
-        if (!CPT || !CPT->isNothrow(Self.Context))
+        if (!CPT || !CPT->isNothrow(C))
           return false;
       }
     }
@@ -3212,12 +3240,13 @@
   return false;
 }
 
-static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
+static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
                                    SourceLocation KeyLoc, QualType T) {
   assert(!T->isDependentType() && "Cannot evaluate traits of dependent type");
 
   ASTContext &C = Self.Context;
   switch(UTT) {
+  default: llvm_unreachable("not a UTT");
     // Type trait expressions corresponding to the primary type category
     // predicates in C++0x [meta.unary.cat].
   case UTT_IsVoid:
@@ -3404,8 +3433,12 @@
       return RD->hasTrivialCopyAssignment() &&
              !RD->hasNonTrivialCopyAssignment();
     return false;
+  case UTT_IsDestructible:
+  case UTT_IsNothrowDestructible:
+    // FIXME: Implement UTT_IsDestructible and UTT_IsNothrowDestructible.
+    // For now, let's fall through.
   case UTT_HasTrivialDestructor:
-    // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+    // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
     //   If __is_pod (type) is true or type is a reference type
     //   then the trait is true, else if type is a cv class or union
     //   type (or array thereof) with a trivial destructor
@@ -3488,9 +3521,9 @@
           CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
           if (!CPT)
             return false;
-          // FIXME: check whether evaluating default arguments can throw.
+          // TODO: check whether evaluating default arguments can throw.
           // For now, we'll be conservative and assume that they can throw.
-          if (!CPT->isNothrow(Self.Context) || CPT->getNumArgs() > 1)
+          if (!CPT->isNothrow(Self.Context) || CPT->getNumParams() > 1)
             return false;
         }
       }
@@ -3499,7 +3532,7 @@
     }
     return false;
   case UTT_HasNothrowConstructor:
-    // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+    // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
     //   If __has_trivial_constructor (type) is true then the trait is
     //   true, else if type is a cv class or union type (or array
     //   thereof) with a default constructor that is known not to
@@ -3511,6 +3544,7 @@
           !RD->hasNonTrivialDefaultConstructor())
         return true;
 
+      bool FoundConstructor = false;
       DeclContext::lookup_const_result R = Self.LookupConstructors(RD);
       for (DeclContext::lookup_const_iterator Con = R.begin(),
            ConEnd = R.end(); Con != ConEnd; ++Con) {
@@ -3519,16 +3553,19 @@
           continue;
         CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
         if (Constructor->isDefaultConstructor()) {
+          FoundConstructor = true;
           const FunctionProtoType *CPT
               = Constructor->getType()->getAs<FunctionProtoType>();
           CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
           if (!CPT)
             return false;
-          // TODO: check whether evaluating default arguments can throw.
+          // FIXME: check whether evaluating default arguments can throw.
           // For now, we'll be conservative and assume that they can throw.
-          return CPT->isNothrow(Self.Context) && CPT->getNumArgs() == 0;
+          if (!CPT->isNothrow(Self.Context) || CPT->getNumParams() > 0)
+            return false;
         }
       }
+      return FoundConstructor;
     }
     return false;
   case UTT_HasVirtualDestructor:
@@ -3549,41 +3586,6 @@
     //   function call.
     return !T->isIncompleteType();
   }
-  llvm_unreachable("Type trait not covered by switch");
-}
-
-ExprResult Sema::BuildUnaryTypeTrait(UnaryTypeTrait UTT,
-                                     SourceLocation KWLoc,
-                                     TypeSourceInfo *TSInfo,
-                                     SourceLocation RParen) {
-  QualType T = TSInfo->getType();
-  if (!CheckUnaryTypeTraitTypeCompleteness(*this, UTT, KWLoc, T))
-    return ExprError();
-
-  bool Value = false;
-  if (!T->isDependentType())
-    Value = EvaluateUnaryTypeTrait(*this, UTT, KWLoc, T);
-
-  return Owned(new (Context) UnaryTypeTraitExpr(KWLoc, UTT, TSInfo, Value,
-                                                RParen, Context.BoolTy));
-}
-
-ExprResult Sema::ActOnBinaryTypeTrait(BinaryTypeTrait BTT,
-                                      SourceLocation KWLoc,
-                                      ParsedType LhsTy,
-                                      ParsedType RhsTy,
-                                      SourceLocation RParen) {
-  TypeSourceInfo *LhsTSInfo;
-  QualType LhsT = GetTypeFromParser(LhsTy, &LhsTSInfo);
-  if (!LhsTSInfo)
-    LhsTSInfo = Context.getTrivialTypeSourceInfo(LhsT);
-
-  TypeSourceInfo *RhsTSInfo;
-  QualType RhsT = GetTypeFromParser(RhsTy, &RhsTSInfo);
-  if (!RhsTSInfo)
-    RhsTSInfo = Context.getTrivialTypeSourceInfo(RhsT);
-
-  return BuildBinaryTypeTrait(BTT, KWLoc, LhsTSInfo, RhsTSInfo, RParen);
 }
 
 /// \brief Determine whether T has a non-trivial Objective-C lifetime in
@@ -3605,10 +3607,22 @@
   llvm_unreachable("Unknown ObjC lifetime qualifier");
 }
 
+static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
+                                    QualType RhsT, SourceLocation KeyLoc);
+
 static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
                               ArrayRef<TypeSourceInfo *> Args,
                               SourceLocation RParenLoc) {
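+  // Dispatch unary and binary traits to their dedicated evaluators; only the
+  // variadic traits are handled by the switch below.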
+  if (Kind <= UTT_Last)
+    return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType());
+
+  if (Kind <= BTT_Last)
+    return EvaluateBinaryTypeTrait(S, Kind, Args[0]->getType(),
+                                   Args[1]->getType(), RParenLoc);
+
   switch (Kind) {
+  case clang::TT_IsConstructible:
+  case clang::TT_IsNothrowConstructible:
   case clang::TT_IsTriviallyConstructible: {
     // C++11 [meta.unary.prop]:
     //   is_trivially_constructible is defined as:
@@ -3623,11 +3637,7 @@
     //   variable t:
     //
     //     T t(create<Args>()...);
-    if (Args.empty()) {
-      S.Diag(KWLoc, diag::err_type_trait_arity)
-        << 1 << 1 << 1 << (int)Args.size();
-      return false;
-    }
+    assert(!Args.empty());
 
     // Precondition: T and all types in the parameter pack Args shall be
     // complete types, (possibly cv-qualified) void, or arrays of
@@ -3671,21 +3681,33 @@
     InitializationSequence Init(S, To, InitKind, ArgExprs);
     if (Init.Failed())
       return false;
-    
+
     ExprResult Result = Init.Perform(S, To, InitKind, ArgExprs);
     if (Result.isInvalid() || SFINAE.hasErrorOccurred())
       return false;
 
-    // Under Objective-C ARC, if the destination has non-trivial Objective-C
-    // lifetime, this is a non-trivial construction.
-    if (S.getLangOpts().ObjCAutoRefCount &&
-        hasNontrivialObjCLifetime(Args[0]->getType().getNonReferenceType()))
-      return false;
+    if (Kind == clang::TT_IsConstructible)
+      return true;
 
-    // The initialization succeeded; now make sure there are no non-trivial
-    // calls.
-    return !Result.get()->hasNonTrivialCall(S.Context);
+    if (Kind == clang::TT_IsNothrowConstructible)
+      return S.canThrow(Result.get()) == CT_Cannot;
+
+    if (Kind == clang::TT_IsTriviallyConstructible) {
+      // Under Objective-C ARC, if the destination has non-trivial Objective-C
+      // lifetime, this is a non-trivial construction.
+      if (S.getLangOpts().ObjCAutoRefCount &&
+          hasNontrivialObjCLifetime(Args[0]->getType().getNonReferenceType()))
+        return false;
+
+      // The initialization succeeded; now make sure there are no non-trivial
+      // calls.
+      return !Result.get()->hasNonTrivialCall(S.Context);
+    }
+
+    llvm_unreachable("unhandled type trait");
+    return false;
   }
+    default: llvm_unreachable("not a TT");
   }
   
   return false;
@@ -3694,6 +3716,12 @@
 ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, 
                                 ArrayRef<TypeSourceInfo *> Args, 
                                 SourceLocation RParenLoc) {
+  QualType ResultType = Context.getLogicalOperationType();
+
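+  // Unary type traits may require a complete operand type; check (and if
+  // needed, complete) it before evaluating the trait.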
+  if (Kind <= UTT_Last && !CheckUnaryTypeTraitTypeCompleteness(
+                               *this, Kind, KWLoc, Args[0]->getType()))
+    return ExprError();
+
   bool Dependent = false;
   for (unsigned I = 0, N = Args.size(); I != N; ++I) {
     if (Args[I]->getType()->isDependentType()) {
@@ -3701,17 +3729,17 @@
       break;
     }
   }
-  
-  bool Value = false;
+
+  bool Result = false;
   if (!Dependent)
-    Value = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc);
-  
-  return TypeTraitExpr::Create(Context, Context.BoolTy, KWLoc, Kind,
-                               Args, RParenLoc, Value);
+    Result = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc);
+
+  return TypeTraitExpr::Create(Context, ResultType, KWLoc, Kind, Args,
+                               RParenLoc, Result);
 }
 
-ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, 
-                                ArrayRef<ParsedType> Args, 
+ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+                                ArrayRef<ParsedType> Args,
                                 SourceLocation RParenLoc) {
   SmallVector<TypeSourceInfo *, 4> ConvertedArgs;
   ConvertedArgs.reserve(Args.size());
@@ -3724,13 +3752,12 @@
     
     ConvertedArgs.push_back(TInfo);    
   }
-  
+
   return BuildTypeTrait(Kind, KWLoc, ConvertedArgs, RParenLoc);
 }
 
-static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
-                                    QualType LhsT, QualType RhsT,
-                                    SourceLocation KeyLoc) {
+static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
+                                    QualType RhsT, SourceLocation KeyLoc) {
   assert(!LhsT->isDependentType() && !RhsT->isDependentType() &&
          "Cannot evaluate traits of dependent types");
 
@@ -3833,7 +3860,8 @@
     ExprResult Result = Init.Perform(Self, To, Kind, FromPtr);
     return !Result.isInvalid() && !SFINAE.hasErrorOccurred();
   }
-      
+
+  case BTT_IsNothrowAssignable:
   case BTT_IsTriviallyAssignable: {
     // C++11 [meta.unary.prop]p3:
     //   is_trivially_assignable is defined as:
@@ -3879,54 +3907,27 @@
     if (Result.isInvalid() || SFINAE.hasErrorOccurred())
       return false;
 
-    // Under Objective-C ARC, if the destination has non-trivial Objective-C
-    // lifetime, this is a non-trivial assignment.
-    if (Self.getLangOpts().ObjCAutoRefCount &&
-        hasNontrivialObjCLifetime(LhsT.getNonReferenceType()))
-      return false;
+    if (BTT == BTT_IsNothrowAssignable)
+      return Self.canThrow(Result.get()) == CT_Cannot;
 
-    return !Result.get()->hasNonTrivialCall(Self.Context);
+    if (BTT == BTT_IsTriviallyAssignable) {
+      // Under Objective-C ARC, if the destination has non-trivial Objective-C
+      // lifetime, this is a non-trivial assignment.
+      if (Self.getLangOpts().ObjCAutoRefCount &&
+          hasNontrivialObjCLifetime(LhsT.getNonReferenceType()))
+        return false;
+
+      return !Result.get()->hasNonTrivialCall(Self.Context);
+    }
+
+    llvm_unreachable("unhandled type trait");
+    return false;
   }
+    default: llvm_unreachable("not a BTT");
   }
   llvm_unreachable("Unknown type trait or not implemented");
 }
 
-ExprResult Sema::BuildBinaryTypeTrait(BinaryTypeTrait BTT,
-                                      SourceLocation KWLoc,
-                                      TypeSourceInfo *LhsTSInfo,
-                                      TypeSourceInfo *RhsTSInfo,
-                                      SourceLocation RParen) {
-  QualType LhsT = LhsTSInfo->getType();
-  QualType RhsT = RhsTSInfo->getType();
-
-  if (BTT == BTT_TypeCompatible) {
-    if (getLangOpts().CPlusPlus) {
-      Diag(KWLoc, diag::err_types_compatible_p_in_cplusplus)
-        << SourceRange(KWLoc, RParen);
-      return ExprError();
-    }
-  }
-
-  bool Value = false;
-  if (!LhsT->isDependentType() && !RhsT->isDependentType())
-    Value = EvaluateBinaryTypeTrait(*this, BTT, LhsT, RhsT, KWLoc);
-
-  // Select trait result type.
-  QualType ResultType;
-  switch (BTT) {
-  case BTT_IsBaseOf:       ResultType = Context.BoolTy; break;
-  case BTT_IsConvertible:  ResultType = Context.BoolTy; break;
-  case BTT_IsSame:         ResultType = Context.BoolTy; break;
-  case BTT_TypeCompatible: ResultType = Context.IntTy; break;
-  case BTT_IsConvertibleTo: ResultType = Context.BoolTy; break;
-  case BTT_IsTriviallyAssignable: ResultType = Context.BoolTy;
-  }
-
-  return Owned(new (Context) BinaryTypeTraitExpr(KWLoc, BTT, LhsTSInfo,
-                                                 RhsTSInfo, Value, RParen,
-                                                 ResultType));
-}
-
 ExprResult Sema::ActOnArrayTypeTrait(ArrayTypeTrait ATT,
                                      SourceLocation KWLoc,
                                      ParsedType Ty,
@@ -4117,22 +4118,23 @@
                             OpSpelling, (int)isIndirect)) {
       return QualType();
     }
-    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
-                       /*DetectVirtual=*/false);
-    // FIXME: Would it be useful to print full ambiguity paths, or is that
-    // overkill?
-    if (!IsDerivedFrom(LHSType, Class, Paths) ||
-        Paths.isAmbiguous(Context.getCanonicalType(Class))) {
+
+    if (!IsDerivedFrom(LHSType, Class)) {
       Diag(Loc, diag::err_bad_memptr_lhs) << OpSpelling
         << (int)isIndirect << LHS.get()->getType();
       return QualType();
     }
+
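+    // Check for an unambiguous, accessible base class and build the base
+    // path needed for the derived-to-base cast of the LHS below.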
+    CXXCastPath BasePath;
+    if (CheckDerivedToBaseConversion(LHSType, Class, Loc,
+                                     SourceRange(LHS.get()->getLocStart(),
+                                                 RHS.get()->getLocEnd()),
+                                     &BasePath))
+      return QualType();
+
     // Cast LHS to type of use.
     QualType UseType = isIndirect ? Context.getPointerType(Class) : Class;
     ExprValueKind VK = isIndirect ? VK_RValue : LHS.get()->getValueKind();
-
-    CXXCastPath BasePath;
-    BuildBasePathArray(Paths, BasePath);
     LHS = ImpCastExprToType(LHS.take(), UseType, CK_DerivedToBase, VK,
                             &BasePath);
   }
@@ -4400,42 +4402,21 @@
   bool LVoid = LTy->isVoidType();
   bool RVoid = RTy->isVoidType();
   if (LVoid || RVoid) {
-    //   ... then the [l2r] conversions are performed on the second and third
-    //   operands ...
-    LHS = DefaultFunctionArrayLvalueConversion(LHS.take());
-    RHS = DefaultFunctionArrayLvalueConversion(RHS.take());
-    if (LHS.isInvalid() || RHS.isInvalid())
-      return QualType();
-
-    // Finish off the lvalue-to-rvalue conversion by copy-initializing a
-    // temporary if necessary. DefaultFunctionArrayLvalueConversion doesn't
-    // do this part for us.
-    ExprResult &NonVoid = LVoid ? RHS : LHS;
-    if (NonVoid.get()->getType()->isRecordType() &&
-        NonVoid.get()->isGLValue()) {
-      if (RequireNonAbstractType(QuestionLoc, NonVoid.get()->getType(),
-                             diag::err_allocation_of_abstract_type))
-        return QualType();
-      InitializedEntity Entity =
-          InitializedEntity::InitializeTemporary(NonVoid.get()->getType());
-      NonVoid = PerformCopyInitialization(Entity, SourceLocation(), NonVoid);
-      if (NonVoid.isInvalid())
-        return QualType();
+    //   ... one of the following shall hold:
+    //   -- The second or the third operand (but not both) is a (possibly
+    //      parenthesized) throw-expression; the result is of the type
+    //      and value category of the other.
+    bool LThrow = isa<CXXThrowExpr>(LHS.get()->IgnoreParenImpCasts());
+    bool RThrow = isa<CXXThrowExpr>(RHS.get()->IgnoreParenImpCasts());
+    if (LThrow != RThrow) {
+      Expr *NonThrow = LThrow ? RHS.get() : LHS.get();
+      VK = NonThrow->getValueKind();
+      // DR (no number yet): the result is a bit-field if the
+      // non-throw-expression operand is a bit-field.
+      OK = NonThrow->getObjectKind();
+      return NonThrow->getType();
     }
 
-    LTy = LHS.get()->getType();
-    RTy = RHS.get()->getType();
-
-    //   ... and one of the following shall hold:
-    //   -- The second or the third operand (but not both) is a throw-
-    //      expression; the result is of the type of the other and is a prvalue.
-    bool LThrow = isa<CXXThrowExpr>(LHS.get()->IgnoreParenCasts());
-    bool RThrow = isa<CXXThrowExpr>(RHS.get()->IgnoreParenCasts());
-    if (LThrow && !RThrow)
-      return RTy;
-    if (RThrow && !LThrow)
-      return LTy;
-
     //   -- Both the second and third operands have type void; the result is of
     //      type void and is a prvalue.
     if (LVoid && RVoid)
@@ -4456,7 +4437,6 @@
   //   those operands to the type of the other.
   if (!Context.hasSameType(LTy, RTy) &&
       (LTy->isRecordType() || RTy->isRecordType())) {
-    ImplicitConversionSequence ICSLeftToRight, ICSRightToLeft;
     // These return true if a single direction is already ambiguous.
     QualType L2RType, R2LType;
     bool HaveL2R, HaveR2L;
@@ -5023,7 +5003,7 @@
 }
 
 Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
-  assert(SubExpr && "sub expression can't be null!");
+  assert(SubExpr && "subexpression can't be null!");
 
   CleanupVarDeclMarking();
 
@@ -5044,7 +5024,7 @@
 }
 
 Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
-  assert(SubStmt && "sub statement can't be null!");
+  assert(SubStmt && "sub-statement can't be null!");
 
   CleanupVarDeclMarking();
 
@@ -5105,19 +5085,21 @@
   }
 
   CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(E);
-  if (TopBind)
-    E = TopBind->getSubExpr();
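+  // Only strip the temporary binding if it wraps a call expression; the
+  // top-level call of a decltype operand gets special treatment below.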
+  CallExpr *TopCall = TopBind ? dyn_cast<CallExpr>(TopBind->getSubExpr()) : 0;
+  if (TopCall)
+    E = TopCall;
+  else
+    TopBind = 0;
 
   // Disable the special decltype handling now.
   ExprEvalContexts.back().IsDecltype = false;
 
   // In MS mode, don't perform any extra checking of call return types within a
   // decltype expression.
-  if (getLangOpts().MicrosoftMode)
+  if (getLangOpts().MSVCCompat)
     return Owned(E);
 
   // Perform the semantic checks we delayed until this point.
-  CallExpr *TopCall = dyn_cast<CallExpr>(E);
   for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeCalls.size();
        I != N; ++I) {
     CallExpr *Call = ExprEvalContexts.back().DelayedDecltypeCalls[I];
@@ -5388,7 +5370,7 @@
 
   if (!ObjectType->isDependentType() && !ObjectType->isScalarType() &&
       !ObjectType->isVectorType()) {
-    if (getLangOpts().MicrosoftMode && ObjectType->isVoidType())
+    if (getLangOpts().MSVCCompat && ObjectType->isVoidType())
       Diag(OpLoc, diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
     else
       Diag(OpLoc, diag::err_pseudo_dtor_base_not_scalar)
@@ -5677,7 +5659,7 @@
     ME->setHadMultipleCandidates(true);
   MarkMemberReferenced(ME);
 
-  QualType ResultType = Method->getResultType();
+  QualType ResultType = Method->getReturnType();
   ExprValueKind VK = Expr::getValueKindForType(ResultType);
   ResultType = ResultType.getNonLValueExprType(Context);
 
@@ -5836,56 +5818,71 @@
   assert(DefVD);
   if (DefVD->isWeak()) return false;
   EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt();
-  
+
   Expr *Init = cast<Expr>(Eval->Value);
 
   if (Var->getType()->isDependentType() || Init->isValueDependent()) {
-    if (!Init->isValueDependent())
-      return !DefVD->checkInitIsICE();
-    // FIXME: We might still be able to do some analysis of Init here
-    // to conclude that even in a dependent setting, Init can never
-    // be a constexpr - but for now admit agnosticity.
+    // FIXME: Teach the constant evaluator to deal with the non-dependent parts
+    // of value-dependent expressions, and use it here to determine whether the
+    // initializer is a potential constant expression.
     return false;
-  } 
+  }
+
   return !IsVariableAConstantExpression(Var, Context); 
 }
 
-/// \brief Check if the current lambda scope has any potential captures, and 
-///  whether they can be captured by any of the enclosing lambdas that are 
-///  ready to capture. If there is a lambda that can capture a nested 
-///  potential-capture, go ahead and do so.  Also, check to see if any 
-///  variables are uncaptureable or do not involve an odr-use so do not 
-///  need to be captured.
+/// \brief Check if the current lambda has any potential captures
+/// that must be captured by any of its enclosing lambdas that are ready to
+/// capture. If there is a lambda that can capture a nested
+/// potential-capture, go ahead and do so. Also, check to see if any
+/// variables are uncapturable or do not involve an odr-use and so do not
+/// need to be captured.
 
-static void CheckLambdaCaptures(Expr *const FE, 
-    LambdaScopeInfo *const CurrentLSI, Sema &S) {
-    
+static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
+    Expr *const FE, LambdaScopeInfo *const CurrentLSI, Sema &S) {
+
   assert(!S.isUnevaluatedContext());  
   assert(S.CurContext->isDependentContext()); 
-  const bool IsFullExprInstantiationDependent = 
-      FE->isInstantiationDependent();
-  // All the potentially captureable variables in the current nested 
+  assert(CurrentLSI->CallOperator == S.CurContext && 
+      "The current call operator must be synchronized with Sema's CurContext");
+
+  const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent();
+
+  ArrayRef<const FunctionScopeInfo *> FunctionScopesArrayRef(
+      S.FunctionScopes.data(), S.FunctionScopes.size());
+  
+  // All the potentially captureable variables in the current nested
   // lambda (within a generic outer lambda), must be captured by an
   // outer lambda that is enclosed within a non-dependent context.
-     
-  for (size_t I = 0, N = CurrentLSI->getNumPotentialVariableCaptures(); 
-      I != N; ++I) {
+  const unsigned NumPotentialCaptures =
+      CurrentLSI->getNumPotentialVariableCaptures();
+  for (unsigned I = 0; I != NumPotentialCaptures; ++I) {
     Expr *VarExpr = 0;
     VarDecl *Var = 0;
     CurrentLSI->getPotentialVariableCapture(I, Var, VarExpr);
-    // 
-    if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(VarExpr) && 
+    // If the variable is clearly identified as non-odr-used and the full
+    // expression is not instantiation dependent, only then do we not
+    // need to check enclosing lambdas for speculative captures.
+    // For example, even though 'x' is not odr-used below, it should
+    // still be captured:
+    // int test() {
+    //   const int x = 10;
+    //   auto L = [=](auto a) {
+    //     (void) +x + a;
+    //   };
+    // }
+    if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(VarExpr) &&
         !IsFullExprInstantiationDependent)
-      continue; 
-    // Climb up until we find a lambda that can capture:
-    //   - a generic-or-non-generic lambda call operator that is enclosed
-    //     within a non-dependent context.
-    unsigned FunctionScopeIndexOfCapturableLambda = 0;
-    if (GetInnermostEnclosingCapturableLambda(
-            S.FunctionScopes, FunctionScopeIndexOfCapturableLambda,
-            S.CurContext, Var, S)) {
-      MarkVarDeclODRUsed(Var, VarExpr->getExprLoc(), 
-          S, &FunctionScopeIndexOfCapturableLambda);
+      continue;
+
+    // If we have a capture-capable lambda for the variable, go ahead and
+    // capture the variable in that lambda (and all its enclosing lambdas).
+    if (const Optional<unsigned> Index =
+            getStackIndexOfNearestEnclosingCaptureCapableLambda(
+                FunctionScopesArrayRef, Var, S)) {
+      const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
+      MarkVarDeclODRUsed(Var, VarExpr->getExprLoc(), S,
+                         &FunctionScopeIndexOfCapturableLambda);
     } 
     const bool IsVarNeverAConstantExpression = 
         VariableCanNeverBeAConstantExpression(Var, S.Context);
@@ -5912,16 +5909,21 @@
     }
   }
 
+  // Check if 'this' needs to be captured.
   if (CurrentLSI->hasPotentialThisCapture()) {
-    unsigned FunctionScopeIndexOfCapturableLambda = 0;
-    if (GetInnermostEnclosingCapturableLambda(
-            S.FunctionScopes, FunctionScopeIndexOfCapturableLambda,
-            S.CurContext, /*0 is 'this'*/ 0, S)) {
-      S.CheckCXXThisCapture(CurrentLSI->PotentialThisCaptureLocation, 
-          /*Explicit*/false, /*BuildAndDiagnose*/true,  
-          &FunctionScopeIndexOfCapturableLambda);
+    // If we have a capture-capable lambda for 'this', go ahead and capture
+    // 'this' in that lambda (and all its enclosing lambdas).
+    if (const Optional<unsigned> Index =
+            getStackIndexOfNearestEnclosingCaptureCapableLambda(
+                FunctionScopesArrayRef, /*0 is 'this'*/ 0, S)) {
+      const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
+      S.CheckCXXThisCapture(CurrentLSI->PotentialThisCaptureLocation,
+                            /*Explicit*/ false, /*BuildAndDiagnose*/ true,
+                            &FunctionScopeIndexOfCapturableLambda);
     }
   }
+
+  // Reset all the potential captures at the end of each full-expression.
   CurrentLSI->clearPotentialCaptures();
 }
 
@@ -6018,11 +6020,12 @@
   //     FunctionScopes.size() in InstantiatingTemplate's 
   //     constructor/destructor.
   //  - Teach the handful of places that iterate over FunctionScopes to 
-  //    stop at the outermost enclosing lexical scope." 
-  const bool IsInLambdaDeclContext = isLambdaCallOperator(CurContext); 
-  if (IsInLambdaDeclContext && CurrentLSI && 
+  //    stop at the outermost enclosing lexical scope."
+  const bool IsInLambdaDeclContext = isLambdaCallOperator(CurContext);
+  if (IsInLambdaDeclContext && CurrentLSI &&
       CurrentLSI->hasPotentialCaptures() && !FullExpr.isInvalid())
-    CheckLambdaCaptures(FE, CurrentLSI, *this);
+    CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(FE, CurrentLSI,
+                                                              *this);
   return MaybeCreateExprWithCleanups(FullExpr);
 }
 
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index 0807e8f..c4a1780 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -322,8 +322,6 @@
              (Idx = vecType->getPointAccessorIdx(*compStr)) != -1) {
     bool HasRGBA = IsRGBA(*compStr);
     do {
-      // If we mix/match rgba with xyzw, break to signal that we encountered
-      // an illegal name.
       if (HasRGBA != IsRGBA(*compStr))
         break;
       if (HasIndex[Idx]) HasRepeated = true;
@@ -403,9 +401,8 @@
   if (ObjCMethodDecl *OMD = PDecl->getInstanceMethod(Sel))
     return OMD;
 
-  for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
-       E = PDecl->protocol_end(); I != E; ++I) {
-    if (Decl *D = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
+  for (const auto *I : PDecl->protocols()) {
+    if (Decl *D = FindGetterSetterNameDeclFromProtocolList(I, Member, Sel,
                                                            Context))
       return D;
   }
@@ -418,25 +415,22 @@
                                       ASTContext &Context) {
   // Check protocols on qualified interfaces.
   Decl *GDecl = 0;
-  for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
-       E = QIdTy->qual_end(); I != E; ++I) {
+  for (const auto *I : QIdTy->quals()) {
     if (Member)
-      if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Member)) {
+      if (ObjCPropertyDecl *PD = I->FindPropertyDeclaration(Member)) {
         GDecl = PD;
         break;
       }
     // Also must look for a getter or setter name which uses property syntax.
-    if (ObjCMethodDecl *OMD = (*I)->getInstanceMethod(Sel)) {
+    if (ObjCMethodDecl *OMD = I->getInstanceMethod(Sel)) {
       GDecl = OMD;
       break;
     }
   }
   if (!GDecl) {
-    for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
-         E = QIdTy->qual_end(); I != E; ++I) {
+    for (const auto *I : QIdTy->quals()) {
       // Search in the protocol-qualifier list of current protocol.
-      GDecl = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel, 
-                                                       Context);
+      GDecl = FindGetterSetterNameDeclFromProtocolList(I, Member, Sel, Context);
       if (GDecl)
         return GDecl;
     }
@@ -565,7 +559,7 @@
   explicit RecordMemberExprValidatorCCC(const RecordType *RTy)
       : Record(RTy->getDecl()) {}
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     NamedDecl *ND = candidate.getCorrectionDecl();
     // Don't accept candidates that cannot be member functions, constants,
     // variables, or templates.
@@ -578,11 +572,9 @@
 
     if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record)) {
       // Accept candidates that occur in any of the current class' base classes.
-      for (CXXRecordDecl::base_class_const_iterator BS = RD->bases_begin(),
-                                                    BSEnd = RD->bases_end();
-           BS != BSEnd; ++BS) {
+      for (const auto &BS : RD->bases()) {
         if (const RecordType *BSTy = dyn_cast_or_null<RecordType>(
-                BS->getType().getTypePtrOrNull())) {
+                BS.getType().getTypePtrOrNull())) {
           if (BSTy->getDecl()->containsDecl(ND))
             return true;
         }
@@ -830,8 +822,6 @@
     if (!result)
       return ExprError();
 
-    baseObjectIsPointer = false;
-    
     // FIXME: check qualified member access
   }
   
@@ -1647,8 +1637,7 @@
   bool IsArrow = (OpKind == tok::arrow);
 
   NamedDecl *FirstQualifierInScope
-    = (!SS.isSet() ? 0 : FindFirstQualifierInScope(S,
-                       static_cast<NestedNameSpecifier*>(SS.getScopeRep())));
+    = (!SS.isSet() ? 0 : FindFirstQualifierInScope(S, SS.getScopeRep()));
 
   // This is a postfix expression, so get rid of ParenListExprs.
   ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
@@ -1772,14 +1761,7 @@
   assert(!R.empty() && !R.isAmbiguous());
   
   SourceLocation loc = R.getNameLoc();
-  
-  // We may have found a field within an anonymous union or struct
-  // (C++ [class.union]).
-  // FIXME: template-ids inside anonymous structs?
-  if (IndirectFieldDecl *FD = R.getAsSingle<IndirectFieldDecl>())
-    return BuildAnonymousStructUnionMemberReference(SS, R.getNameLoc(), FD,
-                                                    R.begin().getPair());
-  
+
   // If this is known to be an instance access, go ahead and build an
   // implicit 'this' expression now.
   // 'this' expression now.
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index cc8eace..5bffdd1 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -67,10 +67,14 @@
 
     // Create the aggregate string with the appropriate content and location
     // information.
-    S = StringLiteral::Create(Context, StrBuf,
-                              StringLiteral::Ascii, /*Pascal=*/false,
-                              Context.getPointerType(Context.CharTy),
-                              &StrLocs[0], StrLocs.size());
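+    // Give the concatenated literal a constant array type of the combined
+    // length (including the terminating NUL), preserving the element type of
+    // the original literal.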
+    const ConstantArrayType *CAT = Context.getAsConstantArrayType(S->getType());
+    assert(CAT && "String literal not of constant array type!");
+    QualType StrTy = Context.getConstantArrayType(
+        CAT->getElementType(), llvm::APInt(32, StrBuf.size() + 1),
+        CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers());
+    S = StringLiteral::Create(Context, StrBuf, StringLiteral::Ascii,
+                              /*Pascal=*/false, StrTy, &StrLocs[0],
+                              StrLocs.size());
   }
   
   return BuildObjCStringLiteral(AtLocs[0], S);
@@ -152,7 +156,7 @@
   }
 
   // Make sure the return type is reasonable.
-  QualType ReturnType = Method->getResultType();
+  QualType ReturnType = Method->getReturnType();
   if (!ReturnType->isObjCObjectPointerType()) {
     S.Diag(Loc, diag::err_objc_literal_method_sig)
       << Sel;
@@ -224,16 +228,15 @@
   ObjCMethodDecl *Method = S.NSNumberDecl->lookupClassMethod(Sel);
   if (!Method && S.getLangOpts().DebuggerObjCLiteral) {
     // create a stub definition this NSNumber factory method.
-    TypeSourceInfo *ResultTInfo = 0;
-    Method = ObjCMethodDecl::Create(CX, SourceLocation(), SourceLocation(), Sel,
-                                    S.NSNumberPointer, ResultTInfo,
-                                    S.NSNumberDecl,
-                                    /*isInstance=*/false, /*isVariadic=*/false,
-                                    /*isPropertyAccessor=*/false,
-                                    /*isImplicitlyDeclared=*/true,
-                                    /*isDefined=*/false,
-                                    ObjCMethodDecl::Required,
-                                    /*HasRelatedResultType=*/false);
+    TypeSourceInfo *ReturnTInfo = 0;
+    Method =
+        ObjCMethodDecl::Create(CX, SourceLocation(), SourceLocation(), Sel,
+                               S.NSNumberPointer, ReturnTInfo, S.NSNumberDecl,
+                               /*isInstance=*/false, /*isVariadic=*/false,
+                               /*isPropertyAccessor=*/false,
+                               /*isImplicitlyDeclared=*/true,
+                               /*isDefined=*/false, ObjCMethodDecl::Required,
+                               /*HasRelatedResultType=*/false);
     ParmVarDecl *value = ParmVarDecl::Create(S.Context, Method,
                                              SourceLocation(), SourceLocation(),
                                              &CX.Idents.get("value"),
@@ -492,17 +495,15 @@
         BoxingMethod = NSStringDecl->lookupClassMethod(stringWithUTF8String);
         if (!BoxingMethod && getLangOpts().DebuggerObjCLiteral) {
           // Debugger needs to work even if NSString hasn't been defined.
-          TypeSourceInfo *ResultTInfo = 0;
-          ObjCMethodDecl *M =
-            ObjCMethodDecl::Create(Context, SourceLocation(), SourceLocation(),
-                                   stringWithUTF8String, NSStringPointer,
-                                   ResultTInfo, NSStringDecl,
-                                   /*isInstance=*/false, /*isVariadic=*/false,
-                                   /*isPropertyAccessor=*/false,
-                                   /*isImplicitlyDeclared=*/true,
-                                   /*isDefined=*/false,
-                                   ObjCMethodDecl::Required,
-                                   /*HasRelatedResultType=*/false);
+          TypeSourceInfo *ReturnTInfo = 0;
+          ObjCMethodDecl *M = ObjCMethodDecl::Create(
+              Context, SourceLocation(), SourceLocation(), stringWithUTF8String,
+              NSStringPointer, ReturnTInfo, NSStringDecl,
+              /*isInstance=*/false, /*isVariadic=*/false,
+              /*isPropertyAccessor=*/false,
+              /*isImplicitlyDeclared=*/true,
+              /*isDefined=*/false, ObjCMethodDecl::Required,
+              /*HasRelatedResultType=*/false);
           QualType ConstCharType = Context.CharTy.withConst();
           ParmVarDecl *value =
             ParmVarDecl::Create(Context, M,
@@ -660,17 +661,14 @@
       Sel = NSAPIObj->getNSArraySelector(NSAPI::NSArr_arrayWithObjectsCount);
     ObjCMethodDecl *Method = NSArrayDecl->lookupClassMethod(Sel);
     if (!Method && getLangOpts().DebuggerObjCLiteral) {
-      TypeSourceInfo *ResultTInfo = 0;
-      Method = ObjCMethodDecl::Create(Context,
-                           SourceLocation(), SourceLocation(), Sel,
-                           IdT,
-                           ResultTInfo,
-                           Context.getTranslationUnitDecl(),
-                           false /*Instance*/, false/*isVariadic*/,
-                           /*isPropertyAccessor=*/false,
-                           /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
-                           ObjCMethodDecl::Required,
-                           false);
+      TypeSourceInfo *ReturnTInfo = 0;
+      Method = ObjCMethodDecl::Create(
+          Context, SourceLocation(), SourceLocation(), Sel, IdT, ReturnTInfo,
+          Context.getTranslationUnitDecl(), false /*Instance*/,
+          false /*isVariadic*/,
+          /*isPropertyAccessor=*/false,
+          /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+          ObjCMethodDecl::Required, false);
       SmallVector<ParmVarDecl *, 2> Params;
       ParmVarDecl *objects = ParmVarDecl::Create(Context, Method,
                                                  SourceLocation(),
@@ -975,6 +973,58 @@
   return BuildObjCEncodeExpression(AtLoc, TInfo, RParenLoc);
 }
 
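+// Helper for DiagnoseMismatchedSelectors: walks one list from the global
+// method pool and warns if it contains a method with the same selector as
+// 'Method' but a mismatched signature, noting each conflicting declaration.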
+static bool HelperToDiagnoseMismatchedMethodsInGlobalPool(Sema &S,
+                                               SourceLocation AtLoc,
+                                               ObjCMethodDecl *Method,
+                                               ObjCMethodList &MethList) {
+  ObjCMethodList *M = &MethList;
+  bool Warned = false;
+  for (M = M->getNext(); M; M=M->getNext()) {
+    ObjCMethodDecl *MatchingMethodDecl = M->Method;
+    if (MatchingMethodDecl == Method ||
+        isa<ObjCImplDecl>(MatchingMethodDecl->getDeclContext()) ||
+        MatchingMethodDecl->getSelector() != Method->getSelector())
+      continue;
+    if (!S.MatchTwoMethodDeclarations(Method,
+                                      MatchingMethodDecl, Sema::MMS_loose)) {
+      if (!Warned) {
+        Warned = true;
+        S.Diag(AtLoc, diag::warning_multiple_selectors)
+          << Method->getSelector();
+        S.Diag(Method->getLocation(), diag::note_method_declared_at)
+          << Method->getDeclName();
+      }
+      S.Diag(MatchingMethodDecl->getLocation(), diag::note_method_declared_at)
+        << MatchingMethodDecl->getDeclName();
+    }
+  }
+  return Warned;
+}
+
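+// Warn when the selector named in an @selector expression is declared with
+// differing signatures among the instance and class methods in the global
+// method pool.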
+static void DiagnoseMismatchedSelectors(Sema &S, SourceLocation AtLoc,
+                                        ObjCMethodDecl *Method) {
+  if (S.Diags.getDiagnosticLevel(diag::warning_multiple_selectors,
+                                 SourceLocation())
+        == DiagnosticsEngine::Ignored)
+    return;
+  bool Warned = false;
+  for (Sema::GlobalMethodPool::iterator b = S.MethodPool.begin(),
+       e = S.MethodPool.end(); b != e; b++) {
+    // first, instance methods
+    ObjCMethodList &InstMethList = b->second.first;
+    if (HelperToDiagnoseMismatchedMethodsInGlobalPool(S, AtLoc,
+                                                      Method, InstMethList))
+      Warned = true;
+
+    // second, class methods
+    ObjCMethodList &ClsMethList = b->second.second;
+    if (HelperToDiagnoseMismatchedMethodsInGlobalPool(S, AtLoc,
+                                                      Method, ClsMethList) ||
+        Warned)
+      return;
+  }
+}
+
 ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
                                              SourceLocation AtLoc,
                                              SourceLocation SelLoc,
@@ -996,7 +1046,8 @@
       
     } else
         Diag(SelLoc, diag::warn_undeclared_selector) << Sel;
-  }
+  } else
+    DiagnoseMismatchedSelectors(*this, AtLoc, Method);
   
   if (!Method ||
       Method->getImplementationControl() != ObjCMethodDecl::Optional) {
@@ -1120,7 +1171,8 @@
 static const ObjCMethodDecl *
 findExplicitInstancetypeDeclarer(const ObjCMethodDecl *MD,
                                  QualType instancetype) {
-  if (MD->getResultType() == instancetype) return MD;
+  if (MD->getReturnType() == instancetype)
+    return MD;
 
   // For these purposes, a method in an @implementation overrides a
   // declaration in the @interface.
@@ -1155,7 +1207,7 @@
   // type doesn't match the method's declared return type.
   ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurContext);
   if (!MD || !MD->hasRelatedResultType() ||
-      Context.hasSameUnqualifiedType(destType, MD->getResultType()))
+      Context.hasSameUnqualifiedType(destType, MD->getReturnType()))
     return;
 
   // Look for a method overridden by this method which explicitly uses
@@ -1164,7 +1216,7 @@
         findExplicitInstancetypeDeclarer(MD, Context.getObjCInstanceType())) {
     SourceLocation loc;
     SourceRange range;
-    if (TypeSourceInfo *TSI = overridden->getResultTypeSourceInfo()) {
+    if (TypeSourceInfo *TSI = overridden->getReturnTypeSourceInfo()) {
       range = TSI->getTypeLoc().getSourceRange();
       loc = range.getBegin();
     }
@@ -1195,13 +1247,12 @@
   
   if (!Method->hasRelatedResultType())
     return;
-  
-  if (Context.hasSameUnqualifiedType(Method->getResultType()
-                                                        .getNonReferenceType(),
-                                     MsgSend->getType()))
+
+  if (Context.hasSameUnqualifiedType(
+          Method->getReturnType().getNonReferenceType(), MsgSend->getType()))
     return;
-  
-  if (!Context.hasSameUnqualifiedType(Method->getResultType(), 
+
+  if (!Context.hasSameUnqualifiedType(Method->getReturnType(),
                                       Context.getObjCInstanceType()))
     return;
   
@@ -1287,7 +1338,7 @@
 
   ReturnType = getMessageSendResultType(ReceiverType, Method, isClassMessage, 
                                         isSuperMessage);
-  VK = Expr::getValueKindForType(Method->getResultType());
+  VK = Expr::getValueKindForType(Method->getReturnType());
 
   unsigned NumNamedArgs = Sel.getNumArgs();
   // Method might have more arguments than selector indicates. This is due
@@ -1380,10 +1431,14 @@
   return IsError;
 }
 
-bool Sema::isSelfExpr(Expr *receiver) {
+bool Sema::isSelfExpr(Expr *RExpr) {
   // 'self' is objc 'self' in an objc method only.
-  ObjCMethodDecl *method =
-    dyn_cast_or_null<ObjCMethodDecl>(CurContext->getNonClosureAncestor());
+  ObjCMethodDecl *Method =
+      dyn_cast_or_null<ObjCMethodDecl>(CurContext->getNonClosureAncestor());
+  return isSelfExpr(RExpr, Method);
+}
+
+bool Sema::isSelfExpr(Expr *receiver, const ObjCMethodDecl *method) {
   if (!method) return false;
 
   receiver = receiver->IgnoreParenLValueCasts();
@@ -1409,9 +1464,8 @@
   }
 
   // Check qualifiers.
-  for (ObjCObjectType::qual_iterator
-         i = objType->qual_begin(), e = objType->qual_end(); i != e; ++i)
-    if (ObjCMethodDecl *method = (*i)->lookupMethod(sel, isInstance))
+  for (const auto *I : objType->quals())
+    if (ObjCMethodDecl *method = I->lookupMethod(sel, isInstance))
       return method;
 
   return 0;
@@ -1424,9 +1478,7 @@
                                               bool Instance)
 {
   ObjCMethodDecl *MD = 0;
-  for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
-       E = OPT->qual_end(); I != E; ++I) {
-    ObjCProtocolDecl *PROTO = (*I);
+  for (const auto *PROTO : OPT->quals()) {
     if ((MD = PROTO->lookupMethod(Sel, Instance))) {
       return MD;
     }
@@ -1452,7 +1504,7 @@
       if (PRE->isImplicitProperty()) {
         GDecl = PRE->getImplicitPropertyGetter();
         if (GDecl) {
-          T = GDecl->getResultType();
+          T = GDecl->getReturnType();
         }
       }
       else {
@@ -1535,9 +1587,8 @@
                                                      MemberLoc, BaseExpr));
   }
   // Check protocols on qualified interfaces.
-  for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
-       E = OPT->qual_end(); I != E; ++I)
-    if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Member)) {
+  for (const auto *I : OPT->quals())
+    if (ObjCPropertyDecl *PD = I->FindPropertyDeclaration(Member)) {
       // Check whether we can reference this property.
       if (DiagnoseUseOfDecl(PD, MemberLoc))
         return ExprError();
@@ -1699,7 +1750,8 @@
     }
     
     if (IFace == 0) {
-      Diag(receiverNameLoc, diag::err_expected_ident_or_lparen);
+      Diag(receiverNameLoc, diag::err_expected_either) << tok::identifier
+                                                       << tok::l_paren;
       return ExprError();
     }
   }
@@ -1773,7 +1825,7 @@
       WantObjCSuper = Method->getClassInterface()->getSuperClass();
   }
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     return candidate.getCorrectionDeclAs<ObjCInterfaceDecl>() ||
         candidate.isKeyword("super");
   }
@@ -2110,8 +2162,8 @@
                                 ReturnType, VK))
     return ExprError();
 
-  if (Method && !Method->getResultType()->isVoidType() &&
-      RequireCompleteType(LBracLoc, Method->getResultType(), 
+  if (Method && !Method->getReturnType()->isVoidType() &&
+      RequireCompleteType(LBracLoc, Method->getReturnType(),
                           diag::err_illegal_message_expr_incomplete_type))
     return ExprError();
 
@@ -2447,6 +2499,52 @@
     }
   }
 
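+  // For init methods, enforce the designated-initializer rules: a designated
+  // initializer should only chain to a superclass designated initializer, and
+  // a secondary initializer should delegate to 'self' rather than [super init].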
+  FunctionScopeInfo *DIFunctionScopeInfo =
+    (Method && Method->getMethodFamily() == OMF_init)
+      ? getEnclosingFunction() : 0;
+  
+  if (DIFunctionScopeInfo &&
+      DIFunctionScopeInfo->ObjCIsDesignatedInit &&
+      (SuperLoc.isValid() || isSelfExpr(Receiver))) {
+    bool isDesignatedInitChain = false;
+    if (SuperLoc.isValid()) {
+      if (const ObjCObjectPointerType *
+            OCIType = ReceiverType->getAsObjCInterfacePointerType()) {
+        if (const ObjCInterfaceDecl *ID = OCIType->getInterfaceDecl()) {
+          // Either we know this is a designated initializer or we
+          // conservatively assume it because we don't know for sure.
+          if (!ID->declaresOrInheritsDesignatedInitializers() ||
+              ID->isDesignatedInitializer(Sel)) {
+            isDesignatedInitChain = true;
+            DIFunctionScopeInfo->ObjCWarnForNoDesignatedInitChain = false;
+          }
+        }
+      }
+    }
+    if (!isDesignatedInitChain) {
+      const ObjCMethodDecl *InitMethod = 0;
+      bool isDesignated =
+        getCurMethodDecl()->isDesignatedInitializerForTheInterface(&InitMethod);
+      assert(isDesignated && InitMethod);
+      (void)isDesignated;
+      Diag(SelLoc, SuperLoc.isValid() ?
+             diag::warn_objc_designated_init_non_designated_init_call :
+             diag::warn_objc_designated_init_non_super_designated_init_call);
+      Diag(InitMethod->getLocation(),
+           diag::note_objc_designated_init_marked_here);
+    }
+  }
+
+  if (DIFunctionScopeInfo &&
+      DIFunctionScopeInfo->ObjCIsSecondaryInit &&
+      (SuperLoc.isValid() || isSelfExpr(Receiver))) {
+    if (SuperLoc.isValid()) {
+      Diag(SelLoc, diag::warn_objc_secondary_init_super_init_call);
+    } else {
+      DIFunctionScopeInfo->ObjCWarnForNoInitDelegation = false;
+    }
+  }
+
   // Check the message arguments.
   unsigned NumArgs = ArgsIn.size();
   Expr **Args = ArgsIn.data();
@@ -2459,9 +2557,9 @@
                                 ClassMessage, SuperLoc.isValid(), 
                                 LBracLoc, RBracLoc, ReturnType, VK))
     return ExprError();
-  
-  if (Method && !Method->getResultType()->isVoidType() &&
-      RequireCompleteType(LBracLoc, Method->getResultType(), 
+
+  if (Method && !Method->getReturnType()->isVoidType() &&
+      RequireCompleteType(LBracLoc, Method->getReturnType(),
                           diag::err_illegal_message_expr_incomplete_type))
     return ExprError();
 
@@ -2600,7 +2698,7 @@
       }
     }
   }
-      
+  
   return MaybeBindToTemporary(Result);
 }
 
@@ -2859,7 +2957,7 @@
 
     ACCResult checkCallToFunction(FunctionDecl *fn) {
       // Require a CF*Ref return type.
-      if (!isCFType(fn->getResultType()))
+      if (!isCFType(fn->getReturnType()))
         return ACC_invalid;
 
       if (!isAnyRetainable(TargetClass))
@@ -2912,7 +3010,7 @@
       // Check for message sends to functions returning CF types.  We
       // just obey the Cocoa conventions with these, even though the
       // return type is CF.
-      if (!isAnyRetainable(TargetClass) || !isCFType(method->getResultType()))
+      if (!isAnyRetainable(TargetClass) || !isCFType(method->getReturnType()))
         return ACC_invalid;
       
       // If the method is explicitly marked not-retained, it's +0.
@@ -3045,6 +3143,31 @@
   }
 }
 
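+// Returns the attribute of type T (e.g. ObjCBridgeAttr) attached to the record
+// that the given typedef's underlying pointer type points to, if any.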
+template <typename T>
+static inline T *getObjCBridgeAttr(const TypedefType *TD) {
+  TypedefNameDecl *TDNDecl = TD->getDecl();
+  QualType QT = TDNDecl->getUnderlyingType();
+  if (QT->isPointerType()) {
+    QT = QT->getPointeeType();
+    if (const RecordType *RT = QT->getAs<RecordType>())
+      if (RecordDecl *RD = RT->getDecl())
+        return RD->getAttr<T>();
+  }
+  return 0;
+}
+
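+// Walks the chain of typedefs behind T and returns the first
+// ObjCBridgeRelatedAttr found on the pointed-to record, reporting the typedef
+// it was reached through in TDNDecl.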
+static ObjCBridgeRelatedAttr *ObjCBridgeRelatedAttrFromType(QualType T,
+                                                            TypedefNameDecl *&TDNDecl) {
+  while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
+    TDNDecl = TD->getDecl();
+    if (ObjCBridgeRelatedAttr *ObjCBAttr =
+        getObjCBridgeAttr<ObjCBridgeRelatedAttr>(TD))
+      return ObjCBAttr;
+    T = TDNDecl->getUnderlyingType();
+  }
+  return 0;
+}
+
 static void
 diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
                           QualType castType, ARCConversionTypeClass castACTC,
@@ -3059,6 +3182,12 @@
     return;
 
   QualType castExprType = castExpr->getType();
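+  // Skip the generic ARC cast diagnostic when either side carries an
+  // objc_bridge_related attribute for this direction; such conversions are
+  // diagnosed by the bridged-related checks instead.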
+  TypedefNameDecl *TDNDecl = 0;
+  if ((castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable &&
+       ObjCBridgeRelatedAttrFromType(castType, TDNDecl)) ||
+      (exprACTC == ACTC_coreFoundation && castACTC == ACTC_retainable &&
+       ObjCBridgeRelatedAttrFromType(castExprType, TDNDecl)))
+    return;
   
   unsigned srcKind = 0;
   switch (exprACTC) {
@@ -3165,24 +3294,12 @@
     << castRange << castExpr->getSourceRange();
 }
 
-static inline ObjCBridgeAttr *getObjCBridgeAttr(const TypedefType *TD) {
-  TypedefNameDecl *TDNDecl = TD->getDecl();
-  QualType QT = TDNDecl->getUnderlyingType();
-  if (QT->isPointerType()) {
-    QT = QT->getPointeeType();
-    if (const RecordType *RT = QT->getAs<RecordType>())
-      if (RecordDecl *RD = RT->getDecl())
-        if (RD->hasAttr<ObjCBridgeAttr>())
-          return RD->getAttr<ObjCBridgeAttr>();
-  }
-  return 0;
-}
-
+template <typename TB>
 static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr) {
   QualType T = castExpr->getType();
   while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
     TypedefNameDecl *TDNDecl = TD->getDecl();
-    if (ObjCBridgeAttr *ObjCBAttr = getObjCBridgeAttr(TD)) {
+    if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) {
       if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) {
         NamedDecl *Target = 0;
         // Check for an existing type with this name.
@@ -3196,20 +3313,30 @@
                   castType->getAsObjCInterfacePointerType()) {
               ObjCInterfaceDecl *CastClass
                 = InterfacePointerType->getObjectType()->getInterface();
-              if ((CastClass == ExprClass) || (CastClass && ExprClass->isSuperClassOf(CastClass)))
+              if ((CastClass == ExprClass) ||
+                  (CastClass && ExprClass->isSuperClassOf(CastClass)))
                 return true;
               S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge)
                 << T << Target->getName() << castType->getPointeeType();
               return true;
-            } else {
+            } else if (castType->isObjCIdType() ||
+                       (S.Context.ObjCObjectAdoptsQTypeProtocols(
+                          castType, ExprClass)))
+              // OK to cast to 'id'.
+              // Casting to id<p-list> is OK if the bridge type adopts all of
+              // the p-list protocols.
+              return true;
+            else {
               S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge)
                 << T << Target->getName() << castType;
+              S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+              S.Diag(Target->getLocStart(), diag::note_declared_at);
               return true;
            }
           }
         }
         S.Diag(castExpr->getLocStart(), diag::err_objc_cf_bridged_not_interface)
-        << castExpr->getType() << Parm->getName();
+          << castExpr->getType() << Parm;
         S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
         if (Target)
           S.Diag(Target->getLocStart(), diag::note_declared_at);
@@ -3221,12 +3348,12 @@
   return false;
 }
 
-// (CFErrorRef)ns
+template <typename TB>
 static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr) {
   QualType T = castType;
   while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
     TypedefNameDecl *TDNDecl = TD->getDecl();
-    if (ObjCBridgeAttr *ObjCBAttr = getObjCBridgeAttr(TD)) {
+    if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) {
       if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) {
         NamedDecl *Target = 0;
         // Check for an existing type with this name.
@@ -3240,16 +3367,25 @@
                   castExpr->getType()->getAsObjCInterfacePointerType()) {
               ObjCInterfaceDecl *ExprClass
                 = InterfacePointerType->getObjectType()->getInterface();
-              if ((CastClass == ExprClass) || (ExprClass && CastClass->isSuperClassOf(ExprClass)))
+              if ((CastClass == ExprClass) ||
+                  (ExprClass && CastClass->isSuperClassOf(ExprClass)))
                 return true;
               S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge_to_cf)
                 << castExpr->getType()->getPointeeType() << T;
               S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
               return true;
-            } else {
+            } else if (castExpr->getType()->isObjCIdType() ||
+                       (S.Context.QIdProtocolsAdoptObjCObjectProtocols(
+                          castExpr->getType(), CastClass)))
+              // OK to cast an 'id' expression to a CF type.
+              // OK to cast an 'id<plist>' expression to a CF type provided
+              // plist adopts all of the CF type's Objective-C class protocols.
+              return true;
+            else {
               S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge_to_cf)
                 << castExpr->getType() << castType;
               S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+              S.Diag(Target->getLocStart(), diag::note_declared_at);
               return true;
             }
           }
@@ -3267,6 +3403,173 @@
   return false;
 }
 
+void Sema::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) {
+  // Warn when __bridge is used to cast to or from a toll-free-bridged type.
+  ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExpr->getType());
+  ARCConversionTypeClass castACTC = classifyTypeForARCConversion(castType);
+  if (castACTC == ACTC_retainable && exprACTC == ACTC_coreFoundation) {
+    (void)CheckObjCBridgeNSCast<ObjCBridgeAttr>(*this, castType, castExpr);
+    (void)CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(*this, castType, castExpr);
+  }
+  else if (castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable) {
+    (void)CheckObjCBridgeCFCast<ObjCBridgeAttr>(*this, castType, castExpr);
+    (void)CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(*this, castType, castExpr);
+  }
+}
+
+
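+// For a conversion between a CF type and an Objective-C type related via an
+// objc_bridge_related attribute, look up the related class and the class or
+// instance method named in the attribute, diagnosing anything that is missing.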
+bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
+                                            QualType DestType, QualType SrcType,
+                                            ObjCInterfaceDecl *&RelatedClass,
+                                            ObjCMethodDecl *&ClassMethod,
+                                            ObjCMethodDecl *&InstanceMethod,
+                                            TypedefNameDecl *&TDNDecl,
+                                            bool CfToNs) {
+  QualType T = CfToNs ? SrcType : DestType;
+  ObjCBridgeRelatedAttr *ObjCBAttr = ObjCBridgeRelatedAttrFromType(T, TDNDecl);
+  if (!ObjCBAttr)
+    return false;
+  
+  IdentifierInfo *RCId = ObjCBAttr->getRelatedClass();
+  IdentifierInfo *CMId = ObjCBAttr->getClassMethod();
+  IdentifierInfo *IMId = ObjCBAttr->getInstanceMethod();
+  if (!RCId)
+    return false;
+  NamedDecl *Target = 0;
+  // Check for an existing type with this name.
+  LookupResult R(*this, DeclarationName(RCId), SourceLocation(),
+                 Sema::LookupOrdinaryName);
+  if (!LookupName(R, TUScope)) {
+    Diag(Loc, diag::err_objc_bridged_related_invalid_class) << RCId
+          << SrcType << DestType;
+    Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+    return false;
+  }
+  Target = R.getFoundDecl();
+  if (Target && isa<ObjCInterfaceDecl>(Target))
+    RelatedClass = cast<ObjCInterfaceDecl>(Target);
+  else {
+    Diag(Loc, diag::err_objc_bridged_related_invalid_class_name) << RCId
+          << SrcType << DestType;
+    Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+    if (Target)
+      Diag(Target->getLocStart(), diag::note_declared_at);
+    return false;
+  }
+      
+  // Check for an existing class method with the given selector name.
+  if (CfToNs && CMId) {
+    Selector Sel = Context.Selectors.getUnarySelector(CMId);
+    ClassMethod = RelatedClass->lookupMethod(Sel, false);
+    if (!ClassMethod) {
+      Diag(Loc, diag::err_objc_bridged_related_known_method)
+            << SrcType << DestType << Sel << false;
+      Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+      return false;
+    }
+  }
+      
+  // Check for an existing instance method with the given selector name.
+  if (!CfToNs && IMId) {
+    Selector Sel = Context.Selectors.getNullarySelector(IMId);
+    InstanceMethod = RelatedClass->lookupMethod(Sel, true);
+    if (!InstanceMethod) {
+      Diag(Loc, diag::err_objc_bridged_related_known_method)
+            << SrcType << DestType << Sel << true;
+      Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+      return false;
+    }
+  }
+  return true;
+}
+
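+// If SrcType and DestType are related via an objc_bridge_related attribute,
+// diagnose the conversion with a fix-it and rewrite SrcExpr into the
+// corresponding class-method or instance-method/property message send.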
+bool
+Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
+                                        QualType DestType, QualType SrcType,
+                                        Expr *&SrcExpr) {
+  ARCConversionTypeClass rhsExprACTC = classifyTypeForARCConversion(SrcType);
+  ARCConversionTypeClass lhsExprACTC = classifyTypeForARCConversion(DestType);
+  bool CfToNs = (rhsExprACTC == ACTC_coreFoundation && lhsExprACTC == ACTC_retainable);
+  bool NsToCf = (rhsExprACTC == ACTC_retainable && lhsExprACTC == ACTC_coreFoundation);
+  if (!CfToNs && !NsToCf)
+    return false;
+  
+  ObjCInterfaceDecl *RelatedClass;
+  ObjCMethodDecl *ClassMethod = 0;
+  ObjCMethodDecl *InstanceMethod = 0;
+  TypedefNameDecl *TDNDecl = 0;
+  if (!checkObjCBridgeRelatedComponents(Loc, DestType, SrcType, RelatedClass,
+                                        ClassMethod, InstanceMethod, TDNDecl, CfToNs))
+    return false;
+  
+  if (CfToNs) {
+    // Implicit conversion from CF to ObjC object is needed.
+    if (ClassMethod) {
+      std::string ExpressionString = "[";
+      ExpressionString += RelatedClass->getNameAsString();
+      ExpressionString += " ";
+      ExpressionString += ClassMethod->getSelector().getAsString();
+      SourceLocation SrcExprEndLoc = PP.getLocForEndOfToken(SrcExpr->getLocEnd());
+      // Provide a fixit: [RelatedClass ClassMethod SrcExpr]
+      Diag(Loc, diag::err_objc_bridged_related_known_method)
+        << SrcType << DestType << ClassMethod->getSelector() << false
+        << FixItHint::CreateInsertion(SrcExpr->getLocStart(), ExpressionString)
+        << FixItHint::CreateInsertion(SrcExprEndLoc, "]");
+      Diag(RelatedClass->getLocStart(), diag::note_declared_at);
+      Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+      
+      QualType receiverType =
+        Context.getObjCInterfaceType(RelatedClass);
+      // Argument.
+      Expr *args[] = { SrcExpr };
+      ExprResult msg = BuildClassMessageImplicit(receiverType, false,
+                                      ClassMethod->getLocation(),
+                                      ClassMethod->getSelector(), ClassMethod,
+                                      MultiExprArg(args, 1));
+      SrcExpr = msg.take();
+      return true;
+    }
+  }
+  else {
+    // Implicit conversion from ObjC type to CF object is needed.
+    if (InstanceMethod) {
+      std::string ExpressionString;
+      SourceLocation SrcExprEndLoc = PP.getLocForEndOfToken(SrcExpr->getLocEnd());
+      if (InstanceMethod->isPropertyAccessor())
+        if (const ObjCPropertyDecl *PDecl = InstanceMethod->findPropertyDecl()) {
+          // Provide a fixit: ObjectExpr.propertyname when it is a property accessor.
+          ExpressionString = ".";
+          ExpressionString += PDecl->getNameAsString();
+          Diag(Loc, diag::err_objc_bridged_related_known_method)
+          << SrcType << DestType << InstanceMethod->getSelector() << true
+          << FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
+        }
+      if (ExpressionString.empty()) {
+        // Provide a fixit: [ObjectExpr InstanceMethod]
+        ExpressionString = " ";
+        ExpressionString += InstanceMethod->getSelector().getAsString();
+        ExpressionString += "]";
+      
+        Diag(Loc, diag::err_objc_bridged_related_known_method)
+        << SrcType << DestType << InstanceMethod->getSelector() << true
+        << FixItHint::CreateInsertion(SrcExpr->getLocStart(), "[")
+        << FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
+      }
+      Diag(RelatedClass->getLocStart(), diag::note_declared_at);
+      Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+      
+      ExprResult msg =
+        BuildInstanceMessageImplicit(SrcExpr, SrcType,
+                                     InstanceMethod->getLocation(),
+                                     InstanceMethod->getSelector(),
+                                     InstanceMethod, None);
+      SrcExpr = msg.take();
+      return true;
+    }
+  }
+  return false;
+}
+
 Sema::ARCConversionResult
 Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
                              Expr *&castExpr, CheckedConversionKind CCK,
@@ -3327,12 +3630,14 @@
   
   if (castACTC == ACTC_retainable && exprACTC == ACTC_coreFoundation &&
       (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast))
-    if (CheckObjCBridgeNSCast(*this, castType, castExpr))
+    if (CheckObjCBridgeNSCast<ObjCBridgeAttr>(*this, castType, castExpr) ||
+        CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(*this, castType, castExpr))
       return ACR_okay;
     
   if (castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable &&
       (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast))
-    if (CheckObjCBridgeCFCast(*this, castType, castExpr))
+    if (CheckObjCBridgeCFCast<ObjCBridgeAttr>(*this, castType, castExpr) ||
+        CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(*this, castType, castExpr))
       return ACR_okay;
     
 
@@ -3362,6 +3667,13 @@
       CCK != CCK_ImplicitConversion)
     return ACR_unbridged;
 
+  // Do not issue a "bridge cast" diagnostic when implicitly casting a C string
+  // to 'NSString *'. Let the caller issue a normal type-mismatch diagnostic
+  // with a suitable fix-it.
+  if (castACTC == ACTC_retainable && exprACTC == ACTC_none &&
+      ConversionToObjCStringLiteralCheck(castType, castExpr))
+    return ACR_okay;
+  
   // Do not issue "bridge cast" diagnostic when implicit casting
   // a retainable object to a CF type parameter belonging to an audited
   // CF API function. Let caller issue a normal type mismatched diagnostic
@@ -3599,6 +3911,8 @@
                                       Expr *SubExpr) {
   TypeSourceInfo *TSInfo = 0;
   QualType T = GetTypeFromParser(Type, &TSInfo);
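+  // Check explicit __bridge casts against the toll-free bridging attributes.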
+  if (Kind == OBC_Bridge)
+    CheckTollFreeBridgeCast(T, SubExpr);
   if (!TSInfo)
     TSInfo = Context.getTrivialTypeSourceInfo(T, LParenLoc);
   return BuildObjCBridgedCast(LParenLoc, Kind, BridgeKeywordLoc, TSInfo, 
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index 034c1b6..3829016 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -441,26 +441,22 @@
                               Entity, ILE, RequiresSecondPass);
     else if (RDecl->isUnion() && isa<CXXRecordDecl>(RDecl) &&
              cast<CXXRecordDecl>(RDecl)->hasInClassInitializer()) {
-      for (RecordDecl::field_iterator Field = RDecl->field_begin(),
-                                      FieldEnd = RDecl->field_end();
-           Field != FieldEnd; ++Field) {
+      for (auto *Field : RDecl->fields()) {
         if (Field->hasInClassInitializer()) {
-          FillInValueInitForField(0, *Field, Entity, ILE, RequiresSecondPass);
+          FillInValueInitForField(0, Field, Entity, ILE, RequiresSecondPass);
           break;
         }
       }
     } else {
       unsigned Init = 0;
-      for (RecordDecl::field_iterator Field = RDecl->field_begin(),
-                                      FieldEnd = RDecl->field_end();
-           Field != FieldEnd; ++Field) {
+      for (auto *Field : RDecl->fields()) {
         if (Field->isUnnamedBitfield())
           continue;
 
         if (hadError)
           return;
 
-        FillInValueInitForField(Init, *Field, Entity, ILE, RequiresSecondPass);
+        FillInValueInitForField(Init, Field, Entity, ILE, RequiresSecondPass);
         if (hadError)
           return;
 
@@ -587,13 +583,10 @@
 int InitListChecker::numStructUnionElements(QualType DeclType) {
   RecordDecl *structDecl = DeclType->getAs<RecordType>()->getDecl();
   int InitializableMembers = 0;
-  for (RecordDecl::field_iterator
-         Field = structDecl->field_begin(),
-         FieldEnd = structDecl->field_end();
-       Field != FieldEnd; ++Field) {
+  for (const auto *Field : structDecl->fields())
     if (!Field->isUnnamedBitfield())
       ++InitializableMembers;
-  }
+
   if (structDecl->isUnion())
     return std::min(InitializableMembers, 1);
   return InitializableMembers - structDecl->hasFlexibleArrayMember();
@@ -1005,9 +998,11 @@
 
   Expr *expr = IList->getInit(Index);
   if (InitListExpr *SubIList = dyn_cast<InitListExpr>(expr)) {
+    // FIXME: This is invalid, and accepting it causes overload resolution
+    // to pick the wrong overload in some corner cases.
     if (!VerifyOnly)
       SemaRef.Diag(SubIList->getLocStart(),
-                   diag::warn_many_braces_around_scalar_init)
+                   diag::ext_many_braces_around_scalar_init)
         << SubIList->getSourceRange();
 
     CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList,
@@ -1538,7 +1533,7 @@
          it != end; ++it) {
       if (!it->isUnnamedBitfield() && !it->hasInClassInitializer()) {
         SemaRef.Diag(IList->getSourceRange().getEnd(),
-                     diag::warn_missing_field_initializers) << it->getName();
+                     diag::warn_missing_field_initializers) << *it;
         break;
       }
     }
@@ -1648,7 +1643,7 @@
   explicit FieldInitializerValidatorCCC(RecordDecl *RD)
       : Record(RD) {}
 
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     FieldDecl *FD = candidate.getCorrectionDeclAs<FieldDecl>();
     return FD && FD->getDeclContext()->getRedeclContext()->Equals(Record);
   }
@@ -2291,8 +2286,7 @@
     if (RDecl->isUnion())
       NumElements = 1;
     else
-      NumElements = std::distance(RDecl->field_begin(),
-                                  RDecl->field_end());
+      NumElements = std::distance(RDecl->field_begin(), RDecl->field_end());
   }
 
   Result->reserveInits(SemaRef.Context, NumElements);
@@ -3520,10 +3514,13 @@
         if (ConvTemplate)
           S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
                                            ActingDC, Initializer,
-                                           DestType, CandidateSet);
+                                           DestType, CandidateSet,
+                                           /*AllowObjCConversionOnExplicit=*/
+                                             false);
         else
           S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
-                                   Initializer, DestType, CandidateSet);
+                                   Initializer, DestType, CandidateSet,
+                                   /*AllowObjCConversionOnExplicit=*/false);
       }
     }
   }
@@ -3545,7 +3542,7 @@
 
   // Compute the returned type of the conversion.
   if (isa<CXXConversionDecl>(Function))
-    T2 = Function->getResultType();
+    T2 = Function->getReturnType();
   else
     T2 = cv1T1;
 
@@ -4143,10 +4140,11 @@
           if (ConvTemplate)
             S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
                                              ActingDC, Initializer, DestType,
-                                             CandidateSet);
+                                             CandidateSet, AllowExplicit);
           else
             S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
-                                     Initializer, DestType, CandidateSet);
+                                     Initializer, DestType, CandidateSet,
+                                     AllowExplicit);
         }
       }
     }
@@ -4460,6 +4458,14 @@
   Expr *Initializer = 0;
   if (Args.size() == 1) {
     Initializer = Args[0];
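+    // In Objective-C, a bridged-related conversion or a C-string-to-NSString
+    // conversion may rewrite the initializer expression; keep Args[0] in sync.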
+    if (S.getLangOpts().ObjC1) {
+      if (S.CheckObjCBridgeRelatedConversions(Initializer->getLocStart(),
+                                              DestType, Initializer->getType(),
+                                              Initializer) ||
+          S.ConversionToObjCStringLiteralCheck(DestType, Initializer))
+        Args[0] = Initializer;
+        
+    }
     if (!isa<InitListExpr>(Initializer))
       SourceType = Initializer->getType();
   }
@@ -5218,10 +5224,13 @@
       ConstructKind = CXXConstructExpr::CK_Delegating;
     }
 
-    // Only get the parenthesis range if it is a direct construction.
-    SourceRange parenRange =
-        Kind.getKind() == InitializationKind::IK_Direct ?
-        Kind.getParenRange() : SourceRange();
+    // Only get the parenthesis or brace range if it is a list initialization or
+    // direct construction.
+    SourceRange ParenOrBraceRange;
+    if (IsListInitialization)
+      ParenOrBraceRange = SourceRange(LBraceLoc, RBraceLoc);
+    else if (Kind.getKind() == InitializationKind::IK_Direct)
+      ParenOrBraceRange = Kind.getParenRange();
 
     // If the entity allows NRVO, mark the construction as elidable
     // unconditionally.
@@ -5233,7 +5242,7 @@
                                         IsListInitialization,
                                         ConstructorInitRequiresZeroInit,
                                         ConstructKind,
-                                        parenRange);
+                                        ParenOrBraceRange);
     else
       CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
                                         Constructor,
@@ -5242,7 +5251,7 @@
                                         IsListInitialization,
                                         ConstructorInitRequiresZeroInit,
                                         ConstructKind,
-                                        parenRange);
+                                        ParenOrBraceRange);
   }
   if (CurInit.isInvalid())
     return ExprError();
@@ -5451,9 +5460,7 @@
         performReferenceExtension(ILE->getInit(0), ExtendingD);
       else {
         unsigned Index = 0;
-        for (RecordDecl::field_iterator I = RD->field_begin(),
-                                        E = RD->field_end();
-             I != E; ++I) {
+        for (const auto *I : RD->fields()) {
           if (Index >= ILE->getNumInits())
             break;
           if (I->isUnnamedBitfield())
@@ -5874,7 +5881,7 @@
 
         CastKind = CK_UserDefinedConversion;
 
-        CreatedObject = Conversion->getResultType()->isRecordType();
+        CreatedObject = Conversion->getReturnType()->isRecordType();
       }
 
       bool RequiresCopy = !IsCopy && !isReferenceBinding(Steps.back());
@@ -6196,7 +6203,7 @@
 
     case SK_OCLSamplerInit: {
       assert(Step->Type->isSamplerT() && 
-             "Sampler initialization on non sampler type.");
+             "Sampler initialization on non-sampler type.");
 
       QualType SourceType = CurInit.get()->getType();
 
@@ -6212,7 +6219,7 @@
     }
     case SK_OCLZeroEvent: {
       assert(Step->Type->isEventT() && 
-             "Event initialization on non event type.");
+             "Event initialization on non-event type.");
 
       CurInit = S.ImpCastExprToType(CurInit.take(), Step->Type,
                                     CK_ZeroToOCLEvent,
@@ -6246,8 +6253,7 @@
   if (!RD || !RD->hasUninitializedReferenceMember())
     return false;
 
-  for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
-                                     FE = RD->field_end(); FI != FE; ++FI) {
+  for (const auto *FI : RD->fields()) {
     if (FI->isUnnamedBitfield())
       continue;
 
@@ -6257,10 +6263,8 @@
     }
   }
 
-  for (CXXRecordDecl::base_class_iterator BI = RD->bases_begin(),
-                                          BE = RD->bases_end();
-       BI != BE; ++BI) {
-    if (DiagnoseUninitializedReference(S, BI->getLocStart(), BI->getType())) {
+  for (const auto &BI : RD->bases()) {
+    if (DiagnoseUninitializedReference(S, BI.getLocStart(), BI.getType())) {
       S.Diag(Loc, diag::note_value_initialization_here) << RD;
       return true;
     }
diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp
index a7d5b65..0d287f7 100644
--- a/lib/Sema/SemaLambda.cpp
+++ b/lib/Sema/SemaLambda.cpp
@@ -11,6 +11,7 @@
 //
 //===----------------------------------------------------------------------===//
 #include "clang/Sema/DeclSpec.h"
+#include "TypeLocBuilder.h"
 #include "clang/AST/ASTLambda.h"
 #include "clang/AST/ExprCXX.h"
 #include "clang/Basic/TargetInfo.h"
@@ -21,115 +22,204 @@
 #include "clang/Sema/ScopeInfo.h"
 #include "clang/Sema/SemaInternal.h"
 #include "clang/Sema/SemaLambda.h"
-#include "TypeLocBuilder.h"
 using namespace clang;
 using namespace sema;
 
-// returns -1 if none of the lambdas on the scope stack can capture.
-// A lambda 'L' is capture-ready for a certain variable 'V' if,
-//  - its enclosing context is non-dependent
-//  - and if the chain of lambdas between L and the lambda in which 
-//    V is potentially used, call all capture or have captured V.
-static inline int GetScopeIndexOfNearestCaptureReadyLambda(
-    ArrayRef<clang::sema::FunctionScopeInfo*> FunctionScopes,
-    DeclContext *const CurContext, VarDecl *VD) {
+/// \brief Examines the FunctionScopeInfo stack to determine the nearest
+/// enclosing lambda (to the current lambda) that is 'capture-ready' for 
+/// the variable referenced in the current lambda (i.e. \p VarToCapture).
+/// If successful, returns the index into Sema's FunctionScopeInfo stack
+/// of the capture-ready lambda's LambdaScopeInfo.
+///  
+/// Climbs down the stack of lambdas (deepest nested lambda - i.e. current 
+/// lambda - is on top) to determine the index of the nearest enclosing/outer
+/// lambda that is ready to capture the \p VarToCapture being referenced in 
+/// the current lambda. 
+/// As we climb down the stack, we want the index of the first such lambda -
+/// that is the lambda with the highest index that is 'capture-ready'. 
+/// 
+/// A lambda 'L' is capture-ready for 'V' (var or this) if:
+///  - its enclosing context is non-dependent
+///  - and if the chain of lambdas between L and the lambda in which
+///    V is potentially used (i.e. the lambda at the top of the scope info 
+///    stack), can all capture or have already captured V.
+/// If \p VarToCapture is 'null' then we are trying to capture 'this'.
+/// 
+/// Note that a lambda that is deemed 'capture-ready' still needs to be checked
+/// for whether it is 'capture-capable' (see
+/// getStackIndexOfNearestEnclosingCaptureCapableLambda), before it can truly 
+/// capture.
+///
+/// \param FunctionScopes - Sema's stack of nested FunctionScopeInfo's (which a
+///  LambdaScopeInfo inherits from).  The current/deepest/innermost lambda
+///  is at the top of the stack and has the highest index.
+/// \param VarToCapture - the variable to capture.  If NULL, capture 'this'.
+///
+/// \returns An Optional<unsigned> Index that, if it evaluates to 'true',
+/// contains the index (into Sema's FunctionScopeInfo stack) of the innermost
+/// lambda which is capture-ready.  If the return value evaluates to 'false'
+/// then no lambda is capture-ready for \p VarToCapture.
+
+static inline Optional<unsigned>
+getStackIndexOfNearestEnclosingCaptureReadyLambda(
+    ArrayRef<const clang::sema::FunctionScopeInfo *> FunctionScopes,
+    VarDecl *VarToCapture) {
+  // Label failure to capture.
+  const Optional<unsigned> NoLambdaIsCaptureReady;
+
+  assert(
+      isa<clang::sema::LambdaScopeInfo>(
+          FunctionScopes[FunctionScopes.size() - 1]) &&
+      "The function on the top of sema's function-info stack must be a lambda");
   
-  DeclContext *EnclosingDC = CurContext;
-  // If VD is null, we are attempting to capture 'this'
-  const bool IsCapturingThis = !VD;
+  // If VarToCapture is null, we are attempting to capture 'this'.
+  const bool IsCapturingThis = !VarToCapture;
   const bool IsCapturingVariable = !IsCapturingThis;
-  int RetIndex = -1;
+
+  // Start with the current lambda at the top of the stack (highest index).
   unsigned CurScopeIndex = FunctionScopes.size() - 1;
-  while (!EnclosingDC->isTranslationUnit() && 
-      EnclosingDC->isDependentContext() && isLambdaCallOperator(EnclosingDC)) {
-    RetIndex = CurScopeIndex;
-    clang::sema::LambdaScopeInfo *LSI = 
-          cast<sema::LambdaScopeInfo>(FunctionScopes[CurScopeIndex]);
-    // We have crawled up to an intervening lambda that contains the 
-    // variable declaration - so not only does it not need to capture;
-    // none of the enclosing lambdas need to capture it, and since all
-    // other nested lambdas are dependent (otherwise we wouldn't have
-    // arrived here) - we don't yet have a lambda that can capture the
+  DeclContext *EnclosingDC =
+      cast<sema::LambdaScopeInfo>(FunctionScopes[CurScopeIndex])->CallOperator;
+
+  do {
+    const clang::sema::LambdaScopeInfo *LSI =
+        cast<sema::LambdaScopeInfo>(FunctionScopes[CurScopeIndex]);
+    // If we have climbed down to an intervening enclosing lambda that contains
+    // the variable declaration - it obviously cannot (and must not) capture the
     // variable.
-    if (IsCapturingVariable && VD->getDeclContext()->Equals(EnclosingDC)) 
-      return -1;
-    // All intervening lambda call operators have to be able to capture.
+    // Since its enclosing DC is dependent, all the lambdas between it and the
+    // innermost nested lambda are dependent (otherwise we wouldn't have
+    // arrived here) - so we don't yet have a lambda that can capture the
+    // variable.
+    if (IsCapturingVariable &&
+        VarToCapture->getDeclContext()->Equals(EnclosingDC))
+      return NoLambdaIsCaptureReady;
+
+    // For an enclosing lambda to be capture-ready for an entity, all
+    // intervening lambdas have to be able to capture that entity. If even
+    // one of the intervening lambdas is not capable of capturing the entity
+    // then no enclosing lambda can ever capture that entity.
+    // For example:
+    // const int x = 10;
+    // [=](auto a) {    #1
+    //   [](auto b) {   #2 <-- an intervening lambda that can never capture 'x'
+    //    [=](auto c) { #3
+    //       f(x, c);  <-- cannot lead to x's speculative capture by #1 or #2
+    //    }; }; };
     // If they do not have a default implicit capture, check to see
     // if the entity has already been explicitly captured.
-    // If even a single dependent enclosing lambda lacks the capability 
-    // to ever capture this variable, there is no further enclosing 
+    // If even a single dependent enclosing lambda lacks the capability
+    // to ever capture this variable, there is no further enclosing
     // non-dependent lambda that can capture this variable.
     if (LSI->ImpCaptureStyle == sema::LambdaScopeInfo::ImpCap_None) {
-      if (IsCapturingVariable && !LSI->isCaptured(VD)) 
-        return -1;
+      if (IsCapturingVariable && !LSI->isCaptured(VarToCapture))
+        return NoLambdaIsCaptureReady;
       if (IsCapturingThis && !LSI->isCXXThisCaptured())
-        return -1;
+        return NoLambdaIsCaptureReady;
     }
     EnclosingDC = getLambdaAwareParentOfDeclContext(EnclosingDC);
+    
+    assert(CurScopeIndex);
     --CurScopeIndex;
-  }
-  // If the enclosingDC is not dependent, then the immediately nested lambda
-  // is capture-ready.
-  if (!EnclosingDC->isDependentContext())
-    return RetIndex;
-  return -1;
-}
-// Given a lambda's call operator and a variable (or null for 'this'), 
-// compute the nearest enclosing lambda that is capture-ready (i.e 
-// the enclosing context is not dependent, and all intervening lambdas can 
-// either implicitly or explicitly capture Var)
-// 
-// The approach is as follows, for the entity VD ('this' if null):
-//   - start with the current lambda
-//     - if it is non-dependent and can capture VD, return it. 
-//     - if it is dependent and has an implicit or explicit capture, check its parent 
-//       whether the parent is non-depdendent and all its intervening lambdas
-//       can capture, if so return the child.
-//       [Note: When we hit a generic lambda specialization, do not climb up
-//         the scope stack any further since not only do we not need to,
-//         the scope stack will often not be synchronized with any lambdas 
-//         enclosing the specialized generic lambda]
-//       
-// Return the CallOperator of the capturable lambda and set function scope 
-// index to the correct index within the function scope stack to correspond 
-// to the capturable lambda.
-// If VarDecl *VD is null, we check for 'this' capture.
-CXXMethodDecl* clang::GetInnermostEnclosingCapturableLambda(
-                             ArrayRef<sema::FunctionScopeInfo*> FunctionScopes,
-                             unsigned &FunctionScopeIndex, 
-                             DeclContext *const CurContext, VarDecl *VD, 
-                             Sema &S) {
+  } while (!EnclosingDC->isTranslationUnit() &&
+           EnclosingDC->isDependentContext() &&
+           isLambdaCallOperator(EnclosingDC));
 
-  const int IndexOfCaptureReadyLambda = 
-      GetScopeIndexOfNearestCaptureReadyLambda(FunctionScopes,CurContext, VD);
-  if (IndexOfCaptureReadyLambda == -1) return 0;
-  assert(IndexOfCaptureReadyLambda >= 0);
-  const unsigned IndexOfCaptureReadyLambdaU = 
-      static_cast<unsigned>(IndexOfCaptureReadyLambda);
-  sema::LambdaScopeInfo *const CaptureReadyLambdaLSI = 
-      cast<sema::LambdaScopeInfo>(FunctionScopes[IndexOfCaptureReadyLambdaU]);
-  // If VD is null, we are attempting to capture 'this'
-  const bool IsCapturingThis = !VD;
+  assert(CurScopeIndex < (FunctionScopes.size() - 1));
+  // If the enclosingDC is not dependent, then the immediately nested lambda
+  // (one index above) is capture-ready.
+  if (!EnclosingDC->isDependentContext())
+    return CurScopeIndex + 1;
+  return NoLambdaIsCaptureReady;
+}
+
+/// \brief Examines the FunctionScopeInfo stack to determine the nearest
+/// enclosing lambda (to the current lambda) that is 'capture-capable' for 
+/// the variable referenced in the current lambda (i.e. \p VarToCapture).
+/// If successful, returns the index into Sema's FunctionScopeInfo stack
+/// of the capture-capable lambda's LambdaScopeInfo.
+///
+/// Given the current stack of lambdas being processed by Sema and
+/// the variable of interest, to identify the nearest enclosing lambda (to the 
+/// current lambda at the top of the stack) that can truly capture
+/// a variable, it has to have the following two properties:
+///  a) 'capture-ready' - be the innermost lambda that is 'capture-ready':
+///     - climb down the stack (i.e. starting from the innermost and examining
+///       each outer lambda step by step) checking if each enclosing
+///       lambda can either implicitly or explicitly capture the variable.
+///       Record the first such lambda that is enclosed in a non-dependent
+///       context. If no such lambda currently exists return failure.
+///  b) 'capture-capable' - make sure the 'capture-ready' lambda can truly
+///  capture the variable by checking all its enclosing lambdas:
+///     - check if all outer lambdas enclosing the 'capture-ready' lambda
+///       identified above in 'a' can also capture the variable (this is done
+///       via tryCaptureVariable for variables and CheckCXXThisCapture for
+///       'this' by passing in the index of the Lambda identified in step 'a')
+///
+/// \param FunctionScopes - Sema's stack of nested FunctionScopeInfo's (which a
+/// LambdaScopeInfo inherits from).  The current/deepest/innermost lambda
+/// is at the top of the stack.
+///
+/// \param VarToCapture - the variable to capture.  If NULL, capture 'this'.
+///
+///
+/// \returns An Optional<unsigned> Index that, if it evaluates to 'true',
+/// contains the index (into Sema's FunctionScopeInfo stack) of the innermost
+/// lambda which is capture-capable.  If the return value evaluates to 'false'
+/// then no lambda is capture-capable for \p VarToCapture.
+
+Optional<unsigned> clang::getStackIndexOfNearestEnclosingCaptureCapableLambda(
+    ArrayRef<const sema::FunctionScopeInfo *> FunctionScopes,
+    VarDecl *VarToCapture, Sema &S) {
+
+  const Optional<unsigned> NoLambdaIsCaptureCapable;
+  
+  const Optional<unsigned> OptionalStackIndex =
+      getStackIndexOfNearestEnclosingCaptureReadyLambda(FunctionScopes,
+                                                        VarToCapture);
+  if (!OptionalStackIndex)
+    return NoLambdaIsCaptureCapable;
+
+  const unsigned IndexOfCaptureReadyLambda = OptionalStackIndex.getValue();
+  assert(((IndexOfCaptureReadyLambda != (FunctionScopes.size() - 1)) ||
+          S.getCurGenericLambda()) &&
+         "The capture ready lambda for a potential capture can only be the "
+         "current lambda if it is a generic lambda");
+
+  const sema::LambdaScopeInfo *const CaptureReadyLambdaLSI =
+      cast<sema::LambdaScopeInfo>(FunctionScopes[IndexOfCaptureReadyLambda]);
+  
+  // If VarToCapture is null, we are attempting to capture 'this'
+  const bool IsCapturingThis = !VarToCapture;
   const bool IsCapturingVariable = !IsCapturingThis;
 
   if (IsCapturingVariable) {
-    // Now check to see if this lambda can truly capture, and also
-    // if all enclosing lambdas of this lambda allow this capture.
+    // Check if the capture-ready lambda can truly capture the variable, by
+    // checking whether all enclosing lambdas of the capture-ready lambda allow
+    // the capture - i.e. make sure it is capture-capable.
     QualType CaptureType, DeclRefType;
-    const bool CanCaptureVariable = !S.tryCaptureVariable(VD, 
-      /*ExprVarIsUsedInLoc*/SourceLocation(), clang::Sema::TryCapture_Implicit,
-      /*EllipsisLoc*/ SourceLocation(), 
-      /*BuildAndDiagnose*/false, CaptureType, DeclRefType, 
-      &IndexOfCaptureReadyLambdaU);
-    if (!CanCaptureVariable) return 0;
-  } else { 
-    const bool CanCaptureThis = !S.CheckCXXThisCapture(
-        CaptureReadyLambdaLSI->PotentialThisCaptureLocation, false, false, 
-        &IndexOfCaptureReadyLambdaU);
-    if (!CanCaptureThis) return 0;      
-  } // end 'this' capture test
-  FunctionScopeIndex = IndexOfCaptureReadyLambdaU;
-  return CaptureReadyLambdaLSI->CallOperator;
+    const bool CanCaptureVariable =
+        !S.tryCaptureVariable(VarToCapture,
+                              /*ExprVarIsUsedInLoc*/ SourceLocation(),
+                              clang::Sema::TryCapture_Implicit,
+                              /*EllipsisLoc*/ SourceLocation(),
+                              /*BuildAndDiagnose*/ false, CaptureType,
+                              DeclRefType, &IndexOfCaptureReadyLambda);
+    if (!CanCaptureVariable)
+      return NoLambdaIsCaptureCapable;
+  } else {
+    // Check if the capture-ready lambda can truly capture 'this' by checking
+    // whether all enclosing lambdas of the capture-ready lambda can capture
+    // 'this'.
+    const bool CanCaptureThis =
+        !S.CheckCXXThisCapture(
+             CaptureReadyLambdaLSI->PotentialThisCaptureLocation,
+             /*Explicit*/ false, /*BuildAndDiagnose*/ false,
+             &IndexOfCaptureReadyLambda);
+    if (!CanCaptureThis)
+      return NoLambdaIsCaptureCapable;
+  } 
+  return IndexOfCaptureReadyLambda;
 }
 
 static inline TemplateParameterList *
@@ -142,17 +232,14 @@
     SourceLocation LAngleLoc = IntroRange.getBegin();
     SourceLocation RAngleLoc = IntroRange.getEnd();
     LSI->GLTemplateParameterList = TemplateParameterList::Create(
-                                   SemaRef.Context, 
-                                   /*Template kw loc*/SourceLocation(), 
-                                   LAngleLoc,
-                                   (NamedDecl**)LSI->AutoTemplateParams.data(),
-                                   LSI->AutoTemplateParams.size(), RAngleLoc);  
+        SemaRef.Context,
+        /*Template kw loc*/ SourceLocation(), LAngleLoc,
+        (NamedDecl **)LSI->AutoTemplateParams.data(),
+        LSI->AutoTemplateParams.size(), RAngleLoc);
   }
   return LSI->GLTemplateParameterList;
 }
 
-
-
 CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange,
                                              TypeSourceInfo *Info,
                                              bool KnownDependent, 
@@ -169,7 +256,7 @@
                                                      IsGenericLambda, 
                                                      CaptureDefault);
   DC->addDecl(Class);
-  
+
   return Class;
 }
 
@@ -277,10 +364,10 @@
   // dependent type.
   if (Class->isDependentContext() || TemplateParams) {
     const FunctionProtoType *FPT = MethodType->castAs<FunctionProtoType>();
-    QualType Result = FPT->getResultType();
+    QualType Result = FPT->getReturnType();
     if (Result->isUndeducedType()) {
       Result = SubstAutoType(Result, Context.DependentTy);
-      MethodType = Context.getFunctionType(Result, FPT->getArgTypes(),
+      MethodType = Context.getFunctionType(Result, FPT->getParamTypes(),
                                            FPT->getExtProtoInfo());
     }
   }
@@ -331,10 +418,8 @@
                              const_cast<ParmVarDecl **>(Params.end()),
                              /*CheckParameterNames=*/false);
     
-    for (CXXMethodDecl::param_iterator P = Method->param_begin(), 
-                                    PEnd = Method->param_end();
-         P != PEnd; ++P)
-      (*P)->setOwningFunction(Method);
+    for (auto P : Method->params())
+      P->setOwningFunction(Method);
   }
 
   Decl *ManglingContextDecl;
@@ -369,8 +454,8 @@
   LSI->Mutable = Mutable;
 
   if (ExplicitResultType) {
-    LSI->ReturnType = CallOperator->getResultType();
-    
+    LSI->ReturnType = CallOperator->getReturnType();
+
     if (!LSI->ReturnType->isDependentType() &&
         !LSI->ReturnType->isVoidType()) {
       if (RequireCompleteType(CallOperator->getLocStart(), LSI->ReturnType,
@@ -651,6 +736,9 @@
       return QualType();
     } else {
       DeduceInit = CXXDirectInit->getExpr(0);
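+      // Reject an init-capture whose parenthesized initializer is itself a
+      // braced list, e.g. [x({0})].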
+      if (isa<InitListExpr>(DeduceInit))
+        Diag(CXXDirectInit->getLocStart(), diag::err_init_capture_paren_braces)
+          << DeclarationName(Id) << Loc;
     }
   }
 
@@ -811,14 +899,13 @@
 
     ExplicitResultType = FTI.hasTrailingReturnType();
 
-    if (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
-        cast<ParmVarDecl>(FTI.ArgInfo[0].Param)->getType()->isVoidType()) {
+    if (FTI.NumParams == 1 && !FTI.isVariadic && FTI.Params[0].Ident == 0 &&
+        cast<ParmVarDecl>(FTI.Params[0].Param)->getType()->isVoidType()) {
       // Empty arg list, don't push any params.
-      checkVoidParamDecl(cast<ParmVarDecl>(FTI.ArgInfo[0].Param));
     } else {
-      Params.reserve(FTI.NumArgs);
-      for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i)
-        Params.push_back(cast<ParmVarDecl>(FTI.ArgInfo[i].Param));
+      Params.reserve(FTI.NumParams);
+      for (unsigned i = 0, e = FTI.NumParams; i != e; ++i)
+        Params.push_back(cast<ParmVarDecl>(FTI.Params[i].Param));
     }
 
     // Check for unexpanded parameter packs in the method type.
@@ -848,6 +935,23 @@
                        ExplicitResultType,
                        !Method->isConst());
 
+  // C++11 [expr.prim.lambda]p9:
+  //   A lambda-expression whose smallest enclosing scope is a block scope is a
+  //   local lambda expression; any other lambda expression shall not have a
+  //   capture-default or simple-capture in its lambda-introducer.
+  //
+  // For simple-captures, this is covered by the check below that any named
+  // entity is a variable that can be captured.
+  //
+  // For DR1632, we also allow a capture-default in any context where we can
+  // odr-use 'this' (in particular, in a default initializer for a non-static
+  // data member).
+  if (Intro.Default != LCD_None && !Class->getParent()->isFunctionOrMethod() &&
+      (getCurrentThisType().isNull() ||
+       CheckCXXThisCapture(SourceLocation(), /*Explicit*/true,
+                           /*BuildAndDiagnose*/false)))
+    Diag(Intro.DefaultLoc, diag::err_capture_default_non_local);
+
   // Distinct capture names, for diagnostics.
   llvm::SmallSet<IdentifierInfo*, 8> CaptureNames;
 
@@ -958,6 +1062,8 @@
       }
 
       Var = R.getAsSingle<VarDecl>();
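+      // Complain about uses of deprecated or unavailable variables named in
+      // the capture list, and skip the capture if the use is an error.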
+      if (Var && DiagnoseUseOfDecl(Var, C->Loc))
+        continue;
     }
 
     // C++11 [expr.prim.lambda]p8:
@@ -1045,12 +1151,9 @@
   LambdaScopeInfo *LSI = getCurLambda();
   CXXRecordDecl *Class = LSI->Lambda;
   Class->setInvalidDecl();
-  SmallVector<Decl*, 4> Fields;
-  for (RecordDecl::field_iterator i = Class->field_begin(),
-                                  e = Class->field_end(); i != e; ++i)
-    Fields.push_back(*i);
-  ActOnFields(0, Class->getLocation(), Class, Fields, 
-              SourceLocation(), SourceLocation(), 0);
+  SmallVector<Decl*, 4> Fields(Class->fields());
+  ActOnFields(0, Class->getLocation(), Class, Fields, SourceLocation(),
+              SourceLocation(), 0);
   CheckCompletedCXXClass(Class);
 
   PopFunctionScopeInfo();
@@ -1077,8 +1180,9 @@
     InvokerExtInfo.TypeQuals = 0;
     assert(InvokerExtInfo.RefQualifier == RQ_None && 
         "Lambda's call operator should not have a reference qualifier");
-    InvokerFunctionTy = S.Context.getFunctionType(CallOpProto->getResultType(),
-        CallOpProto->getArgTypes(), InvokerExtInfo);
+    InvokerFunctionTy =
+        S.Context.getFunctionType(CallOpProto->getReturnType(),
+                                  CallOpProto->getParamTypes(), InvokerExtInfo);
     PtrToFunctionTy = S.Context.getPointerType(InvokerFunctionTy);
   }
 
@@ -1122,7 +1226,7 @@
       ConvTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
   // Get the result of the conversion function which is a pointer-to-function.
   PointerTypeLoc PtrToFunctionTL = 
-      ConvTL.getResultLoc().getAs<PointerTypeLoc>();
+      ConvTL.getReturnLoc().getAs<PointerTypeLoc>();
   // Do the same for the TypeSourceInfo that is used to name the conversion
   // operator.
   PointerTypeLoc ConvNamePtrToFunctionTL = 
@@ -1157,8 +1261,8 @@
                                              From->getTypeSourceInfo(),
                                              From->getStorageClass(),
                                              /*DefaultArg=*/0));
-    CallOpConvTL.setArg(I, From);
-    CallOpConvNameTL.setArg(I, From);
+    CallOpConvTL.setParam(I, From);
+    CallOpConvNameTL.setParam(I, From);
   }
 
   CXXConversionDecl *Conversion 
@@ -1244,7 +1348,7 @@
     FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
     ExtInfo.TypeQuals = 0;
     QualType FunctionTy = S.Context.getFunctionType(
-        Proto->getResultType(), Proto->getArgTypes(), ExtInfo);
+        Proto->getReturnType(), Proto->getParamTypes(), ExtInfo);
     BlockPtrTy = S.Context.getBlockPointerType(FunctionTy);
   }
 
@@ -1369,7 +1473,7 @@
       const FunctionProtoType *Proto
         = CallOperator->getType()->getAs<FunctionProtoType>();
       QualType FunctionTy = Context.getFunctionType(
-          LSI->ReturnType, Proto->getArgTypes(), Proto->getExtProtoInfo());
+          LSI->ReturnType, Proto->getParamTypes(), Proto->getExtProtoInfo());
       CallOperator->setType(FunctionTy);
     }
     // C++ [expr.prim.lambda]p7:
@@ -1407,12 +1511,9 @@
       addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
     
     // Finalize the lambda class.
-    SmallVector<Decl*, 4> Fields;
-    for (RecordDecl::field_iterator i = Class->field_begin(),
-                                    e = Class->field_end(); i != e; ++i)
-      Fields.push_back(*i);
-    ActOnFields(0, Class->getLocation(), Class, Fields, 
-                SourceLocation(), SourceLocation(), 0);
+    SmallVector<Decl*, 4> Fields(Class->fields());
+    ActOnFields(0, Class->getLocation(), Class, Fields, SourceLocation(),
+                SourceLocation(), 0);
     CheckCompletedCXXClass(Class);
   }
 
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index 919c6ad..ac6fb25 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -113,10 +113,8 @@
         if (Ctx && Ctx->isFileContext()) {
           visit(Ctx, Ctx);
         } else if (!Ctx || Ctx->isFunctionOrMethod()) {
-          Scope::udir_iterator I = S->using_directives_begin(),
-                             End = S->using_directives_end();
-          for (; I != End; ++I)
-            visit(*I, InnermostFileDC);
+          for (auto *I : S->using_directives())
+            visit(I, InnermostFileDC);
         }
       }
     }
@@ -153,9 +151,7 @@
     void addUsingDirectives(DeclContext *DC, DeclContext *EffectiveDC) {
       SmallVector<DeclContext*,4> queue;
       while (true) {
-        DeclContext::udir_iterator I, End;
-        for (llvm::tie(I, End) = DC->getUsingDirectives(); I != End; ++I) {
-          UsingDirectiveDecl *UD = *I;
+        for (auto UD : DC->using_directives()) {
           DeclContext *NS = UD->getNominatedNamespace();
           if (visited.insert(NS)) {
             addUsingDirective(UD, EffectiveDC);
@@ -288,36 +284,33 @@
   IDNS = getIDNS(LookupKind, SemaRef.getLangOpts().CPlusPlus,
                  isForRedeclaration());
 
-  if (!isForRedeclaration()) {
-    // If we're looking for one of the allocation or deallocation
-    // operators, make sure that the implicitly-declared new and delete
-    // operators can be found.
-    switch (NameInfo.getName().getCXXOverloadedOperator()) {
-    case OO_New:
-    case OO_Delete:
-    case OO_Array_New:
-    case OO_Array_Delete:
-      SemaRef.DeclareGlobalNewDelete();
-      break;
+  // If we're looking for one of the allocation or deallocation
+  // operators, make sure that the implicitly-declared new and delete
+  // operators can be found.
+  switch (NameInfo.getName().getCXXOverloadedOperator()) {
+  case OO_New:
+  case OO_Delete:
+  case OO_Array_New:
+  case OO_Array_Delete:
+    SemaRef.DeclareGlobalNewDelete();
+    break;
 
-    default:
-      break;
-    }
+  default:
+    break;
+  }
 
-    // Compiler builtins are always visible, regardless of where they end
-    // up being declared.
-    if (IdentifierInfo *Id = NameInfo.getName().getAsIdentifierInfo()) {
-      if (unsigned BuiltinID = Id->getBuiltinID()) {
-        if (!SemaRef.Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
-          AllowHidden = true;
-      }
+  // Compiler builtins are always visible, regardless of where they end
+  // up being declared.
+  if (IdentifierInfo *Id = NameInfo.getName().getAsIdentifierInfo()) {
+    if (unsigned BuiltinID = Id->getBuiltinID()) {
+      if (!SemaRef.Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+        AllowHidden = true;
     }
   }
 }
 
-void LookupResult::sanityImpl() const {
-  // Note that this function is never called by NDEBUG builds. See
-  // LookupResult::sanity().
+bool LookupResult::sanity() const {
+  // This function is never called by NDEBUG builds.
   assert(ResultKind != NotFound || Decls.size() == 0);
   assert(ResultKind != Found || Decls.size() == 1);
   assert(ResultKind != FoundOverloaded || Decls.size() > 1 ||
@@ -330,6 +323,7 @@
   assert((Paths != NULL) == (ResultKind == Ambiguous &&
                              (Ambiguity == AmbiguousBaseSubobjectTypes ||
                               Ambiguity == AmbiguousBaseSubobjects)));
+  return true;
 }
 
 // Necessary because CXXBasePaths is not complete in Sema.h
@@ -544,14 +538,6 @@
           R.addDecl(D);
           return true;
         }
-
-        if (R.isForRedeclaration()) {
-          // If we're redeclaring this function anyway, forget that
-          // this was a builtin at all.
-          S.Context.BuiltinInfo.ForgetBuiltin(BuiltinID, S.Context.Idents);
-        }
-
-        return false;
       }
     }
   }
@@ -777,7 +763,7 @@
   // Perform direct name lookup into the namespaces nominated by the
   // using directives whose common ancestor is this namespace.
   UnqualUsingDirectiveSet::const_iterator UI, UEnd;
-  llvm::tie(UI, UEnd) = UDirs.getNamespacesFor(NS);
+  std::tie(UI, UEnd) = UDirs.getNamespacesFor(NS);
 
   for (; UI != UEnd; ++UI)
     if (LookupDirect(S, R, UI->getNominatedNamespace()))
@@ -989,7 +975,7 @@
     if (Ctx) {
       DeclContext *OuterCtx;
       bool SearchAfterTemplateScope;
-      llvm::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
+      std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
       if (SearchAfterTemplateScope)
         OutsideOfTemplateParamDC = OuterCtx;
 
@@ -1132,7 +1118,7 @@
     if (Ctx) {
       DeclContext *OuterCtx;
       bool SearchAfterTemplateScope;
-      llvm::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
+      std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
       if (SearchAfterTemplateScope)
         OutsideOfTemplateParamDC = OuterCtx;
 
@@ -1278,9 +1264,8 @@
 static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) {
   assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case");
 
-  for (Decl::redecl_iterator RD = D->redecls_begin(), RDEnd = D->redecls_end();
-       RD != RDEnd; ++RD) {
-    if (NamedDecl *ND = dyn_cast<NamedDecl>(*RD)) {
+  for (auto RD : D->redecls()) {
+    if (auto ND = dyn_cast<NamedDecl>(RD)) {
       if (LookupResult::isVisible(SemaRef, ND))
         return ND;
     }
@@ -1462,10 +1447,8 @@
                                                  DeclContext *StartDC) {
   assert(StartDC->isFileContext() && "start context is not a file context");
 
-  DeclContext::udir_iterator I = StartDC->using_directives_begin();
-  DeclContext::udir_iterator E = StartDC->using_directives_end();
-
-  if (I == E) return false;
+  DeclContext::udir_range UsingDirectives = StartDC->using_directives();
+  if (UsingDirectives.begin() == UsingDirectives.end()) return false;
 
   // We have at least added all these contexts to the queue.
   llvm::SmallPtrSet<DeclContext*, 8> Visited;
@@ -1477,8 +1460,8 @@
 
   // We have already looked into the initial namespace; seed the queue
   // with its using-children.
-  for (; I != E; ++I) {
-    NamespaceDecl *ND = (*I)->getNominatedNamespace()->getOriginalNamespace();
+  for (auto *I : UsingDirectives) {
+    NamespaceDecl *ND = I->getNominatedNamespace()->getOriginalNamespace();
     if (Visited.insert(ND))
       Queue.push_back(ND);
   }
@@ -1525,8 +1508,8 @@
       continue;
     }
 
-    for (llvm::tie(I,E) = ND->getUsingDirectives(); I != E; ++I) {
-      NamespaceDecl *Nom = (*I)->getNominatedNamespace();
+    for (auto I : ND->using_directives()) {
+      NamespaceDecl *Nom = I->getNominatedNamespace();
       if (Visited.insert(Nom))
         Queue.push_back(Nom);
     }
@@ -2039,6 +2022,10 @@
 
   // Add the class itself. If we've already seen this class, we don't
   // need to visit base classes.
+  //
+  // FIXME: That's not correct, we may have added this class only because it
+  // was the enclosing class of another class, and in that case we won't have
+  // added its base classes yet.
   if (!Result.Classes.insert(Class))
     return;
 
@@ -2065,12 +2052,8 @@
   }
 
   // Only recurse into base classes for complete types.
-  if (!Class->hasDefinition()) {
-    QualType type = Result.S.Context.getTypeDeclType(Class);
-    if (Result.S.RequireCompleteType(Result.InstantiationLoc, type,
-                                     /*no diagnostic*/ 0))
-      return;
-  }
+  if (!Class->hasDefinition())
+    return;
 
   // Add direct and indirect base classes along with their associated
   // namespaces.
@@ -2081,10 +2064,8 @@
     Class = Bases.pop_back_val();
 
     // Visit the base classes.
-    for (CXXRecordDecl::base_class_iterator Base = Class->bases_begin(),
-                                         BaseEnd = Class->bases_end();
-         Base != BaseEnd; ++Base) {
-      const RecordType *BaseType = Base->getType()->getAs<RecordType>();
+    for (const auto &Base : Class->bases()) {
+      const RecordType *BaseType = Base.getType()->getAs<RecordType>();
       // In dependent contexts, we do ADL twice, and the first time around,
       // the base type might be a dependent TemplateSpecializationType, or a
       // TemplateTypeParmType. If that happens, simply ignore it.
@@ -2164,6 +2145,8 @@
     //        classes. Its associated namespaces are the namespaces in
     //        which its associated classes are defined.
     case Type::Record: {
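+      // Try to complete the record type without diagnosing; the helper below
+      // only recurses into base classes for complete types.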
+      Result.S.RequireCompleteType(Result.InstantiationLoc, QualType(T, 0),
+                                   /*no diagnostic*/ 0);
       CXXRecordDecl *Class
         = cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
       addAssociatedClassesAndNamespaces(Result, Class);
@@ -2192,15 +2175,13 @@
     //        types and those associated with the return type.
     case Type::FunctionProto: {
       const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
-      for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
-                                             ArgEnd = Proto->arg_type_end();
-             Arg != ArgEnd; ++Arg)
-        Queue.push_back(Arg->getTypePtr());
+      for (const auto &Arg : Proto->param_types())
+        Queue.push_back(Arg.getTypePtr());
       // fallthrough
     }
     case Type::FunctionNoProto: {
       const FunctionType *FnType = cast<FunctionType>(T);
-      T = FnType->getResultType().getTypePtr();
+      T = FnType->getReturnType().getTypePtr();
       continue;
     }
 
@@ -2318,11 +2299,7 @@
     for (UnresolvedSetIterator I = ULE->decls_begin(), E = ULE->decls_end();
            I != E; ++I) {
       // Look through any using declarations to find the underlying function.
-      NamedDecl *Fn = (*I)->getUnderlyingDecl();
-
-      FunctionDecl *FDecl = dyn_cast<FunctionDecl>(Fn);
-      if (!FDecl)
-        FDecl = cast<FunctionTemplateDecl>(Fn)->getTemplatedDecl();
+      FunctionDecl *FDecl = (*I)->getUnderlyingDecl()->getAsFunction();
 
       // Add the classes and namespaces associated with the parameter
       // types and return type of this function.
@@ -2347,20 +2324,20 @@
     return true;
 
   const FunctionProtoType *Proto = Fn->getType()->getAs<FunctionProtoType>();
-  if (Proto->getNumArgs() < 1)
+  if (Proto->getNumParams() < 1)
     return false;
 
   if (T1->isEnumeralType()) {
-    QualType ArgType = Proto->getArgType(0).getNonReferenceType();
+    QualType ArgType = Proto->getParamType(0).getNonReferenceType();
     if (Context.hasSameUnqualifiedType(T1, ArgType))
       return true;
   }
 
-  if (Proto->getNumArgs() < 2)
+  if (Proto->getNumParams() < 2)
     return false;
 
   if (!T2.isNull() && T2->isEnumeralType()) {
-    QualType ArgType = Proto->getArgType(1).getNonReferenceType();
+    QualType ArgType = Proto->getParamType(1).getNonReferenceType();
     if (Context.hasSameUnqualifiedType(T2, ArgType))
       return true;
   }
@@ -2545,7 +2522,7 @@
   // Now we perform lookup on the name we computed earlier and do overload
   // resolution. Lookup is only performed directly into the class since there
   // will always be a (possibly implicit) declaration to shadow any others.
-  OverloadCandidateSet OCS((SourceLocation()));
+  OverloadCandidateSet OCS(RD->getLocation());
   DeclContext::lookup_result R = RD->lookup(Name);
   assert(!R.empty() &&
          "lookup for a constructor or assignment operator was empty");
@@ -2812,14 +2789,8 @@
   // operator template, but not both.
   if (FoundRaw && FoundTemplate) {
     Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
-    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
-      Decl *D = *I;
-      if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
-        D = USD->getTargetDecl();
-      if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
-        D = FunTmpl->getTemplatedDecl();
-      NoteOverloadCandidate(cast<FunctionDecl>(D));
-    }
+    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
+      NoteOverloadCandidate((*I)->getUnderlyingDecl()->getAsFunction());
     return LOLR_Error;
   }
 
@@ -2851,14 +2822,8 @@
   }
 
   // Otherwise, decide which is a more recent redeclaration.
-  FunctionDecl *OldFD, *NewFD;
-  if (isa<FunctionTemplateDecl>(New)) {
-    OldFD = cast<FunctionTemplateDecl>(Old)->getTemplatedDecl();
-    NewFD = cast<FunctionTemplateDecl>(New)->getTemplatedDecl();
-  } else {
-    OldFD = cast<FunctionDecl>(Old);
-    NewFD = cast<FunctionDecl>(New);
-  }
+  FunctionDecl *OldFD = Old->getAsFunction();
+  FunctionDecl *NewFD = New->getAsFunction();
 
   FunctionDecl *Cursor = NewFD;
   while (true) {
@@ -3063,8 +3028,8 @@
       // Functions and function templates in the same scope overload
       // rather than hide.  FIXME: Look for hiding based on function
       // signatures!
-      if ((*I)->isFunctionOrFunctionTemplate() &&
-          ND->isFunctionOrFunctionTemplate() &&
+      if ((*I)->getUnderlyingDecl()->isFunctionOrFunctionTemplate() &&
+          ND->getUnderlyingDecl()->isFunctionOrFunctionTemplate() &&
           SM == ShadowMaps.rbegin())
         continue;
 
@@ -3092,13 +3057,9 @@
     Result.getSema().ForceDeclarationOfImplicitMembers(Class);
 
   // Enumerate all of the results in this context.
-  for (DeclContext::all_lookups_iterator L = Ctx->lookups_begin(),
-                                      LEnd = Ctx->lookups_end();
-       L != LEnd; ++L) {
-    DeclContext::lookup_result R = *L;
-    for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
-         ++I) {
-      if (NamedDecl *ND = dyn_cast<NamedDecl>(*I)) {
+  for (const auto &R : Ctx->lookups()) {
+    for (auto *I : R) {
+      if (NamedDecl *ND = dyn_cast<NamedDecl>(I)) {
         if ((ND = Result.getAcceptableDecl(ND))) {
           Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
           Visited.add(ND);
@@ -3110,9 +3071,8 @@
   // Traverse using directives for qualified name lookup.
   if (QualifiedNameLookup) {
     ShadowContextRAII Shadow(Visited);
-    DeclContext::udir_iterator I, E;
-    for (llvm::tie(I, E) = Ctx->getUsingDirectives(); I != E; ++I) {
-      LookupVisibleDecls((*I)->getNominatedNamespace(), Result,
+    for (auto I : Ctx->using_directives()) {
+      LookupVisibleDecls(I->getNominatedNamespace(), Result,
                          QualifiedNameLookup, InBaseClass, Consumer, Visited);
     }
   }
@@ -3122,10 +3082,8 @@
     if (!Record->hasDefinition())
       return;
 
-    for (CXXRecordDecl::base_class_iterator B = Record->bases_begin(),
-                                         BEnd = Record->bases_end();
-         B != BEnd; ++B) {
-      QualType BaseType = B->getType();
+    for (const auto &B : Record->bases()) {
+      QualType BaseType = B.getType();
 
       // Don't look into dependent bases, because name lookup can't look
       // there anyway.
@@ -3165,21 +3123,16 @@
   // Traverse the contexts of Objective-C classes.
   if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Ctx)) {
     // Traverse categories.
-    for (ObjCInterfaceDecl::visible_categories_iterator
-           Cat = IFace->visible_categories_begin(),
-           CatEnd = IFace->visible_categories_end();
-         Cat != CatEnd; ++Cat) {
+    for (auto *Cat : IFace->visible_categories()) {
       ShadowContextRAII Shadow(Visited);
-      LookupVisibleDecls(*Cat, Result, QualifiedNameLookup, false,
+      LookupVisibleDecls(Cat, Result, QualifiedNameLookup, false,
                          Consumer, Visited);
     }
 
     // Traverse protocols.
-    for (ObjCInterfaceDecl::all_protocol_iterator
-         I = IFace->all_referenced_protocol_begin(),
-         E = IFace->all_referenced_protocol_end(); I != E; ++I) {
+    for (auto *I : IFace->all_referenced_protocols()) {
       ShadowContextRAII Shadow(Visited);
-      LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+      LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
                          Visited);
     }
 
@@ -3198,17 +3151,15 @@
                          QualifiedNameLookup, InBaseClass, Consumer, Visited);
     }
   } else if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Ctx)) {
-    for (ObjCProtocolDecl::protocol_iterator I = Protocol->protocol_begin(),
-           E = Protocol->protocol_end(); I != E; ++I) {
+    for (auto *I : Protocol->protocols()) {
       ShadowContextRAII Shadow(Visited);
-      LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+      LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
                          Visited);
     }
   } else if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx)) {
-    for (ObjCCategoryDecl::protocol_iterator I = Category->protocol_begin(),
-           E = Category->protocol_end(); I != E; ++I) {
+    for (auto *I : Category->protocols()) {
       ShadowContextRAII Shadow(Visited);
-      LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+      LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
                          Visited);
     }
 
@@ -3234,9 +3185,8 @@
       (S->getEntity())->isFunctionOrMethod()) {
     FindLocalExternScope FindLocals(Result);
     // Walk through the declarations in this Scope.
-    for (Scope::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
-         D != DEnd; ++D) {
-      if (NamedDecl *ND = dyn_cast<NamedDecl>(*D))
+    for (auto *D : S->decls()) {
+      if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
         if ((ND = Result.getAcceptableDecl(ND))) {
           Consumer.FoundDecl(ND, Visited.checkHidden(ND), 0, false);
           Visited.add(ND);
@@ -3297,7 +3247,7 @@
     // Lookup visible declarations in any namespaces found by using
     // directives.
     UnqualUsingDirectiveSet::const_iterator UI, UEnd;
-    llvm::tie(UI, UEnd) = UDirs.getNamespacesFor(Entity);
+    std::tie(UI, UEnd) = UDirs.getNamespacesFor(Entity);
     for (; UI != UEnd; ++UI)
       LookupVisibleDecls(const_cast<DeclContext *>(UI->getNominatedNamespace()),
                          Result, /*QualifiedNameLookup=*/false,
@@ -3411,10 +3361,10 @@
     : Typo(Typo->getName()),
       SemaRef(SemaRef) {}
 
-  bool includeHiddenDecls() const { return true; }
+  bool includeHiddenDecls() const override { return true; }
 
-  virtual void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
-                         bool InBaseClass);
+  void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
+                 bool InBaseClass) override;
   void FoundName(StringRef Name);
   void addKeywordResult(StringRef Keyword);
   void addName(StringRef Name, NamedDecl *ND, NestedNameSpecifier *NNS = NULL,
@@ -3528,7 +3478,7 @@
     CList.push_back(Correction);
 
   while (CorrectionResults.size() > MaxTypoDistanceResultSets)
-    erase(llvm::prior(CorrectionResults.end()));
+    erase(std::prev(CorrectionResults.end()));
 }
 
 // Fill the supplied vector with the IdentifierInfo pointers for each piece of
@@ -4064,7 +4014,7 @@
   // In Microsoft mode, don't perform typo correction in a template member
   // function dependent context because it interferes with the "lookup into
   // dependent bases of class templates" feature.
-  if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() &&
+  if (getLangOpts().MSVCCompat && CurContext->isDependentContext() &&
       isa<CXXMethodDecl>(CurContext))
     return TypoCorrection();
 
@@ -4116,10 +4066,8 @@
 
     // Look in qualified interfaces.
     if (OPT) {
-      for (ObjCObjectPointerType::qual_iterator
-             I = OPT->qual_begin(), E = OPT->qual_end();
-           I != E; ++I)
-        LookupVisibleDecls(*I, LookupKind, Consumer);
+      for (auto *I : OPT->quals())
+        LookupVisibleDecls(I, LookupKind, Consumer);
     }
   } else if (SS && SS->isSet()) {
     QualifiedDC = computeDeclContext(*SS, EnteringContext);
@@ -4191,7 +4139,7 @@
     // FIXME: Re-add the ability to skip very unlikely potential corrections.
     if (IdentifierInfoLookup *External
                             = Context.Idents.getExternalIdentifierLookup()) {
-      OwningPtr<IdentifierIterator> Iter(External->getIdentifiers());
+      std::unique_ptr<IdentifierIterator> Iter(External->getIdentifiers());
       do {
         StringRef Name = Iter->Next();
         if (Name.empty())
@@ -4228,19 +4176,21 @@
         KnownNamespaces[ExternalKnownNamespaces[I]] = true;
     }
 
-    for (llvm::MapVector<NamespaceDecl*, bool>::iterator
-           KNI = KnownNamespaces.begin(),
-           KNIEnd = KnownNamespaces.end();
-         KNI != KNIEnd; ++KNI)
-      Namespaces.AddNameSpecifier(KNI->first);
+    for (auto KNPair : KnownNamespaces)
+      Namespaces.AddNameSpecifier(KNPair.first);
 
-    for (ASTContext::type_iterator TI = Context.types_begin(),
-                                   TIEnd = Context.types_end();
-         TI != TIEnd; ++TI) {
-      if (CXXRecordDecl *CD = (*TI)->getAsCXXRecordDecl()) {
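+    // Determine whether the scope specifier already names a template
+    // specialization; class template specializations are only offered as
+    // qualifiers below in that case.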
+    bool SSIsTemplate = false;
+    if (NestedNameSpecifier *NNS =
+            (SS && SS->isValid()) ? SS->getScopeRep() : 0) {
+      if (const Type *T = NNS->getAsType())
+        SSIsTemplate = T->getTypeClass() == Type::TemplateSpecialization;
+    }
+    for (const auto *TI : Context.types()) {
+      if (CXXRecordDecl *CD = TI->getAsCXXRecordDecl()) {
         CD = CD->getCanonicalDecl();
         if (!CD->isDependentType() && !CD->isAnonymousStructOrUnion() &&
-            !CD->isUnion() &&
+            !CD->isUnion() && CD->getIdentifier() &&
+            (SSIsTemplate || !isa<ClassTemplateSpecializationDecl>(CD)) &&
             (CD->isBeingDefined() || CD->isCompleteDefinition()))
           Namespaces.AddNameSpecifier(CD);
       }
@@ -4258,13 +4208,17 @@
                                               IEnd = DI->second.end();
          I != IEnd; /* Increment in loop. */) {
       // If we only want nested name specifier corrections, ignore potential
-      // corrections that have a different base identifier from the typo.
-      if (AllowOnlyNNSChanges &&
-          I->second.front().getCorrectionAsIdentifierInfo() != Typo) {
-        TypoCorrectionConsumer::result_iterator Prev = I;
-        ++I;
-        DI->second.erase(Prev);
-        continue;
+      // corrections that have a different base identifier from the typo or
+      // whose normalized edit distance is greater than the length of the typo
+      // itself.
+      if (AllowOnlyNNSChanges) {
+        TypoCorrection &TC = I->second.front();
+        if (TC.getCorrectionAsIdentifierInfo() != Typo ||
+            TC.getEditDistance(true) > TypoLen) {
+          TypoCorrectionConsumer::result_iterator Prev = I;
+          ++I;
+          DI->second.erase(Prev);
+          continue;
+        }
       }
 
       // If the item already has been looked up or is a keyword, keep it.
@@ -4337,10 +4291,8 @@
       case LookupResult::FoundOverloaded: {
         TypoCorrectionConsumer::result_iterator Prev = I;
         // Store all of the Decls for overloaded symbols
-        for (LookupResult::iterator TRD = TmpRes.begin(),
-                                 TRDEnd = TmpRes.end();
-             TRD != TRDEnd; ++TRD)
-          Candidate.addCorrectionDecl(*TRD);
+        for (auto *TRD : TmpRes)
+          Candidate.addCorrectionDecl(TRD);
         ++I;
         if (!isCandidateViable(CCC, Candidate)) {
           QualifiedResults.push_back(Candidate);
@@ -4372,15 +4324,10 @@
     // Only perform the qualified lookups for C++
     if (SearchNamespaces) {
       TmpRes.suppressDiagnostics();
-      for (SmallVector<TypoCorrection,
-                       16>::iterator QRI = QualifiedResults.begin(),
-                                  QRIEnd = QualifiedResults.end();
-           QRI != QRIEnd; ++QRI) {
-        for (NamespaceSpecifierSet::iterator NI = Namespaces.begin(),
-                                          NIEnd = Namespaces.end();
-             NI != NIEnd; ++NI) {
-          DeclContext *Ctx = NI->DeclCtx;
-          const Type *NSType = NI->NameSpecifier->getAsType();
+      for (auto QR : QualifiedResults) {
+        for (auto NSI : Namespaces) {
+          DeclContext *Ctx = NSI.DeclCtx;
+          const Type *NSType = NSI.NameSpecifier->getAsType();
 
           // If the current NestedNameSpecifier refers to a class and the
           // current correction candidate is the name of that class, then skip
@@ -4388,26 +4335,26 @@
           // is an appropriate correction.
           if (CXXRecordDecl *NSDecl =
                   NSType ? NSType->getAsCXXRecordDecl() : 0) {
-            if (NSDecl->getIdentifier() == QRI->getCorrectionAsIdentifierInfo())
+            if (NSDecl->getIdentifier() == QR.getCorrectionAsIdentifierInfo())
               continue;
           }
 
-          TypoCorrection TC(*QRI);
+          TypoCorrection TC(QR);
           TC.ClearCorrectionDecls();
-          TC.setCorrectionSpecifier(NI->NameSpecifier);
-          TC.setQualifierDistance(NI->EditDistance);
+          TC.setCorrectionSpecifier(NSI.NameSpecifier);
+          TC.setQualifierDistance(NSI.EditDistance);
           TC.setCallbackDistance(0); // Reset the callback distance
 
           // If the current correction candidate and namespace combination are
           // too far away from the original typo based on the normalized edit
           // distance, then skip performing a qualified name lookup.
           unsigned TmpED = TC.getEditDistance(true);
-          if (QRI->getCorrectionAsIdentifierInfo() != Typo &&
+          if (QR.getCorrectionAsIdentifierInfo() != Typo &&
               TmpED && TypoLen / TmpED < 3)
             continue;
 
           TmpRes.clear();
-          TmpRes.setLookupName(QRI->getCorrectionAsIdentifierInfo());
+          TmpRes.setLookupName(QR.getCorrectionAsIdentifierInfo());
           if (!LookupQualifiedName(TmpRes, Ctx)) continue;
 
           // Any corrections added below will be validated in subsequent
@@ -4563,8 +4510,11 @@
 }
 
 FunctionCallFilterCCC::FunctionCallFilterCCC(Sema &SemaRef, unsigned NumArgs,
-                                             bool HasExplicitTemplateArgs)
-    : NumArgs(NumArgs), HasExplicitTemplateArgs(HasExplicitTemplateArgs) {
+                                             bool HasExplicitTemplateArgs,
+                                             bool AllowNonStaticMethods)
+    : NumArgs(NumArgs), HasExplicitTemplateArgs(HasExplicitTemplateArgs),
+      AllowNonStaticMethods(AllowNonStaticMethods),
+      CurContext(SemaRef.CurContext) {
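+  // Remember the semantic context in which the correction occurs; it is used
+  // later to decide whether non-static member functions are viable candidates.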
   WantTypeSpecifiers = SemaRef.getLangOpts().CPlusPlus;
   WantRemainingKeywords = false;
 }
@@ -4589,13 +4539,32 @@
         if (ValType->isAnyPointerType() || ValType->isReferenceType())
           ValType = ValType->getPointeeType();
         if (const FunctionProtoType *FPT = ValType->getAs<FunctionProtoType>())
-          if (FPT->getNumArgs() == NumArgs)
+          if (FPT->getNumParams() == NumArgs)
             return true;
       }
     }
-    if (FD && FD->getNumParams() >= NumArgs &&
-        FD->getMinRequiredArguments() <= NumArgs)
-      return true;
+
+    // Skip the current candidate if it is not a FunctionDecl or does not accept
+    // the current number of arguments.
+    if (!FD || !(FD->getNumParams() >= NumArgs &&
+                 FD->getMinRequiredArguments() <= NumArgs))
+      continue;
+
+    // If the current candidate is a non-static C++ method and non-static
+    // methods are being excluded, then skip the candidate unless the current
+    // DeclContext is a method in the same class or a descendant class of the
+    // candidate's parent class.
+    if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+      if (!AllowNonStaticMethods && !MD->isStatic()) {
+        CXXMethodDecl *CurMD = dyn_cast_or_null<CXXMethodDecl>(CurContext);
+        CXXRecordDecl *CurRD =
+            CurMD ? CurMD->getParent()->getCanonicalDecl() : 0;
+        CXXRecordDecl *RD = MD->getParent()->getCanonicalDecl();
+        if (!CurRD || (CurRD != RD && !CurRD->isDerivedFrom(RD)))
+          continue;
+      }
+    }
+    return true;
   }
   return false;
 }
diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp
index d9d9cec..3f37efa 100644
--- a/lib/Sema/SemaObjCProperty.cpp
+++ b/lib/Sema/SemaObjCProperty.cpp
@@ -132,11 +132,8 @@
   }
 
   // Check this property against any protocols we inherit.
-  for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
-                                        PEnd = Proto->protocol_end();
-       P != PEnd; ++P) {
-    CheckPropertyAgainstProtocol(S, Prop, *P, Known);
-  }
+  for (auto *P : Proto->protocols())
+    CheckPropertyAgainstProtocol(S, Prop, P, Known);
 }
 
 Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
@@ -204,7 +201,8 @@
   if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
     // For a class, compare the property against a property in our superclass.
     bool FoundInSuper = false;
-    if (ObjCInterfaceDecl *Super = IFace->getSuperClass()) {
+    ObjCInterfaceDecl *CurrentInterfaceDecl = IFace;
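+    // Walk the entire superclass chain, not just the immediate superclass,
+    // when looking for a prior declaration of this property.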
+    while (ObjCInterfaceDecl *Super = CurrentInterfaceDecl->getSuperClass()) {
       DeclContext::lookup_result R = Super->lookup(Res->getDeclName());
       for (unsigned I = 0, N = R.size(); I != N; ++I) {
         if (ObjCPropertyDecl *SuperProp = dyn_cast<ObjCPropertyDecl>(R[I])) {
@@ -213,37 +211,30 @@
           break;
         }
       }
+      if (FoundInSuper)
+        break;
+      else
+        CurrentInterfaceDecl = Super;
     }
 
     if (FoundInSuper) {
       // Also compare the property against a property in our protocols.
-      for (ObjCInterfaceDecl::protocol_iterator P = IFace->protocol_begin(),
-                                             PEnd = IFace->protocol_end();
-           P != PEnd; ++P) {
-        CheckPropertyAgainstProtocol(*this, Res, *P, KnownProtos);
+      for (auto *P : CurrentInterfaceDecl->protocols()) {
+        CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
       }
     } else {
       // Slower path: look in all protocols we referenced.
-      for (ObjCInterfaceDecl::all_protocol_iterator
-             P = IFace->all_referenced_protocol_begin(),
-             PEnd = IFace->all_referenced_protocol_end();
-           P != PEnd; ++P) {
-        CheckPropertyAgainstProtocol(*this, Res, *P, KnownProtos);
+      for (auto *P : IFace->all_referenced_protocols()) {
+        CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
       }
     }
   } else if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
-    for (ObjCCategoryDecl::protocol_iterator P = Cat->protocol_begin(),
-                                          PEnd = Cat->protocol_end();
-         P != PEnd; ++P) {
-      CheckPropertyAgainstProtocol(*this, Res, *P, KnownProtos);
-    }
+    for (auto *P : Cat->protocols())
+      CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
   } else {
     ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(ClassDecl);
-    for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
-                                          PEnd = Proto->protocol_end();
-         P != PEnd; ++P) {
-      CheckPropertyAgainstProtocol(*this, Res, *P, KnownProtos);
-    }
+    for (auto *P : Proto->protocols())
+      CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
   }
 
   ActOnDocumentableDecl(Res);
@@ -321,21 +312,6 @@
                  ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
 }
 
-static const char *NameOfOwnershipAttribute(unsigned attr) {
-  if (attr & ObjCPropertyDecl::OBJC_PR_assign)
-    return "assign";
-  if (attr & ObjCPropertyDecl::OBJC_PR_retain )
-    return "retain";
-  if (attr & ObjCPropertyDecl::OBJC_PR_copy)
-    return "copy";
-  if (attr & ObjCPropertyDecl::OBJC_PR_weak)
-    return "weak";
-  if (attr & ObjCPropertyDecl::OBJC_PR_strong)
-    return "strong";
-  assert(attr & ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
-  return "unsafe_unretained";
-}
-
 ObjCPropertyDecl *
 Sema::HandlePropertyInClassExtension(Scope *S,
                                      SourceLocation AtLoc,
@@ -358,12 +334,9 @@
   if (CCPrimary) {
     // Check for duplicate declaration of this property in current and
     // other class extensions.
-    for (ObjCInterfaceDecl::known_extensions_iterator
-           Ext = CCPrimary->known_extensions_begin(),
-           ExtEnd = CCPrimary->known_extensions_end();
-         Ext != ExtEnd; ++Ext) {
+    for (const auto *Ext : CCPrimary->known_extensions()) {
       if (ObjCPropertyDecl *prevDecl
-            = ObjCPropertyDecl::findPropertyDecl(*Ext, PropertyId)) {
+            = ObjCPropertyDecl::findPropertyDecl(Ext, PropertyId)) {
         Diag(AtLoc, diag::err_duplicate_property);
         Diag(prevDecl->getLocation(), diag::note_property_declare);
         return 0;
@@ -478,6 +451,18 @@
     DeclContext *DC = cast<DeclContext>(CCPrimary);
     if (!ObjCPropertyDecl::findPropertyDecl(DC,
                                  PIDecl->getDeclName().getAsIdentifierInfo())) {
+      // In MRR mode, a 'readwrite' property must have an explicit
+      // memory attribute. If none is specified, select the default (assign).
+      if (!getLangOpts().ObjCAutoRefCount) {
+        if (!(PIkind & (ObjCDeclSpec::DQ_PR_assign |
+                        ObjCDeclSpec::DQ_PR_retain |
+                        ObjCDeclSpec::DQ_PR_strong |
+                        ObjCDeclSpec::DQ_PR_copy |
+                        ObjCDeclSpec::DQ_PR_unsafe_unretained |
+                        ObjCDeclSpec::DQ_PR_weak)))
+          PIkind |= ObjCPropertyDecl::OBJC_PR_assign;
+      }
+      
       // Protocol is not in the primary class. Must build one for it.
       ObjCDeclSpec ProtocolPropertyODS;
       // FIXME. Assuming that ObjCDeclSpec::ObjCPropertyAttributeKind
@@ -763,18 +748,14 @@
                                         ObjCInterfaceDecl *ClassDecl,
                                         ObjCPropertyDecl *Property) {
   ObjCInterfaceDecl::ProtocolPropertyMap PropMap;
-  for (ObjCInterfaceDecl::all_protocol_iterator
-       PI = ClassDecl->all_referenced_protocol_begin(),
-       E = ClassDecl->all_referenced_protocol_end(); PI != E; ++PI) {
-    if (const ObjCProtocolDecl *PDecl = (*PI)->getDefinition())
+  for (const auto *PI : ClassDecl->all_referenced_protocols()) {
+    if (const ObjCProtocolDecl *PDecl = PI->getDefinition())
       PDecl->collectInheritedProtocolProperties(Property, PropMap);
   }
   if (ObjCInterfaceDecl *SDecl = ClassDecl->getSuperClass())
     while (SDecl) {
-      for (ObjCInterfaceDecl::all_protocol_iterator
-           PI = SDecl->all_referenced_protocol_begin(),
-           E = SDecl->all_referenced_protocol_end(); PI != E; ++PI) {
-        if (const ObjCProtocolDecl *PDecl = (*PI)->getDefinition())
+      for (const auto *PI : SDecl->all_referenced_protocols()) {
+        if (const ObjCProtocolDecl *PDecl = PI->getDefinition())
           PDecl->collectInheritedProtocolProperties(Property, PropMap);
       }
       SDecl = SDecl->getSuperClass();
@@ -875,9 +856,7 @@
       bool ReadWriteProperty = false;
       // Search into the class extensions and see if 'readonly property is
       // redeclared 'readwrite', then no warning is to be issued.
-      for (ObjCInterfaceDecl::known_extensions_iterator
-            Ext = IDecl->known_extensions_begin(),
-            ExtEnd = IDecl->known_extensions_end(); Ext != ExtEnd; ++Ext) {
+      for (auto *Ext : IDecl->known_extensions()) {
         DeclContext::lookup_result R = Ext->lookup(property->getDeclName());
         if (!R.empty())
           if (ObjCPropertyDecl *ExtProp = dyn_cast<ObjCPropertyDecl>(R[0])) {
@@ -891,7 +870,7 @@
       
       if (!ReadWriteProperty) {
         Diag(property->getLocation(), diag::warn_auto_readonly_iboutlet_property)
-            << property->getName();
+            << property;
         SourceLocation readonlyLoc;
         if (LocPropertyAttribute(Context, "readonly", 
                                  property->getLParenLoc(), readonlyLoc)) {
@@ -1163,19 +1142,20 @@
       ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl();
       DeclRefExpr *SelfExpr = 
         new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
-                                  VK_RValue, PropertyDiagLoc);
+                                  VK_LValue, PropertyDiagLoc);
       MarkDeclRefReferenced(SelfExpr);
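+      // 'self' is built as an lvalue now, so load it explicitly before using
+      // it as the base of the ivar reference.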
+      Expr *LoadSelfExpr =
+        ImplicitCastExpr::Create(Context, SelfDecl->getType(),
+                                 CK_LValueToRValue, SelfExpr, 0, VK_RValue);
       Expr *IvarRefExpr =
         new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), PropertyDiagLoc,
                                       Ivar->getLocation(),
-                                      SelfExpr, true, true);
-      ExprResult Res = 
-        PerformCopyInitialization(InitializedEntity::InitializeResult(
-                                    PropertyDiagLoc,
-                                    getterMethod->getResultType(),
-                                    /*NRVO=*/false),
-                                  PropertyDiagLoc,
-                                  Owned(IvarRefExpr));
+                                      LoadSelfExpr, true, true);
+      ExprResult Res = PerformCopyInitialization(
+          InitializedEntity::InitializeResult(PropertyDiagLoc,
+                                              getterMethod->getReturnType(),
+                                              /*NRVO=*/false),
+          PropertyDiagLoc, Owned(IvarRefExpr));
       if (!Res.isInvalid()) {
         Expr *ResExpr = Res.takeAs<Expr>();
         if (ResExpr)
@@ -1211,12 +1191,15 @@
       ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
       DeclRefExpr *SelfExpr = 
         new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
-                                  VK_RValue, PropertyDiagLoc);
+                                  VK_LValue, PropertyDiagLoc);
       MarkDeclRefReferenced(SelfExpr);
+      Expr *LoadSelfExpr =
+        ImplicitCastExpr::Create(Context, SelfDecl->getType(),
+                                 CK_LValueToRValue, SelfExpr, 0, VK_RValue);
       Expr *lhs =
         new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), PropertyDiagLoc,
                                       Ivar->getLocation(),
-                                      SelfExpr, true, true);
+                                      LoadSelfExpr, true, true);
       ObjCMethodDecl::param_iterator P = setterMethod->param_begin();
       ParmVarDecl *Param = (*P);
       QualType T = Param->getType().getNonReferenceType();
@@ -1394,7 +1377,7 @@
                                             SourceLocation Loc) {
   if (!GetterMethod)
     return false;
-  QualType GetterType = GetterMethod->getResultType().getNonReferenceType();
+  QualType GetterType = GetterMethod->getReturnType().getNonReferenceType();
   QualType PropertyIvarType = property->getType().getNonReferenceType();
   bool compat = Context.hasSameType(PropertyIvarType, GetterType);
   if (!compat) {
@@ -1433,37 +1416,32 @@
 
 /// CollectImmediateProperties - This routine collects all properties in
 /// the class and its conforming protocols; but not those in its super class.
-void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
-            ObjCContainerDecl::PropertyMap &PropMap,
-            ObjCContainerDecl::PropertyMap &SuperPropMap) {
+static void CollectImmediateProperties(ObjCContainerDecl *CDecl,
+                                       ObjCContainerDecl::PropertyMap &PropMap,
+                                       ObjCContainerDecl::PropertyMap &SuperPropMap,
+                                       bool IncludeProtocols = true) {
+
   if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
-    for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
-         E = IDecl->prop_end(); P != E; ++P) {
-      ObjCPropertyDecl *Prop = *P;
+    for (auto *Prop : IDecl->properties())
       PropMap[Prop->getIdentifier()] = Prop;
+    if (IncludeProtocols) {
+      // Scan through class's protocols.
+      for (auto *PI : IDecl->all_referenced_protocols())
+        CollectImmediateProperties(PI, PropMap, SuperPropMap);
     }
-    // scan through class's protocols.
-    for (ObjCInterfaceDecl::all_protocol_iterator
-         PI = IDecl->all_referenced_protocol_begin(),
-         E = IDecl->all_referenced_protocol_end(); PI != E; ++PI)
-        CollectImmediateProperties((*PI), PropMap, SuperPropMap);
   }
   if (ObjCCategoryDecl *CATDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) {
     if (!CATDecl->IsClassExtension())
-      for (ObjCContainerDecl::prop_iterator P = CATDecl->prop_begin(),
-           E = CATDecl->prop_end(); P != E; ++P) {
-        ObjCPropertyDecl *Prop = *P;
+      for (auto *Prop : CATDecl->properties())
         PropMap[Prop->getIdentifier()] = Prop;
-      }
-    // scan through class's protocols.
-    for (ObjCCategoryDecl::protocol_iterator PI = CATDecl->protocol_begin(),
-         E = CATDecl->protocol_end(); PI != E; ++PI)
-      CollectImmediateProperties((*PI), PropMap, SuperPropMap);
+    if (IncludeProtocols) {
+      // Scan through class's protocols.
+      for (auto *PI : CATDecl->protocols())
+        CollectImmediateProperties(PI, PropMap, SuperPropMap);
+    }
   }
   else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) {
-    for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
-         E = PDecl->prop_end(); P != E; ++P) {
-      ObjCPropertyDecl *Prop = *P;
+    for (auto *Prop : PDecl->properties()) {
       ObjCPropertyDecl *PropertyFromSuper = SuperPropMap[Prop->getIdentifier()];
       // Exclude property for protocols which conform to class's super-class, 
       // as super-class has to implement the property.
@@ -1475,9 +1453,8 @@
       }
     }
     // scan through protocol's protocols.
-    for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
-         E = PDecl->protocol_end(); PI != E; ++PI)
-      CollectImmediateProperties((*PI), PropMap, SuperPropMap);
+    for (auto *PI : PDecl->protocols())
+      CollectImmediateProperties(PI, PropMap, SuperPropMap);
   }
 }
 
@@ -1510,17 +1487,35 @@
   
   // look up a property declaration whose one of its accessors is implemented
   // by this method.
-  for (ObjCContainerDecl::prop_iterator P = IFace->prop_begin(),
-       E = IFace->prop_end(); P != E; ++P) {
-    ObjCPropertyDecl *property = *P;
-    if ((property->getGetterName() == IMD->getSelector() ||
-         property->getSetterName() == IMD->getSelector()) &&
-        (property->getPropertyIvarDecl() == IV))
+  for (const auto *Property : IFace->properties()) {
+    if ((Property->getGetterName() == IMD->getSelector() ||
+         Property->getSetterName() == IMD->getSelector()) &&
+        (Property->getPropertyIvarDecl() == IV))
       return true;
   }
   return false;
 }
 
+static bool SuperClassImplementsProperty(ObjCInterfaceDecl *IDecl,
+                                         ObjCPropertyDecl *Prop) {
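+  // A superclass "implements" the property when it provides the required
+  // accessor methods: the getter always, and the setter unless the property
+  // is readonly.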
+  bool SuperClassImplementsGetter = false;
+  bool SuperClassImplementsSetter = false;
+  if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly)
+    SuperClassImplementsSetter = true;
+
+  while (IDecl->getSuperClass()) {
+    ObjCInterfaceDecl *SDecl = IDecl->getSuperClass();
+    if (!SuperClassImplementsGetter && SDecl->getInstanceMethod(Prop->getGetterName()))
+      SuperClassImplementsGetter = true;
+
+    if (!SuperClassImplementsSetter && SDecl->getInstanceMethod(Prop->getSetterName()))
+      SuperClassImplementsSetter = true;
+    if (SuperClassImplementsGetter && SuperClassImplementsSetter)
+      return true;
+    IDecl = IDecl->getSuperClass();
+  }
+  return false;
+}
 
 /// \brief Default synthesizes all properties which must be synthesized
 /// in class's \@implementation.
@@ -1559,7 +1554,7 @@
           !IMPDecl->getInstanceMethod(Prop->getSetterName()) &&
           !IDecl->HasUserDeclaredSetterMethod(Prop)) {
             Diag(Prop->getLocation(), diag::warn_no_autosynthesis_property)
-              << Prop->getIdentifier()->getName();
+              << Prop->getIdentifier();
             Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
       }
       continue;
@@ -1568,17 +1563,23 @@
         IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier())) {
       if (PID->getPropertyDecl() != Prop) {
         Diag(Prop->getLocation(), diag::warn_no_autosynthesis_shared_ivar_property)
-        << Prop->getIdentifier()->getName();
+          << Prop->getIdentifier();
         if (!PID->getLocation().isInvalid())
           Diag(PID->getLocation(), diag::note_property_synthesize);
       }
       continue;
     }
-    if (isa<ObjCProtocolDecl>(Prop->getDeclContext())) {
+    if (ObjCProtocolDecl *Proto =
+          dyn_cast<ObjCProtocolDecl>(Prop->getDeclContext())) {
       // We won't auto-synthesize properties declared in protocols.
-      Diag(IMPDecl->getLocation(), 
-           diag::warn_auto_synthesizing_protocol_property);
-      Diag(Prop->getLocation(), diag::note_property_declare);
+      // Suppress the warning if the class's superclass implements the
+      // property's getter and, for a readwrite property, its setter.
+      if (!SuperClassImplementsProperty(IDecl, Prop)) {
+        Diag(IMPDecl->getLocation(),
+             diag::warn_auto_synthesizing_protocol_property)
+          << Prop << Proto;
+        Diag(Prop->getLocation(), diag::note_property_declare);
+      }
       continue;
     }
 
@@ -1610,41 +1611,105 @@
       DefaultSynthesizeProperties(S, IC, IDecl);
 }
 
+static void DiagnoseUnimplementedAccessor(Sema &S,
+                                          ObjCInterfaceDecl *PrimaryClass,
+                                          Selector Method,
+                                          ObjCImplDecl* IMPDecl,
+                                          ObjCContainerDecl *CDecl,
+                                          ObjCCategoryDecl *C,
+                                          ObjCPropertyDecl *Prop,
+                                          Sema::SelectorSet &SMap) {
+  // When reporting on missing property setter/getter implementations in
+  // categories, do not report when they are declared in the primary class,
+  // the class's protocol, or one of its super classes. This is because
+  // the class is going to implement them.
+  if (!SMap.count(Method) &&
+      (PrimaryClass == 0 ||
+       !PrimaryClass->lookupPropertyAccessor(Method, C))) {
+        S.Diag(IMPDecl->getLocation(),
+               isa<ObjCCategoryDecl>(CDecl) ?
+               diag::warn_setter_getter_impl_required_in_category :
+               diag::warn_setter_getter_impl_required)
+            << Prop->getDeclName() << Method;
+        S.Diag(Prop->getLocation(),
+             diag::note_property_declare);
+        if (S.LangOpts.ObjCDefaultSynthProperties &&
+            S.LangOpts.ObjCRuntime.isNonFragile())
+          if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
+            if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
+            S.Diag(RID->getLocation(), diag::note_suppressed_class_declare);
+      }
+}
+
 void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
-                                      ObjCContainerDecl *CDecl) {
-  ObjCContainerDecl::PropertyMap NoNeedToImplPropMap;
-  ObjCInterfaceDecl *IDecl;
-  // Gather properties which need not be implemented in this class
-  // or category.
-  if (!(IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)))
-    if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
-      // For categories, no need to implement properties declared in
-      // its primary class (and its super classes) if property is
-      // declared in one of those containers.
-      if ((IDecl = C->getClassInterface())) {
-        ObjCInterfaceDecl::PropertyDeclOrder PO;
-        IDecl->collectPropertiesToImplement(NoNeedToImplPropMap, PO);
+                                           ObjCContainerDecl *CDecl,
+                                           bool SynthesizeProperties) {
+  ObjCContainerDecl::PropertyMap PropMap;
+  ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl);
+
+  if (!SynthesizeProperties) {
+    ObjCContainerDecl::PropertyMap NoNeedToImplPropMap;
+    // Gather properties which need not be implemented in this class
+    // or category.
+    if (!IDecl)
+      if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+        // For categories, no need to implement properties declared in
+        // its primary class (and its super classes) if property is
+        // declared in one of those containers.
+        if ((IDecl = C->getClassInterface())) {
+          ObjCInterfaceDecl::PropertyDeclOrder PO;
+          IDecl->collectPropertiesToImplement(NoNeedToImplPropMap, PO);
+        }
+      }
+    if (IDecl)
+      CollectSuperClassPropertyImplementations(IDecl, NoNeedToImplPropMap);
+    
+    CollectImmediateProperties(CDecl, PropMap, NoNeedToImplPropMap);
+  }
+
+  // Scan the @interface to see if any of the protocols it adopts
+  // require an explicit implementation, via attribute
+  // 'objc_protocol_requires_explicit_implementation'.
+  if (IDecl) {
+    std::unique_ptr<ObjCContainerDecl::PropertyMap> LazyMap;
+
+    for (auto *PDecl : IDecl->all_referenced_protocols()) {
+      if (!PDecl->hasAttr<ObjCExplicitProtocolImplAttr>())
+        continue;
+      // Lazily construct a set of all the properties in the @interface
+      // of the class, without looking at the superclass.  We cannot
+      // use the call to CollectImmediateProperties() above as that
+      // utilizes information from the super class's properties as well
+      // as scans the adopted protocols.  This work only triggers for protocols
+      // with the attribute, which is very rare, and only occurs when
+      // analyzing the @implementation.
+      if (!LazyMap) {
+        ObjCContainerDecl::PropertyMap NoNeedToImplPropMap;
+        LazyMap.reset(new ObjCContainerDecl::PropertyMap());
+        CollectImmediateProperties(CDecl, *LazyMap, NoNeedToImplPropMap,
+                                   /* IncludeProtocols */ false);
+      }
+      // Add the properties of 'PDecl' to the list of properties that
+      // need to be implemented.
+      for (auto *PropDecl : PDecl->properties()) {
+        if ((*LazyMap)[PropDecl->getIdentifier()])
+          continue;
+        PropMap[PropDecl->getIdentifier()] = PropDecl;
       }
     }
-  if (IDecl)
-    CollectSuperClassPropertyImplementations(IDecl, NoNeedToImplPropMap);
-  
-  ObjCContainerDecl::PropertyMap PropMap;
-  CollectImmediateProperties(CDecl, PropMap, NoNeedToImplPropMap);
+  }
+
   if (PropMap.empty())
     return;
 
   llvm::DenseSet<ObjCPropertyDecl *> PropImplMap;
-  for (ObjCImplDecl::propimpl_iterator
-       I = IMPDecl->propimpl_begin(),
-       EI = IMPDecl->propimpl_end(); I != EI; ++I)
+  for (const auto *I : IMPDecl->property_impls())
     PropImplMap.insert(I->getPropertyDecl());
 
   SelectorSet InsMap;
   // Collect property accessors implemented in current implementation.
-  for (ObjCImplementationDecl::instmeth_iterator
-       I = IMPDecl->instmeth_begin(), E = IMPDecl->instmeth_end(); I!=E; ++I)
-    InsMap.insert((*I)->getSelector());
+  for (const auto *I : IMPDecl->instance_methods())
+    InsMap.insert(I->getSelector());
   
   ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
   ObjCInterfaceDecl *PrimaryClass = 0;
@@ -1655,9 +1720,8 @@
         // When reporting on missing setter/getters, do not report when
         // setter/getter is implemented in category's primary class
         // implementation.
-        for (ObjCImplementationDecl::instmeth_iterator
-             I = IMP->instmeth_begin(), E = IMP->instmeth_end(); I!=E; ++I)
-          InsMap.insert((*I)->getSelector());
+        for (const auto *I : IMP->instance_methods())
+          InsMap.insert(I->getSelector());
       }
 
   for (ObjCContainerDecl::PropertyMap::iterator
@@ -1669,45 +1733,14 @@
         PropImplMap.count(Prop) ||
         Prop->getAvailability() == AR_Unavailable)
       continue;
-    // When reporting on missing property getter implementation in
-    // categories, do not report when they are declared in primary class,
-    // class's protocol, or one of it super classes. This is because,
-    // the class is going to implement them.
-    if (!InsMap.count(Prop->getGetterName()) &&
-        (PrimaryClass == 0 ||
-         !PrimaryClass->lookupPropertyAccessor(Prop->getGetterName(), C))) {
-      Diag(IMPDecl->getLocation(),
-           isa<ObjCCategoryDecl>(CDecl) ?
-            diag::warn_setter_getter_impl_required_in_category :
-            diag::warn_setter_getter_impl_required)
-      << Prop->getDeclName() << Prop->getGetterName();
-      Diag(Prop->getLocation(),
-           diag::note_property_declare);
-      if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCRuntime.isNonFragile())
-        if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
-          if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
-            Diag(RID->getLocation(), diag::note_suppressed_class_declare);
-            
-    }
-    // When reporting on missing property setter implementation in
-    // categories, do not report when they are declared in primary class,
-    // class's protocol, or one of it super classes. This is because,
-    // the class is going to implement them.
-    if (!Prop->isReadOnly() && !InsMap.count(Prop->getSetterName()) &&
-        (PrimaryClass == 0 ||
-         !PrimaryClass->lookupPropertyAccessor(Prop->getSetterName(), C))) {
-      Diag(IMPDecl->getLocation(),
-           isa<ObjCCategoryDecl>(CDecl) ?
-           diag::warn_setter_getter_impl_required_in_category :
-           diag::warn_setter_getter_impl_required)
-      << Prop->getDeclName() << Prop->getSetterName();
-      Diag(Prop->getLocation(),
-           diag::note_property_declare);
-      if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCRuntime.isNonFragile())
-        if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
-          if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
-            Diag(RID->getLocation(), diag::note_suppressed_class_declare);
-    }
+
+    // Diagnose unimplemented getters and setters.
+    DiagnoseUnimplementedAccessor(*this,
+          PrimaryClass, Prop->getGetterName(), IMPDecl, CDecl, C, Prop, InsMap);
+    if (!Prop->isReadOnly())
+      DiagnoseUnimplementedAccessor(*this,
+                                    PrimaryClass, Prop->getSetterName(),
+                                    IMPDecl, CDecl, C, Prop, InsMap);
   }
 }
 
@@ -1717,10 +1750,7 @@
   // Rules apply in non-GC mode only
   if (getLangOpts().getGC() != LangOptions::NonGC)
     return;
-  for (ObjCContainerDecl::prop_iterator I = IDecl->prop_begin(),
-       E = IDecl->prop_end();
-       I != E; ++I) {
-    ObjCPropertyDecl *Property = *I;
+  for (const auto *Property : IDecl->properties()) {
     ObjCMethodDecl *GetterMethod = 0;
     ObjCMethodDecl *SetterMethod = 0;
     bool LookedUpGetterSetter = false;
@@ -1758,7 +1788,6 @@
       if (!LookedUpGetterSetter) {
         GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName());
         SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName());
-        LookedUpGetterSetter = true;
       }
       if ((GetterMethod && !SetterMethod) || (!GetterMethod && SetterMethod)) {
         SourceLocation MethodLoc =
@@ -1805,12 +1834,7 @@
   if (getLangOpts().getGC() == LangOptions::GCOnly)
     return;
 
-  for (ObjCImplementationDecl::propimpl_iterator
-         i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
-    ObjCPropertyImplDecl *PID = *i;
-    if (PID->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
-      continue;
-    
+  for (const auto *PID : D->property_impls()) {
     const ObjCPropertyDecl *PD = PID->getPropertyDecl();
     if (PD && !PD->hasAttr<NSReturnsNotRetainedAttr>() &&
         !D->getInstanceMethod(PD->getGetterName())) {
@@ -1821,27 +1845,51 @@
       if (family == OMF_alloc || family == OMF_copy ||
           family == OMF_mutableCopy || family == OMF_new) {
         if (getLangOpts().ObjCAutoRefCount)
-          Diag(PID->getLocation(), diag::err_ownin_getter_rule);
+          Diag(PD->getLocation(), diag::err_cocoa_naming_owned_rule);
         else
-          Diag(PID->getLocation(), diag::warn_owning_getter_rule);
-        Diag(PD->getLocation(), diag::note_property_declare);
+          Diag(PD->getLocation(), diag::warn_cocoa_naming_owned_rule);
       }
     }
   }
 }
 
+void Sema::DiagnoseMissingDesignatedInitOverrides(
+                                            const ObjCImplementationDecl *ImplD,
+                                            const ObjCInterfaceDecl *IFD) {
+  assert(IFD->hasDesignatedInitializers());
+  const ObjCInterfaceDecl *SuperD = IFD->getSuperClass();
+  if (!SuperD)
+    return;
+
+  SelectorSet InitSelSet;
+  for (const auto *I : ImplD->instance_methods())
+    if (I->getMethodFamily() == OMF_init)
+      InitSelSet.insert(I->getSelector());
+
+  SmallVector<const ObjCMethodDecl *, 8> DesignatedInits;
+  SuperD->getDesignatedInitializers(DesignatedInits);
+  for (SmallVector<const ObjCMethodDecl *, 8>::iterator
+         I = DesignatedInits.begin(), E = DesignatedInits.end(); I != E; ++I) {
+    const ObjCMethodDecl *MD = *I;
+    if (!InitSelSet.count(MD->getSelector())) {
+      Diag(ImplD->getLocation(),
+           diag::warn_objc_implementation_missing_designated_init_override)
+        << MD->getSelector();
+      Diag(MD->getLocation(), diag::note_objc_designated_init_marked_here);
+    }
+  }
+}
+
 /// AddPropertyAttrs - Propagates attributes from a property to the
 /// implicitly-declared getter or setter for that property.
 static void AddPropertyAttrs(Sema &S, ObjCMethodDecl *PropertyMethod,
                              ObjCPropertyDecl *Property) {
   // Should we just clone all attributes over?
-  for (Decl::attr_iterator A = Property->attr_begin(), 
-                        AEnd = Property->attr_end(); 
-       A != AEnd; ++A) {
-    if (isa<DeprecatedAttr>(*A) || 
-        isa<UnavailableAttr>(*A) || 
-        isa<AvailabilityAttr>(*A))
-      PropertyMethod->addAttr((*A)->clone(S.Context));
+  for (const auto *A : Property->attrs()) {
+    if (isa<DeprecatedAttr>(A) || 
+        isa<UnavailableAttr>(A) || 
+        isa<AvailabilityAttr>(A))
+      PropertyMethod->addAttr(A->clone(S.Context));
   }
 }
 
@@ -1866,8 +1914,8 @@
     ObjCPropertyDecl::PropertyAttributeKind CAttr =
       property->getPropertyAttributes();
     if ((!(CAttr & ObjCPropertyDecl::OBJC_PR_readonly)) &&
-        Context.getCanonicalType(SetterMethod->getResultType()) !=
-          Context.VoidTy)
+        Context.getCanonicalType(SetterMethod->getReturnType()) !=
+            Context.VoidTy)
       Diag(SetterMethod->getLocation(), diag::err_setter_type_void);
     if (SetterMethod->param_size() != 1 ||
         !Context.hasSameUnqualifiedType(
@@ -1913,12 +1961,16 @@
     if (lexicalDC)
       GetterMethod->setLexicalDeclContext(lexicalDC);
     if (property->hasAttr<NSReturnsNotRetainedAttr>())
-      GetterMethod->addAttr(
-        ::new (Context) NSReturnsNotRetainedAttr(Loc, Context));
+      GetterMethod->addAttr(NSReturnsNotRetainedAttr::CreateImplicit(Context,
+                                                                     Loc));
     
     if (property->hasAttr<ObjCReturnsInnerPointerAttr>())
       GetterMethod->addAttr(
-        ::new (Context) ObjCReturnsInnerPointerAttr(Loc, Context));
+        ObjCReturnsInnerPointerAttr::CreateImplicit(Context, Loc));
+    
+    if (const SectionAttr *SA = property->getAttr<SectionAttr>())
+      GetterMethod->addAttr(SectionAttr::CreateImplicit(Context, SA->getName(),
+                                                        Loc));
 
     if (getLangOpts().ObjCAutoRefCount)
       CheckARCMethodDecl(GetterMethod);
@@ -1969,7 +2021,9 @@
       // and the real context should be the same.
       if (lexicalDC)
         SetterMethod->setLexicalDeclContext(lexicalDC);
-
+      if (const SectionAttr *SA = property->getAttr<SectionAttr>())
+        SetterMethod->addAttr(SectionAttr::CreateImplicit(Context,
+                                                          SA->getName(), Loc));
       // It's possible for the user to have set a very odd custom
       // setter selector that causes it to have a method family.
       if (getLangOpts().ObjCAutoRefCount)
@@ -2027,27 +2081,19 @@
   QualType PropertyTy = PropertyDecl->getType();
   unsigned PropertyOwnership = getOwnershipRule(Attributes);
 
-  if (Attributes & ObjCDeclSpec::DQ_PR_readonly) {
-    if (getLangOpts().ObjCAutoRefCount &&
-        PropertyTy->isObjCRetainableType() &&
-        !PropertyOwnership) {
-      // 'readonly' property with no obvious lifetime.
-      // its life time will be determined by its backing ivar.
-      return;
-    }
-    else if (PropertyOwnership) {
-      if (!getSourceManager().isInSystemHeader(Loc))
-        Diag(Loc, diag::warn_objc_property_attr_mutually_exclusive)
-          << "readonly" << NameOfOwnershipAttribute(Attributes);
-      return;
-    }
-  }
+  // A 'readonly' property with no obvious lifetime: its lifetime will be
+  // determined by its backing ivar.
+  if (getLangOpts().ObjCAutoRefCount &&
+      Attributes & ObjCDeclSpec::DQ_PR_readonly &&
+      PropertyTy->isObjCRetainableType() &&
+      !PropertyOwnership)
+    return;
 
   // Check for copy or retain on non-object types.
   if ((Attributes & (ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
                     ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong)) &&
       !PropertyTy->isObjCRetainableType() &&
-      !PropertyDecl->getAttr<ObjCNSObjectAttr>()) {
+      !PropertyDecl->hasAttr<ObjCNSObjectAttr>()) {
     Diag(Loc, diag::err_objc_property_requires_object)
       << (Attributes & ObjCDeclSpec::DQ_PR_weak ? "weak" :
           Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain (or strong)");
@@ -2079,7 +2125,7 @@
         << "assign" << "weak";
       Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
     }
-    if (PropertyDecl->getAttr<IBOutletCollectionAttr>())
+    if (PropertyDecl->hasAttr<IBOutletCollectionAttr>())
       Diag(Loc, diag::warn_iboutletcollection_property_assign);
   } else if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) {
     if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index c63caf4..78f73c0 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -21,10 +21,10 @@
 #include "clang/AST/StmtVisitor.h"
 #include "clang/Lex/Preprocessor.h"
 #include "clang/Sema/Initialization.h"
-#include "clang/Sema/SemaInternal.h"
 #include "clang/Sema/Lookup.h"
 #include "clang/Sema/Scope.h"
 #include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
 using namespace clang;
 
 //===----------------------------------------------------------------------===//
@@ -82,6 +82,9 @@
   typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator;
 
   DSAVarData getDSA(StackTy::reverse_iterator Iter, VarDecl *D);
+
+  /// \brief Checks if the variable is local to an OpenMP region.
+  bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter);
 public:
   explicit DSAStackTy(Sema &S) : Stack(1), Actions(S) { }
 
@@ -98,9 +101,6 @@
   /// \brief Adds explicit data sharing attribute to the specified declaration.
   void addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A);
 
-  /// \brief Checks if the variable is a local for OpenMP region.
-  bool isOpenMPLocal(VarDecl *D);
-
   /// \brief Returns data sharing attributes from top of the stack for the
   /// specified declaration.
   DSAVarData getTopDSA(VarDecl *D);
@@ -111,7 +111,6 @@
   DSAVarData hasDSA(VarDecl *D, OpenMPClauseKind CKind,
                     OpenMPDirectiveKind DKind = OMPD_unknown);
 
-
   /// \brief Returns currently analyzed directive.
   OpenMPDirectiveKind getCurrentDirective() const {
     return Stack.back().Directive;
@@ -126,6 +125,13 @@
     return Stack.back().DefaultAttr;
   }
 
+  /// \brief Checks if the specified variable is threadprivate.
+  bool isThreadPrivate(VarDecl *D) {
+    DSAVarData DVar = getTopDSA(D);
+    return (DVar.CKind == OMPC_threadprivate || DVar.CKind == OMPC_copyin);
+  }
+
+  Scope *getCurScope() const { return Stack.back().CurScope; }
   Scope *getCurScope() { return Stack.back().CurScope; }
 };
 } // end anonymous namespace.
@@ -152,7 +158,21 @@
 
     return DVar;
   }
+
   DVar.DKind = Iter->Directive;
+  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+  // in a Construct, C/C++, predetermined, p.1]
+  // Variables with automatic storage duration that are declared in a scope
+  // inside the construct are private.
+  if (DVar.DKind != OMPD_parallel) {
+    if (isOpenMPLocal(D, Iter) && D->isLocalVarDecl() &&
+        (D->getStorageClass() == SC_Auto ||
+         D->getStorageClass() == SC_None)) {
+      DVar.CKind = OMPC_private;
+      return DVar;
+    }
+  }
+
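
Illustrative user code for the predetermined rule quoted in the comment above (a sketch, not part of this patch; the function name 'work' is made up): an automatic variable declared in a scope inside the construct needs no explicit data-sharing clause.

    void work() {
      #pragma omp task
      {
        int tmp = 0;  // automatic storage, declared inside the construct:
        ++tmp;        // predetermined private, no data-sharing clause needed
      }
    }
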
   // Explicitly specified attributes and local variables with predetermined
   // attributes.
   if (Iter->SharingMap.count(D)) {
@@ -189,8 +209,8 @@
     // TODO
     if (DVar.DKind == OMPD_task) {
       DSAVarData DVarTemp;
-      for (StackTy::reverse_iterator I = Iter + 1,
-                                     EE = Stack.rend() - 1;
+      for (StackTy::reverse_iterator I = std::next(Iter),
+                                     EE = std::prev(Stack.rend());
            I != EE; ++I) {
         // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
         // in a Construct, implicitly determined, p.6]
@@ -217,7 +237,7 @@
   //  For constructs other than task, if no default clause is present, these
   //  variables inherit their data-sharing attributes from the enclosing
   //  context.
-  return getDSA(Iter + 1, D);
+  return getDSA(std::next(Iter), D);
 }
 
 void DSAStackTy::addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A) {
@@ -231,23 +251,24 @@
   }
 }
 
-bool DSAStackTy::isOpenMPLocal(VarDecl *D) {
-  Scope *CurScope = getCurScope();
-  while (CurScope && !CurScope->isDeclScope(D))
-    CurScope = CurScope->getParent();
-  while (CurScope && !CurScope->isOpenMPDirectiveScope())
-    CurScope = CurScope->getParent();
-  bool isOpenMPLocal = !!CurScope;
-  if (!isOpenMPLocal) {
-    CurScope = getCurScope();
-    while (CurScope && !CurScope->isOpenMPDirectiveScope())
+bool
+DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) {
+  if (Stack.size() > 2) {
+    reverse_iterator I = Iter, E = Stack.rend() - 1;
+    Scope *TopScope = 0;
+    while (I != E &&
+           I->Directive != OMPD_parallel) {
+      ++I;
+    }
+    if (I == E) return false;
+    TopScope = I->CurScope ? I->CurScope->getParent() : 0;
+    Scope *CurScope = getCurScope();
+    while (CurScope != TopScope && !CurScope->isDeclScope(D)) {
       CurScope = CurScope->getParent();
-    isOpenMPLocal =
-      CurScope &&
-      isa<CapturedDecl>(D->getDeclContext()) &&
-      CurScope->getFnParent()->getEntity()->Encloses(D->getDeclContext());
+    }
+    return CurScope != TopScope;
   }
-  return isOpenMPLocal;
+  return false;
 }
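
A sketch of the distinction the rewritten isOpenMPLocal() draws, as illustrative user code (not part of this patch; all names are made up): only variables whose declaring scope lies inside the enclosing OpenMP region count as OpenMP-local.

    int global_counter;
    void region() {
      int outside = 0;
      #pragma omp parallel
      {
        int inside = 0;            // declared inside the region
        inside += outside;         // 'outside' comes from an enclosing scope
        inside += global_counter;  // file-scope variables are never OpenMP-local
      }
    }
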
 
 DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D) {
@@ -270,11 +291,13 @@
   // in a Construct, C/C++, predetermined, p.1]
   // Variables with automatic storage duration that are declared in a scope
   // inside the construct are private.
-  if (isOpenMPLocal(D) && D->isLocalVarDecl() &&
-      (D->getStorageClass() == SC_Auto ||
-       D->getStorageClass() == SC_None)) {
-    DVar.CKind = OMPC_private;
-    return DVar;
+  OpenMPDirectiveKind Kind = getCurrentDirective();
+  if (Kind != OMPD_parallel) {
+    if (isOpenMPLocal(D, std::next(Stack.rbegin())) && D->isLocalVarDecl() &&
+        (D->getStorageClass() == SC_Auto ||
+         D->getStorageClass() == SC_None)) {
+      DVar.CKind = OMPC_private;
+      return DVar;
+    }
   }
 
   // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
@@ -319,7 +342,7 @@
   // in a Construct, C/C++, predetermined, p.7]
   //  Variables with static storage duration that are declared in a scope
   //  inside the construct are shared.
-  if (isOpenMPLocal(D) && D->isStaticLocal()) {
+  if (D->isStaticLocal()) {
     DVar.CKind = OMPC_shared;
     return DVar;
   }
@@ -335,13 +358,13 @@
 }
 
 DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(VarDecl *D) {
-  return getDSA(Stack.rbegin() + 1, D);
+  return getDSA(std::next(Stack.rbegin()), D);
 }
 
 DSAStackTy::DSAVarData DSAStackTy::hasDSA(VarDecl *D, OpenMPClauseKind CKind,
                                           OpenMPDirectiveKind DKind) {
-  for (StackTy::reverse_iterator I = Stack.rbegin() + 1,
-                                 E = Stack.rend() - 1;
+  for (StackTy::reverse_iterator I = std::next(Stack.rbegin()),
+                                 E = std::prev(Stack.rend());
        I != E; ++I) {
     if (DKind != OMPD_unknown && DKind != I->Directive) continue;
     DSAVarData DVar = getDSA(I, D);
@@ -381,7 +404,7 @@
   Sema &Actions;
 public:
   VarDeclFilterCCC(Sema &S) : Actions(S) { }
-  virtual bool ValidateCandidate(const TypoCorrection &Candidate) {
+  bool ValidateCandidate(const TypoCorrection &Candidate) override {
     NamedDecl *ND = Candidate.getCorrectionDecl();
     if (VarDecl *VD = dyn_cast_or_null<VarDecl>(ND)) {
       return VD->hasGlobalStorage() &&
@@ -509,8 +532,7 @@
   }
 
   QualType ExprType = VD->getType().getNonReferenceType();
-  ExprResult DE = BuildDeclRefExpr(VD, ExprType, VK_RValue, Id.getLoc());
-  DSAStack->addDSA(VD, cast<DeclRefExpr>(DE.get()), OMPC_threadprivate);
+  ExprResult DE = BuildDeclRefExpr(VD, ExprType, VK_LValue, Id.getLoc());
   return DE;
 }
 
@@ -566,11 +588,15 @@
     }
 
     Vars.push_back(*I);
+    DSAStack->addDSA(VD, DE, OMPC_threadprivate);
   }
-  return Vars.empty() ?
-              0 : OMPThreadPrivateDecl::Create(Context,
-                                               getCurLexicalContext(),
-                                               Loc, Vars);
+  OMPThreadPrivateDecl *D = 0;
+  if (!Vars.empty()) {
+    D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
+                                     Vars);
+    D->setAccess(AS_public);
+  }
+  return D;
 }
 
 namespace {
@@ -592,7 +618,7 @@
       DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD);
       if (DVar.CKind != OMPC_unknown) {
         if (DKind == OMPD_task && DVar.CKind != OMPC_shared &&
-            DVar.CKind != OMPC_threadprivate && !DVar.RefExpr)
+            !Stack->isThreadPrivate(VD) && !DVar.RefExpr)
           ImplicitFirstprivate.push_back(DVar.RefExpr);
         return;
       }
@@ -680,6 +706,10 @@
     Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt,
                                        StartLoc, EndLoc);
     break;
+  case OMPD_simd:
+    Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt,
+                                   StartLoc, EndLoc);
+    break;
   case OMPD_threadprivate:
   case OMPD_task:
     llvm_unreachable("OpenMP Directive is not allowed");
@@ -702,6 +732,185 @@
                                             Clauses, AStmt));
 }
 
+StmtResult Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses,
+                                          Stmt *AStmt,
+                                          SourceLocation StartLoc,
+                                          SourceLocation EndLoc) {
+  Stmt *CStmt = AStmt;
+  while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(CStmt))
+    CStmt = CS->getCapturedStmt();
+  while (AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(CStmt))
+    CStmt = AS->getSubStmt();
+  ForStmt *For = dyn_cast<ForStmt>(CStmt);
+  if (!For) {
+    Diag(CStmt->getLocStart(), diag::err_omp_not_for)
+      << getOpenMPDirectiveName(OMPD_simd);
+    return StmtError();
+  }
+
+  // FIXME: Checking loop canonical form, collapsing etc.
+
+  getCurFunction()->setHasBranchProtectedScope();
+  return Owned(OMPSimdDirective::Create(Context, StartLoc, EndLoc,
+                                        Clauses, AStmt));
+}
+
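
Minimal user code the new handler is meant to accept (illustrative sketch, not from this patch; 'axpy' is a made-up name): the statement associated with '#pragma omp simd' must be a for loop, otherwise err_omp_not_for fires.

    void axpy(float a, const float *x, float *y, int n) {
      #pragma omp simd
      for (int i = 0; i < n; ++i)  // associated statement must be a for loop
        y[i] += a * x[i];
    }
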
+OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
+                                             Expr *Expr,
+                                             SourceLocation StartLoc,
+                                             SourceLocation LParenLoc,
+                                             SourceLocation EndLoc) {
+  OMPClause *Res = 0;
+  switch (Kind) {
+  case OMPC_if:
+    Res = ActOnOpenMPIfClause(Expr, StartLoc, LParenLoc, EndLoc);
+    break;
+  case OMPC_num_threads:
+    Res = ActOnOpenMPNumThreadsClause(Expr, StartLoc, LParenLoc, EndLoc);
+    break;
+  case OMPC_safelen:
+    Res = ActOnOpenMPSafelenClause(Expr, StartLoc, LParenLoc, EndLoc);
+    break;
+  case OMPC_default:
+  case OMPC_private:
+  case OMPC_firstprivate:
+  case OMPC_shared:
+  case OMPC_copyin:
+  case OMPC_threadprivate:
+  case OMPC_unknown:
+  case NUM_OPENMP_CLAUSES:
+    llvm_unreachable("Clause is not allowed.");
+  }
+  return Res;
+}
+
+OMPClause *Sema::ActOnOpenMPIfClause(Expr *Condition,
+                                     SourceLocation StartLoc,
+                                     SourceLocation LParenLoc,
+                                     SourceLocation EndLoc) {
+  Expr *ValExpr = Condition;
+  if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
+      !Condition->isInstantiationDependent() &&
+      !Condition->containsUnexpandedParameterPack()) {
+    ExprResult Val = ActOnBooleanCondition(DSAStack->getCurScope(),
+                                           Condition->getExprLoc(),
+                                           Condition);
+    if (Val.isInvalid())
+      return 0;
+
+    ValExpr = Val.take();
+  }
+
+  return new (Context) OMPIfClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+ExprResult Sema::PerformImplicitIntegerConversion(SourceLocation Loc,
+                                                  Expr *Op) {
+  if (!Op)
+    return ExprError();
+
+  class IntConvertDiagnoser : public ICEConvertDiagnoser {
+  public:
+    IntConvertDiagnoser()
+        : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false,
+                              false, true) {}
+    SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+                                         QualType T) override {
+      return S.Diag(Loc, diag::err_omp_not_integral) << T;
+    }
+    SemaDiagnosticBuilder diagnoseIncomplete(
+        Sema &S, SourceLocation Loc, QualType T) override {
+      return S.Diag(Loc, diag::err_omp_incomplete_type) << T;
+    }
+    SemaDiagnosticBuilder diagnoseExplicitConv(
+        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
+      return S.Diag(Loc, diag::err_omp_explicit_conversion) << T << ConvTy;
+    }
+    SemaDiagnosticBuilder noteExplicitConv(
+        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+      return S.Diag(Conv->getLocation(), diag::note_omp_conversion_here)
+               << ConvTy->isEnumeralType() << ConvTy;
+    }
+    SemaDiagnosticBuilder diagnoseAmbiguous(
+        Sema &S, SourceLocation Loc, QualType T) override {
+      return S.Diag(Loc, diag::err_omp_ambiguous_conversion) << T;
+    }
+    SemaDiagnosticBuilder noteAmbiguous(
+        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+      return S.Diag(Conv->getLocation(), diag::note_omp_conversion_here)
+               << ConvTy->isEnumeralType() << ConvTy;
+    }
+    SemaDiagnosticBuilder diagnoseConversion(
+        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
+      llvm_unreachable("conversion functions are permitted");
+    }
+  } ConvertDiagnoser;
+  return PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser);
+}
+
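
Illustrative user code for the contextual integer conversion above (a sketch, not part of this patch; 'ThreadCount' and 'spawn' are made-up names): a class type with an implicit conversion to an integral type should be usable where an integer expression is required, e.g. in num_threads.

    struct ThreadCount {
      operator int() const { return 4; }  // contextual conversion to an integer
    };

    void spawn() {
      ThreadCount tc;
      #pragma omp parallel num_threads(tc)  // converted via operator int()
      { }
    }
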
+OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
+                                             SourceLocation StartLoc,
+                                             SourceLocation LParenLoc,
+                                             SourceLocation EndLoc) {
+  Expr *ValExpr = NumThreads;
+  if (!NumThreads->isValueDependent() && !NumThreads->isTypeDependent() &&
+      !NumThreads->isInstantiationDependent() &&
+      !NumThreads->containsUnexpandedParameterPack()) {
+    SourceLocation NumThreadsLoc = NumThreads->getLocStart();
+    ExprResult Val =
+        PerformImplicitIntegerConversion(NumThreadsLoc, NumThreads);
+    if (Val.isInvalid())
+      return 0;
+
+    ValExpr = Val.take();
+
+    // OpenMP [2.5, Restrictions]
+    //  The num_threads expression must evaluate to a positive integer value.
+    llvm::APSInt Result;
+    if (ValExpr->isIntegerConstantExpr(Result, Context) &&
+        Result.isSigned() && !Result.isStrictlyPositive()) {
+      Diag(NumThreadsLoc, diag::err_omp_negative_expression_in_clause)
+          << "num_threads" << NumThreads->getSourceRange();
+      return 0;
+    }
+  }
+
+  return new (Context) OMPNumThreadsClause(ValExpr, StartLoc, LParenLoc,
+                                           EndLoc);
+}
+
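
A minimal sketch of the restriction checked above (illustrative, not part of this patch): when the num_threads argument is an integer constant expression, it must be strictly positive.

    void run_parallel() {
      #pragma omp parallel num_threads(4)  // OK: positive constant
      { }
      // num_threads(0) would be diagnosed: not strictly positive
    }
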
+ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
+                                                       OpenMPClauseKind CKind) {
+  if (!E)
+    return ExprError();
+  if (E->isValueDependent() || E->isTypeDependent() ||
+      E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
+    return Owned(E);
+  llvm::APSInt Result;
+  ExprResult ICE = VerifyIntegerConstantExpression(E, &Result);
+  if (ICE.isInvalid())
+    return ExprError();
+  if (!Result.isStrictlyPositive()) {
+    Diag(E->getExprLoc(), diag::err_omp_negative_expression_in_clause)
+        << getOpenMPClauseName(CKind) << E->getSourceRange();
+    return ExprError();
+  }
+  return ICE;
+}
+
+OMPClause *Sema::ActOnOpenMPSafelenClause(Expr *Len, SourceLocation StartLoc,
+                                          SourceLocation LParenLoc,
+                                          SourceLocation EndLoc) {
+  // OpenMP [2.8.1, simd construct, Description]
+  // The parameter of the safelen clause must be a constant
+  // positive integer expression.
+  ExprResult Safelen = VerifyPositiveIntegerConstantInClause(Len, OMPC_safelen);
+  if (Safelen.isInvalid())
+    return 0;
+  return new (Context)
+      OMPSafelenClause(Safelen.take(), StartLoc, LParenLoc, EndLoc);
+}
+
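
Illustrative usage of the safelen handling above (a sketch, not part of this patch; 'scale' is a made-up name): the clause parameter must be a constant positive integer expression.

    void scale(float *a, int n) {
      #pragma omp simd safelen(8)  // constant positive integer expression
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    }
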
 OMPClause *Sema::ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
                                          unsigned Argument,
                                          SourceLocation ArgumentLoc,
@@ -715,9 +924,13 @@
       ActOnOpenMPDefaultClause(static_cast<OpenMPDefaultClauseKind>(Argument),
                                ArgumentLoc, StartLoc, LParenLoc, EndLoc);
     break;
+  case OMPC_if:
+  case OMPC_num_threads:
+  case OMPC_safelen:
   case OMPC_private:
   case OMPC_firstprivate:
   case OMPC_shared:
+  case OMPC_copyin:
   case OMPC_threadprivate:
   case OMPC_unknown:
   case NUM_OPENMP_CLAUSES:
@@ -733,7 +946,9 @@
                                           SourceLocation EndLoc) {
   if (Kind == OMPC_DEFAULT_unknown) {
     std::string Values;
-    std::string Sep(NUM_OPENMP_DEFAULT_KINDS > 1 ? ", " : "");
+    static_assert(NUM_OPENMP_DEFAULT_KINDS > 1,
+                  "NUM_OPENMP_DEFAULT_KINDS not greater than 1");
+    std::string Sep(", ");
     for (unsigned i = OMPC_DEFAULT_unknown + 1;
          i < NUM_OPENMP_DEFAULT_KINDS; ++i) {
       Values += "'";
@@ -761,7 +976,9 @@
   case OMPC_DEFAULT_shared:
     DSAStack->setDefaultDSAShared();
     break;
-  default:
+  case OMPC_DEFAULT_unknown:
+  case NUM_OPENMP_DEFAULT_KINDS:
+    llvm_unreachable("Clause kind is not allowed.");
     break;
   }
   return new (Context) OMPDefaultClause(Kind, KindKwLoc, StartLoc, LParenLoc,
@@ -784,6 +1001,12 @@
   case OMPC_shared:
     Res = ActOnOpenMPSharedClause(VarList, StartLoc, LParenLoc, EndLoc);
     break;
+  case OMPC_copyin:
+    Res = ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc, EndLoc);
+    break;
+  case OMPC_if:
+  case OMPC_num_threads:
+  case OMPC_safelen:
   case OMPC_default:
   case OMPC_threadprivate:
   case OMPC_unknown:
@@ -1114,9 +1337,10 @@
     SourceLocation ELoc = (*I)->getExprLoc();
     // OpenMP [2.1, C/C++]
     //  A list item is a variable name.
-    // OpenMP  [2.9.3.4, Restrictions, p.1]
-    //  A variable that is part of another variable (as an array or
-    //  structure element) cannot appear in a private clause.
+    // OpenMP  [2.14.3.2, Restrictions, p.1]
+    //  A variable that is part of another variable (as an array or structure
+    //  element) cannot appear in a shared clause unless it is a static data
+    //  member of a C++ class.
     DeclRefExpr *DE = dyn_cast<DeclRefExpr>(*I);
     if (!DE || !isa<VarDecl>(DE->getDecl())) {
       Diag(ELoc, diag::err_omp_expected_var_name)
@@ -1159,4 +1383,84 @@
   return OMPSharedClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
 }
 
+OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
+                                         SourceLocation StartLoc,
+                                         SourceLocation LParenLoc,
+                                         SourceLocation EndLoc) {
+  SmallVector<Expr *, 8> Vars;
+  for (ArrayRef<Expr *>::iterator I = VarList.begin(), E = VarList.end();
+       I != E; ++I) {
+    assert(*I && "NULL expr in OpenMP copyin clause.");
+    if (isa<DependentScopeDeclRefExpr>(*I)) {
+      // It will be analyzed later.
+      Vars.push_back(*I);
+      continue;
+    }
+
+    SourceLocation ELoc = (*I)->getExprLoc();
+    // OpenMP [2.1, C/C++]
+    //  A list item is a variable name.
+    // OpenMP  [2.14.4.1, Restrictions, p.1]
+    //  A list item that appears in a copyin clause must be threadprivate.
+    DeclRefExpr *DE = dyn_cast<DeclRefExpr>(*I);
+    if (!DE || !isa<VarDecl>(DE->getDecl())) {
+      Diag(ELoc, diag::err_omp_expected_var_name)
+        << (*I)->getSourceRange();
+      continue;
+    }
+
+    Decl *D = DE->getDecl();
+    VarDecl *VD = cast<VarDecl>(D);
+
+    QualType Type = VD->getType();
+    if (Type->isDependentType() || Type->isInstantiationDependentType()) {
+      // It will be analyzed later.
+      Vars.push_back(DE);
+      continue;
+    }
+
+    // OpenMP [2.14.4.1, Restrictions, C/C++, p.1]
+    //  A list item that appears in a copyin clause must be threadprivate.
+    if (!DSAStack->isThreadPrivate(VD)) {
+      Diag(ELoc, diag::err_omp_required_access)
+           << getOpenMPClauseName(OMPC_copyin)
+           << getOpenMPDirectiveName(OMPD_threadprivate);
+      continue;
+    }
+
+    // OpenMP [2.14.4.1, Restrictions, C/C++, p.2]
+    //  A variable of class type (or array thereof) that appears in a
+    //  copyin clause requires an accessible, unambiguous copy assignment
+    //  operator for the class type.
+    Type = Context.getBaseElementType(Type);
+    CXXRecordDecl *RD = getLangOpts().CPlusPlus ?
+                          Type->getAsCXXRecordDecl() : 0;
+    if (RD) {
+      CXXMethodDecl *MD = LookupCopyingAssignment(RD, 0, false, 0);
+      DeclAccessPair FoundDecl = DeclAccessPair::make(MD, MD->getAccess());
+      if (!MD ||
+          CheckMemberAccess(ELoc, RD, FoundDecl) == AR_inaccessible ||
+          MD->isDeleted()) {
+        Diag(ELoc, diag::err_omp_required_method)
+             << getOpenMPClauseName(OMPC_copyin) << 2;
+        bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+                      VarDecl::DeclarationOnly;
+        Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl :
+                                         diag::note_defined_here) << VD;
+        Diag(RD->getLocation(), diag::note_previous_decl) << RD;
+        continue;
+      }
+      MarkFunctionReferenced(ELoc, MD);
+      DiagnoseUseOfDecl(MD, ELoc);
+    }
+
+    DSAStack->addDSA(VD, DE, OMPC_copyin);
+    Vars.push_back(DE);
+  }
+
+  if (Vars.empty()) return 0;
+
+  return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+}
+
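
Illustrative user code for the copyin checks above (a sketch, not part of this patch; names are made up): the list item must be threadprivate, and a class-typed item additionally needs an accessible copy assignment operator.

    int counter = 0;
    #pragma omp threadprivate(counter)  // copyin list items must be threadprivate

    void broadcast() {
      counter = 42;
      #pragma omp parallel copyin(counter)
      {
        // each thread's copy of 'counter' starts out as 42
      }
    }
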
 #undef DSAStack
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 802f2b7..ee1feb5 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -930,24 +930,15 @@
       (OldIsUsingDecl || NewIsUsingDecl) && CurContext->isRecord() &&
       !New->getFriendObjectKind();
 
-    if (FunctionTemplateDecl *OldT = dyn_cast<FunctionTemplateDecl>(OldD)) {
-      if (!IsOverload(New, OldT->getTemplatedDecl(), UseMemberUsingDeclRules)) {
-        if (UseMemberUsingDeclRules && OldIsUsingDecl) {
-          HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I));
-          continue;
-        }
-
-        Match = *I;
-        return Ovl_Match;
-      }
-    } else if (FunctionDecl *OldF = dyn_cast<FunctionDecl>(OldD)) {
+    if (FunctionDecl *OldF = OldD->getAsFunction()) {
       if (!IsOverload(New, OldF, UseMemberUsingDeclRules)) {
         if (UseMemberUsingDeclRules && OldIsUsingDecl) {
           HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I));
           continue;
         }
 
-        if (!shouldLinkPossiblyHiddenDecl(*I, New))
+        if (!isa<FunctionTemplateDecl>(OldD) &&
+            !shouldLinkPossiblyHiddenDecl(*I, New))
           continue;
 
         Match = *I;
@@ -1008,16 +999,16 @@
       isa<FunctionNoProtoType>(NewQType.getTypePtr()))
     return false;
 
-  const FunctionProtoType* OldType = cast<FunctionProtoType>(OldQType);
-  const FunctionProtoType* NewType = cast<FunctionProtoType>(NewQType);
+  const FunctionProtoType *OldType = cast<FunctionProtoType>(OldQType);
+  const FunctionProtoType *NewType = cast<FunctionProtoType>(NewQType);
 
   // The signature of a function includes the types of its
   // parameters (C++ 1.3.10), which includes the presence or absence
   // of the ellipsis; see C++ DR 357).
   if (OldQType != NewQType &&
-      (OldType->getNumArgs() != NewType->getNumArgs() ||
+      (OldType->getNumParams() != NewType->getNumParams() ||
        OldType->isVariadic() != NewType->isVariadic() ||
-       !FunctionArgTypesAreEqual(OldType, NewType)))
+       !FunctionParamTypesAreEqual(OldType, NewType)))
     return true;
 
   // C++ [temp.over.link]p4:
@@ -1036,7 +1027,7 @@
       (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
                                        OldTemplate->getTemplateParameters(),
                                        false, TPL_TemplateMatch) ||
-       OldType->getResultType() != NewType->getResultType()))
+       OldType->getReturnType() != NewType->getReturnType()))
     return true;
 
   // If the function is a class member, its signature includes the
@@ -1085,6 +1076,22 @@
       return true;
   }
 
+  // enable_if attributes are an order-sensitive part of the signature.
+  for (specific_attr_iterator<EnableIfAttr>
+         NewI = New->specific_attr_begin<EnableIfAttr>(),
+         NewE = New->specific_attr_end<EnableIfAttr>(),
+         OldI = Old->specific_attr_begin<EnableIfAttr>(),
+         OldE = Old->specific_attr_end<EnableIfAttr>();
+       NewI != NewE || OldI != OldE; ++NewI, ++OldI) {
+    if (NewI == NewE || OldI == OldE)
+      return true;
+    llvm::FoldingSetNodeID NewID, OldID;
+    NewI->getCond()->Profile(NewID, Context, true);
+    OldI->getCond()->Profile(OldID, Context, true);
+    if (NewID != OldID)
+      return true;
+  }
+
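
Illustrative user code for the rule above (a sketch, not part of this patch; 'pick' is a made-up name): declarations that differ only in their enable_if conditions now have distinct signatures and therefore overload instead of redeclaring.

    int pick(int n) __attribute__((enable_if(n > 0, "positive argument")));
    int pick(int n) __attribute__((enable_if(n <= 0, "non-positive argument")));
    // Identical conditions in the same order would declare the same function.
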
   // The signatures match; this is not an overload.
   return false;
 }
@@ -1127,6 +1134,7 @@
 
   if (UserDefResult == OR_Success) {
     ICS.setUserDefined();
+    ICS.UserDefined.Before.setAsIdentityConversion();
     // C++ [over.ics.user]p4:
     //   A conversion of an expression of class type to the same class
     //   type is given Exact Match rank, and a conversion of an
@@ -1298,7 +1306,9 @@
   bool AllowObjCWritebackConversion
     = getLangOpts().ObjCAutoRefCount && 
       (Action == AA_Passing || Action == AA_Sending);
-
+  if (getLangOpts().ObjC1)
+    CheckObjCBridgeRelatedConversions(From->getLocStart(),
+                                      ToType, From->getType(), From);
   ICS = clang::TryImplicitConversion(*this, From, ToType,
                                      /*SuppressUserConversions=*/false,
                                      AllowExplicit,
@@ -1362,7 +1372,7 @@
 ///
 /// \param ICK Will be set to the vector conversion kind, if this is a vector
 /// conversion.
-static bool IsVectorConversion(ASTContext &Context, QualType FromType,
+static bool IsVectorConversion(Sema &S, QualType FromType,
                                QualType ToType, ImplicitConversionKind &ICK) {
   // We need at least one of these types to be a vector type to have a vector
   // conversion.
@@ -1370,7 +1380,7 @@
     return false;
 
   // Identical types require no conversions.
-  if (Context.hasSameUnqualifiedType(FromType, ToType))
+  if (S.Context.hasSameUnqualifiedType(FromType, ToType))
     return false;
 
   // There are no conversions between extended vector types, only identity.
@@ -1392,9 +1402,8 @@
   // 2)lax vector conversions are permitted and the vector types are of the
   //   same size
   if (ToType->isVectorType() && FromType->isVectorType()) {
-    if (Context.areCompatibleVectorTypes(FromType, ToType) ||
-        (Context.getLangOpts().LaxVectorConversions &&
-         (Context.getTypeSize(FromType) == Context.getTypeSize(ToType)))) {
+    if (S.Context.areCompatibleVectorTypes(FromType, ToType) ||
+        S.isLaxVectorConversion(FromType, ToType)) {
       ICK = ICK_Vector_Conversion;
       return true;
     }
@@ -1425,7 +1434,6 @@
 
   // Standard conversions (C++ [conv])
   SCS.setAsIdentityConversion();
-  SCS.DeprecatedStringLiteralToCharPtr = false;
   SCS.IncompatibleObjC = false;
   SCS.setFromType(FromType);
   SCS.CopyConstructor = 0;
@@ -1524,7 +1532,7 @@
     FromType = S.Context.getArrayDecayedType(FromType);
 
     if (S.IsStringLiteralToNonConstPointerConversion(From, ToType)) {
-      // This conversion is deprecated. (C++ D.4).
+      // This conversion is deprecated in C++03 (D.4)
       SCS.DeprecatedStringLiteralToCharPtr = true;
 
       // For the purpose of ranking in overload resolution
@@ -1624,7 +1632,7 @@
                                          InOverloadResolution, FromType)) {
     // Pointer to member conversions (4.11).
     SCS.Second = ICK_Pointer_Member;
-  } else if (IsVectorConversion(S.Context, FromType, ToType, SecondICK)) {
+  } else if (IsVectorConversion(S, FromType, ToType, SecondICK)) {
     SCS.Second = SecondICK;
     FromType = ToType.getUnqualifiedType();
   } else if (!S.getLangOpts().CPlusPlus &&
@@ -1707,9 +1715,7 @@
   // The field to initialize within the transparent union.
   RecordDecl *UD = UT->getDecl();
   // It's compatible if the expression matches any of the fields.
-  for (RecordDecl::field_iterator it = UD->field_begin(),
-       itend = UD->field_end();
-       it != itend; ++it) {
+  for (const auto *it : UD->fields()) {
     if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS,
                              CStyle, /*ObjCWritebackConversion=*/false)) {
       ToType = it->getType();
@@ -2288,17 +2294,17 @@
 
     // Perform the quick checks that will tell us whether these
     // function types are obviously different.
-    if (FromFunctionType->getNumArgs() != ToFunctionType->getNumArgs() ||
+    if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
         FromFunctionType->isVariadic() != ToFunctionType->isVariadic() ||
         FromFunctionType->getTypeQuals() != ToFunctionType->getTypeQuals())
       return false;
 
     bool HasObjCConversion = false;
-    if (Context.getCanonicalType(FromFunctionType->getResultType())
-          == Context.getCanonicalType(ToFunctionType->getResultType())) {
+    if (Context.getCanonicalType(FromFunctionType->getReturnType()) ==
+        Context.getCanonicalType(ToFunctionType->getReturnType())) {
       // Okay, the types match exactly. Nothing to do.
-    } else if (isObjCPointerConversion(FromFunctionType->getResultType(),
-                                       ToFunctionType->getResultType(),
+    } else if (isObjCPointerConversion(FromFunctionType->getReturnType(),
+                                       ToFunctionType->getReturnType(),
                                        ConvertedType, IncompatibleObjC)) {
       // Okay, we have an Objective-C pointer conversion.
       HasObjCConversion = true;
@@ -2308,10 +2314,10 @@
     }
 
     // Check argument types.
-    for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
+    for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams();
          ArgIdx != NumArgs; ++ArgIdx) {
-      QualType FromArgType = FromFunctionType->getArgType(ArgIdx);
-      QualType ToArgType = ToFunctionType->getArgType(ArgIdx);
+      QualType FromArgType = FromFunctionType->getParamType(ArgIdx);
+      QualType ToArgType = ToFunctionType->getParamType(ArgIdx);
       if (Context.getCanonicalType(FromArgType)
             == Context.getCanonicalType(ToArgType)) {
         // Okay, the types match exactly. Nothing to do.
@@ -2436,7 +2442,7 @@
     
   // Perform the quick checks that will tell us whether these
   // function types are obviously different.
-  if (FromFunctionType->getNumArgs() != ToFunctionType->getNumArgs() ||
+  if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
       FromFunctionType->isVariadic() != ToFunctionType->isVariadic())
     return false;
     
@@ -2446,12 +2452,12 @@
     return false;
 
   bool IncompatibleObjC = false;
-  if (Context.hasSameType(FromFunctionType->getResultType(), 
-                          ToFunctionType->getResultType())) {
+  if (Context.hasSameType(FromFunctionType->getReturnType(),
+                          ToFunctionType->getReturnType())) {
     // Okay, the types match exactly. Nothing to do.
   } else {
-    QualType RHS = FromFunctionType->getResultType();
-    QualType LHS = ToFunctionType->getResultType();
+    QualType RHS = FromFunctionType->getReturnType();
+    QualType LHS = ToFunctionType->getReturnType();
     if ((!getLangOpts().CPlusPlus || !RHS->isRecordType()) &&
         !RHS.hasQualifiers() && LHS.hasQualifiers())
        LHS = LHS.getUnqualifiedType();
@@ -2469,11 +2475,11 @@
    }
     
    // Check argument types.
-   for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
+   for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams();
         ArgIdx != NumArgs; ++ArgIdx) {
      IncompatibleObjC = false;
-     QualType FromArgType = FromFunctionType->getArgType(ArgIdx);
-     QualType ToArgType = ToFunctionType->getArgType(ArgIdx);
+     QualType FromArgType = FromFunctionType->getParamType(ArgIdx);
+     QualType ToArgType = ToFunctionType->getParamType(ArgIdx);
      if (Context.hasSameType(FromArgType, ToArgType)) {
        // Okay, the types match exactly. Nothing to do.
      } else if (isObjCPointerConversion(ToArgType, FromArgType,
@@ -2558,26 +2564,26 @@
     return;
   }
 
-  if (FromFunction->getNumArgs() != ToFunction->getNumArgs()) {
-    PDiag << ft_parameter_arity << ToFunction->getNumArgs()
-          << FromFunction->getNumArgs();
+  if (FromFunction->getNumParams() != ToFunction->getNumParams()) {
+    PDiag << ft_parameter_arity << ToFunction->getNumParams()
+          << FromFunction->getNumParams();
     return;
   }
 
   // Handle different parameter types.
   unsigned ArgPos;
-  if (!FunctionArgTypesAreEqual(FromFunction, ToFunction, &ArgPos)) {
+  if (!FunctionParamTypesAreEqual(FromFunction, ToFunction, &ArgPos)) {
     PDiag << ft_parameter_mismatch << ArgPos + 1
-          << ToFunction->getArgType(ArgPos)
-          << FromFunction->getArgType(ArgPos);
+          << ToFunction->getParamType(ArgPos)
+          << FromFunction->getParamType(ArgPos);
     return;
   }
 
   // Handle different return type.
-  if (!Context.hasSameType(FromFunction->getResultType(),
-                           ToFunction->getResultType())) {
-    PDiag << ft_return_type << ToFunction->getResultType()
-          << FromFunction->getResultType();
+  if (!Context.hasSameType(FromFunction->getReturnType(),
+                           ToFunction->getReturnType())) {
+    PDiag << ft_return_type << ToFunction->getReturnType()
+          << FromFunction->getReturnType();
     return;
   }
 
@@ -2592,19 +2598,21 @@
   PDiag << ft_default;
 }
 
-/// FunctionArgTypesAreEqual - This routine checks two function proto types
+/// FunctionParamTypesAreEqual - This routine checks two function proto types
 /// for equality of their argument types. Caller has already checked that
 /// they have same number of arguments.  If the parameters are different,
 /// ArgPos will have the parameter index of the first different parameter.
-bool Sema::FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
-                                    const FunctionProtoType *NewType,
-                                    unsigned *ArgPos) {
-  for (FunctionProtoType::arg_type_iterator O = OldType->arg_type_begin(),
-       N = NewType->arg_type_begin(),
-       E = OldType->arg_type_end(); O && (O != E); ++O, ++N) {
+bool Sema::FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
+                                      const FunctionProtoType *NewType,
+                                      unsigned *ArgPos) {
+  for (FunctionProtoType::param_type_iterator O = OldType->param_type_begin(),
+                                              N = NewType->param_type_begin(),
+                                              E = OldType->param_type_end();
+       O && (O != E); ++O, ++N) {
     if (!Context.hasSameType(O->getUnqualifiedType(),
                              N->getUnqualifiedType())) {
-      if (ArgPos) *ArgPos = O - OldType->arg_type_begin();
+      if (ArgPos)
+        *ArgPos = O - OldType->param_type_begin();
       return false;
     }
   }
@@ -2919,8 +2927,8 @@
                                               QualType Type) {
   const FunctionProtoType *CtorType =
       Constructor->getType()->getAs<FunctionProtoType>();
-  if (CtorType->getNumArgs() > 0) {
-    QualType FirstArg = CtorType->getArgType(0);
+  if (CtorType->getNumParams() > 0) {
+    QualType FirstArg = CtorType->getParamType(0);
     if (Context.hasSameUnqualifiedType(Type, FirstArg.getNonReferenceType()))
       return true;
   }
@@ -3241,18 +3249,15 @@
     IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
                             CandidateSet, false, false);
   if (OvResult == OR_Ambiguous)
-    Diag(From->getLocStart(),
-         diag::err_typecheck_ambiguous_condition)
-          << From->getType() << ToType << From->getSourceRange();
+    Diag(From->getLocStart(), diag::err_typecheck_ambiguous_condition)
+        << From->getType() << ToType << From->getSourceRange();
   else if (OvResult == OR_No_Viable_Function && !CandidateSet.empty()) {
     if (!RequireCompleteType(From->getLocStart(), ToType,
-                          diag::err_typecheck_nonviable_condition_incomplete,
+                             diag::err_typecheck_nonviable_condition_incomplete,
                              From->getType(), From->getSourceRange()))
-      Diag(From->getLocStart(),
-           diag::err_typecheck_nonviable_condition)
-           << From->getType() << From->getSourceRange() << ToType;
-  }
-  else
+      Diag(From->getLocStart(), diag::err_typecheck_nonviable_condition)
+          << From->getType() << From->getSourceRange() << ToType;
+  } else
     return false;
   CandidateSet.NoteCandidates(*this, OCD_AllCandidates, From);
   return true;
@@ -3262,37 +3267,43 @@
 /// of two user-defined conversion sequences to determine whether any ordering
 /// is possible.
 static ImplicitConversionSequence::CompareKind
-compareConversionFunctions(Sema &S,
-                           FunctionDecl *Function1,
+compareConversionFunctions(Sema &S, FunctionDecl *Function1,
                            FunctionDecl *Function2) {
   if (!S.getLangOpts().ObjC1 || !S.getLangOpts().CPlusPlus11)
     return ImplicitConversionSequence::Indistinguishable;
-  
+
   // Objective-C++:
   //   If both conversion functions are implicitly-declared conversions from
-  //   a lambda closure type to a function pointer and a block pointer, 
+  //   a lambda closure type to a function pointer and a block pointer,
   //   respectively, always prefer the conversion to a function pointer,
   //   because the function pointer is more lightweight and is more likely
   //   to keep code working.
-  CXXConversionDecl *Conv1 = dyn_cast<CXXConversionDecl>(Function1);
+  CXXConversionDecl *Conv1 = dyn_cast_or_null<CXXConversionDecl>(Function1);
   if (!Conv1)
     return ImplicitConversionSequence::Indistinguishable;
-    
+
   CXXConversionDecl *Conv2 = dyn_cast<CXXConversionDecl>(Function2);
   if (!Conv2)
     return ImplicitConversionSequence::Indistinguishable;
-  
+
   if (Conv1->getParent()->isLambda() && Conv2->getParent()->isLambda()) {
     bool Block1 = Conv1->getConversionType()->isBlockPointerType();
     bool Block2 = Conv2->getConversionType()->isBlockPointerType();
     if (Block1 != Block2)
-      return Block1? ImplicitConversionSequence::Worse 
-                   : ImplicitConversionSequence::Better;
+      return Block1 ? ImplicitConversionSequence::Worse
+                    : ImplicitConversionSequence::Better;
   }
 
   return ImplicitConversionSequence::Indistinguishable;
 }
-  
+
+static bool hasDeprecatedStringLiteralToCharPtrConversion(
+    const ImplicitConversionSequence &ICS) {
+  return (ICS.isStandard() && ICS.Standard.DeprecatedStringLiteralToCharPtr) ||
+         (ICS.isUserDefined() &&
+          ICS.UserDefined.Before.DeprecatedStringLiteralToCharPtr);
+}
+
 /// CompareImplicitConversionSequences - Compare two implicit
 /// conversion sequences to determine whether one is better than the
 /// other or if they are indistinguishable (C++ 13.3.3.2).
@@ -3315,6 +3326,32 @@
   //   described in 13.3.3.2, the ambiguous conversion sequence is
   //   treated as a user-defined sequence that is indistinguishable
   //   from any other user-defined conversion sequence.
+
+  // String literal to 'char *' conversion has been deprecated in C++03. It has
+  // been removed from C++11. We still accept this conversion, if it happens at
+  // the best viable function. Otherwise, this conversion is considered worse
+  // than ellipsis conversion. Consider this as an extension; this is not in the
+  // standard. For example:
+  //
+  // int &f(...);    // #1
+  // void f(char*);  // #2
+  // void g() { int &r = f("foo"); }
+  //
+  // In C++03, we pick #2 as the best viable function.
+  // In C++11, we pick #1 as the best viable function, because ellipsis
+  // conversion is better than string-literal to char* conversion (since there
+  // is no such conversion in C++11). If there was no #1 at all or #1 couldn't
+  // convert arguments, #2 would be the best viable function in C++11.
+  // If the best viable function has this conversion, a warning will be issued
+  // in C++03, or an ExtWarn (+SFINAE failure) will be issued in C++11.
+
+  if (S.getLangOpts().CPlusPlus11 && !S.getLangOpts().WritableStrings &&
+      hasDeprecatedStringLiteralToCharPtrConversion(ICS1) !=
+      hasDeprecatedStringLiteralToCharPtrConversion(ICS2))
+    return hasDeprecatedStringLiteralToCharPtrConversion(ICS1)
+               ? ImplicitConversionSequence::Worse
+               : ImplicitConversionSequence::Better;
+
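
For reference, a self-contained version of the example from the comment above (declarations only, illustrative):

    int &f(...);     // #1: matches via ellipsis conversion
    void f(char *);  // #2: needs the deprecated string-literal-to-char* conversion

    void g() {
      int &r = f("foo");  // C++11 picks #1; C++03 picks #2 with a warning
    }
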
   if (ICS1.getKindRank() < ICS2.getKindRank())
     return ImplicitConversionSequence::Better;
   if (ICS2.getKindRank() < ICS1.getKindRank())
@@ -3613,11 +3650,10 @@
   // }
   // Here, MSVC will call f(int) instead of generating a compile error
   // as clang will do in standard mode.
-  if (S.getLangOpts().MicrosoftMode &&
-      SCS1.Second == ICK_Integral_Conversion &&
-      SCS2.Second == ICK_Floating_Integral && 
+  if (S.getLangOpts().MSVCCompat && SCS1.Second == ICK_Integral_Conversion &&
+      SCS2.Second == ICK_Floating_Integral &&
       S.Context.getTypeSize(SCS1.getFromType()) ==
-      S.Context.getTypeSize(SCS1.getToType(2)))
+          S.Context.getTypeSize(SCS1.getToType(2)))
     return ImplicitConversionSequence::Better;
 
   return ImplicitConversionSequence::Indistinguishable;
@@ -4227,6 +4263,7 @@
       ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
       ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
       ICS.Standard.CopyConstructor = 0;
+      ICS.Standard.DeprecatedStringLiteralToCharPtr = false;
 
       // Nothing more to do: the inaccessibility/ambiguity check for
       // derived-to-base conversions is suppressed when we're
@@ -4301,6 +4338,7 @@
     ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
     ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
     ICS.Standard.CopyConstructor = 0;
+    ICS.Standard.DeprecatedStringLiteralToCharPtr = false;
     return ICS;
   }
 
@@ -4398,9 +4436,9 @@
   } else if (ICS.isUserDefined()) {
     // Don't allow rvalue references to bind to lvalues.
     if (DeclType->isRValueReferenceType()) {
-      if (const ReferenceType *RefType
-            = ICS.UserDefined.ConversionFunction->getResultType()
-                ->getAs<LValueReferenceType>()) {
+      if (const ReferenceType *RefType =
+              ICS.UserDefined.ConversionFunction->getReturnType()
+                  ->getAs<LValueReferenceType>()) {
         if (!RefType->getPointeeType()->isFunctionType()) {
           ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, Init, 
                      DeclType);
@@ -4408,7 +4446,7 @@
         }
       }
     }
-    
+    ICS.UserDefined.Before.setAsIdentityConversion();
     ICS.UserDefined.After.ReferenceBinding = true;
     ICS.UserDefined.After.IsLvalueReference = !isRValRef;
     ICS.UserDefined.After.BindsToFunctionLvalue = T2->isFunctionType();
@@ -5291,7 +5329,7 @@
     TypeDiagnoserPartialDiag(ContextualImplicitConverter &Converter, Expr *From)
         : TypeDiagnoser(Converter.Suppress), Converter(Converter), From(From) {}
 
-    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
       Converter.diagnoseIncomplete(S, Loc, T) << From->getSourceRange();
     }
   } IncompleteDiagnoser(Converter, From);
@@ -5450,11 +5488,11 @@
 Sema::AddOverloadCandidate(FunctionDecl *Function,
                            DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
-                           OverloadCandidateSet& CandidateSet,
+                           OverloadCandidateSet &CandidateSet,
                            bool SuppressUserConversions,
                            bool PartialOverloading,
                            bool AllowExplicit) {
-  const FunctionProtoType* Proto
+  const FunctionProtoType *Proto
     = dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
   assert(Proto && "Functions without a prototype cannot be overloaded");
   assert(!Function->getDescribedFunctionTemplate() &&
@@ -5513,12 +5551,12 @@
   Candidate.IgnoreObjectArgument = false;
   Candidate.ExplicitCallArguments = Args.size();
 
-  unsigned NumArgsInProto = Proto->getNumArgs();
+  unsigned NumParams = Proto->getNumParams();
 
   // (C++ 13.3.2p2): A candidate function having fewer than m
   // parameters is viable only if it has an ellipsis in its parameter
   // list (8.3.5).
-  if ((Args.size() + (PartialOverloading && Args.size())) > NumArgsInProto &&
+  if ((Args.size() + (PartialOverloading && Args.size())) > NumParams &&
       !Proto->isVariadic()) {
     Candidate.Viable = false;
     Candidate.FailureKind = ovl_fail_too_many_arguments;
@@ -5550,12 +5588,12 @@
   // Determine the implicit conversion sequences for each of the
   // arguments.
   for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
-    if (ArgIdx < NumArgsInProto) {
+    if (ArgIdx < NumParams) {
       // (C++ 13.3.2p3): for F to be a viable function, there shall
       // exist for each argument an implicit conversion sequence
       // (13.3.3.1) that converts that argument to the corresponding
       // parameter of F.
-      QualType ParamType = Proto->getArgType(ArgIdx);
+      QualType ParamType = Proto->getParamType(ArgIdx);
       Candidate.Conversions[ArgIdx]
         = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
                                 SuppressUserConversions,
@@ -5566,7 +5604,7 @@
       if (Candidate.Conversions[ArgIdx].isBad()) {
         Candidate.Viable = false;
         Candidate.FailureKind = ovl_fail_bad_conversion;
-        break;
+        return;
       }
     } else {
       // (C++ 13.3.2p2): For the purposes of overload resolution, any
@@ -5575,6 +5613,78 @@
       Candidate.Conversions[ArgIdx].setEllipsis();
     }
   }
+
+  if (EnableIfAttr *FailedAttr = CheckEnableIf(Function, Args)) {
+    Candidate.Viable = false;
+    Candidate.FailureKind = ovl_fail_enable_if;
+    Candidate.DeductionFailure.Data = FailedAttr;
+    return;
+  }
+}
+
+static bool IsNotEnableIfAttr(Attr *A) { return !isa<EnableIfAttr>(A); }
+
+EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
+                                  bool MissingImplicitThis) {
+  // FIXME: specific_attr_iterator<EnableIfAttr> iterates in reverse order, but
+  // we need to find the first failing one.
+  if (!Function->hasAttrs())
+    return 0;
+  AttrVec Attrs = Function->getAttrs();
+  AttrVec::iterator E = std::remove_if(Attrs.begin(), Attrs.end(),
+                                       IsNotEnableIfAttr);
+  if (Attrs.begin() == E)
+    return 0;
+  std::reverse(Attrs.begin(), E);
+
+  SFINAETrap Trap(*this);
+
+  // Convert the arguments.
+  SmallVector<Expr *, 16> ConvertedArgs;
+  bool InitializationFailed = false;
+  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
+    if (i == 0 && !MissingImplicitThis && isa<CXXMethodDecl>(Function) &&
+        !cast<CXXMethodDecl>(Function)->isStatic() &&
+        !isa<CXXConstructorDecl>(Function)) {
+      CXXMethodDecl *Method = cast<CXXMethodDecl>(Function);
+      ExprResult R =
+        PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/0,
+                                            Method, Method);
+      if (R.isInvalid()) {
+        InitializationFailed = true;
+        break;
+      }
+      ConvertedArgs.push_back(R.take());
+    } else {
+      ExprResult R =
+        PerformCopyInitialization(InitializedEntity::InitializeParameter(
+                                                Context,
+                                                Function->getParamDecl(i)),
+                                  SourceLocation(),
+                                  Args[i]);
+      if (R.isInvalid()) {
+        InitializationFailed = true;
+        break;
+      }
+      ConvertedArgs.push_back(R.take());
+    }
+  }
+
+  if (InitializationFailed || Trap.hasErrorOccurred())
+    return cast<EnableIfAttr>(Attrs[0]);
+
+  for (AttrVec::iterator I = Attrs.begin(); I != E; ++I) {
+    APValue Result;
+    EnableIfAttr *EIA = cast<EnableIfAttr>(*I);
+    if (!EIA->getCond()->EvaluateWithSubstitution(
+            Result, Context, Function,
+            llvm::ArrayRef<const Expr*>(ConvertedArgs.data(),
+                                        ConvertedArgs.size())) ||
+        !Result.isInt() || !Result.getInt().getBoolValue()) {
+      return EIA;
+    }
+  }
+  return 0;
 }
 
 /// \brief Add all of the function declarations in the given function set to
@@ -5656,9 +5766,9 @@
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
-                         OverloadCandidateSet& CandidateSet,
+                         OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions) {
-  const FunctionProtoType* Proto
+  const FunctionProtoType *Proto
     = dyn_cast<FunctionProtoType>(Method->getType()->getAs<FunctionType>());
   assert(Proto && "Methods without a prototype cannot be overloaded");
   assert(!isa<CXXConstructorDecl>(Method) &&
@@ -5685,12 +5795,12 @@
   Candidate.IgnoreObjectArgument = false;
   Candidate.ExplicitCallArguments = Args.size();
 
-  unsigned NumArgsInProto = Proto->getNumArgs();
+  unsigned NumParams = Proto->getNumParams();
 
   // (C++ 13.3.2p2): A candidate function having fewer than m
   // parameters is viable only if it has an ellipsis in its parameter
   // list (8.3.5).
-  if (Args.size() > NumArgsInProto && !Proto->isVariadic()) {
+  if (Args.size() > NumParams && !Proto->isVariadic()) {
     Candidate.Viable = false;
     Candidate.FailureKind = ovl_fail_too_many_arguments;
     return;
@@ -5730,12 +5840,12 @@
   // Determine the implicit conversion sequences for each of the
   // arguments.
   for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
-    if (ArgIdx < NumArgsInProto) {
+    if (ArgIdx < NumParams) {
       // (C++ 13.3.2p3): for F to be a viable function, there shall
       // exist for each argument an implicit conversion sequence
       // (13.3.3.1) that converts that argument to the corresponding
       // parameter of F.
-      QualType ParamType = Proto->getArgType(ArgIdx);
+      QualType ParamType = Proto->getParamType(ArgIdx);
       Candidate.Conversions[ArgIdx + 1]
         = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
                                 SuppressUserConversions,
@@ -5745,15 +5855,22 @@
       if (Candidate.Conversions[ArgIdx + 1].isBad()) {
         Candidate.Viable = false;
         Candidate.FailureKind = ovl_fail_bad_conversion;
-        break;
+        return;
       }
     } else {
       // (C++ 13.3.2p2): For the purposes of overload resolution, any
       // argument for which there is no corresponding parameter is
-      // considered to ""match the ellipsis" (C+ 13.3.3.1.3).
+      // considered to "match the ellipsis" (C++ 13.3.3.1.3).
       Candidate.Conversions[ArgIdx + 1].setEllipsis();
     }
   }
+
+  if (EnableIfAttr *FailedAttr = CheckEnableIf(Method, Args, true)) {
+    Candidate.Viable = false;
+    Candidate.FailureKind = ovl_fail_enable_if;
+    Candidate.DeductionFailure.Data = FailedAttr;
+    return;
+  }
 }
 
 /// \brief Add a C++ member function template as a candidate to the candidate
@@ -5969,7 +6086,7 @@
     return;
   }
 
-  // We won't go through a user-define type conversion function to convert a
+  // We won't go through a user-defined type conversion function to convert a
   // derived to base as such conversions are given Conversion Rank. They only
   // go through a copy constructor. 13.3.3.1.2-p4 [over.ics.user]
   QualType FromCanon
@@ -6029,6 +6146,7 @@
         GetConversionRank(ICS.Standard.Second) != ICR_Exact_Match) {
       Candidate.Viable = false;
       Candidate.FailureKind = ovl_fail_final_conversion_not_exact;
+      return;
     }
 
     // C++0x [dcl.init.ref]p5:
@@ -6040,18 +6158,26 @@
         ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
       Candidate.Viable = false;
       Candidate.FailureKind = ovl_fail_bad_final_conversion;
+      return;
     }
     break;
 
   case ImplicitConversionSequence::BadConversion:
     Candidate.Viable = false;
     Candidate.FailureKind = ovl_fail_bad_final_conversion;
-    break;
+    return;
 
   default:
     llvm_unreachable(
            "Can only end up with a standard conversion sequence or failure");
   }
+
+  if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, ArrayRef<Expr*>())) {
+    Candidate.Viable = false;
+    Candidate.FailureKind = ovl_fail_enable_if;
+    Candidate.DeductionFailure.Data = FailedAttr;
+    return;
+  }
 }
 
 /// \brief Adds a conversion function template specialization
@@ -6151,12 +6277,12 @@
   Candidate.Conversions[0].UserDefined.After.setAsIdentityConversion();
 
   // Find the
-  unsigned NumArgsInProto = Proto->getNumArgs();
+  unsigned NumParams = Proto->getNumParams();
 
   // (C++ 13.3.2p2): A candidate function having fewer than m
   // parameters is viable only if it has an ellipsis in its parameter
   // list (8.3.5).
-  if (Args.size() > NumArgsInProto && !Proto->isVariadic()) {
+  if (Args.size() > NumParams && !Proto->isVariadic()) {
     Candidate.Viable = false;
     Candidate.FailureKind = ovl_fail_too_many_arguments;
     return;
@@ -6164,7 +6290,7 @@
 
   // Function types don't have any default arguments, so just check if
   // we have enough arguments.
-  if (Args.size() < NumArgsInProto) {
+  if (Args.size() < NumParams) {
     // Not enough arguments.
     Candidate.Viable = false;
     Candidate.FailureKind = ovl_fail_too_few_arguments;
@@ -6174,12 +6300,12 @@
   // Determine the implicit conversion sequences for each of the
   // arguments.
   for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
-    if (ArgIdx < NumArgsInProto) {
+    if (ArgIdx < NumParams) {
       // (C++ 13.3.2p3): for F to be a viable function, there shall
       // exist for each argument an implicit conversion sequence
       // (13.3.3.1) that converts that argument to the corresponding
       // parameter of F.
-      QualType ParamType = Proto->getArgType(ArgIdx);
+      QualType ParamType = Proto->getParamType(ArgIdx);
       Candidate.Conversions[ArgIdx + 1]
         = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
                                 /*SuppressUserConversions=*/false,
@@ -6189,7 +6315,7 @@
       if (Candidate.Conversions[ArgIdx + 1].isBad()) {
         Candidate.Viable = false;
         Candidate.FailureKind = ovl_fail_bad_conversion;
-        break;
+        return;
       }
     } else {
       // (C++ 13.3.2p2): For the purposes of overload resolution, any
@@ -6198,6 +6324,13 @@
       Candidate.Conversions[ArgIdx + 1].setEllipsis();
     }
   }
+
+  if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, ArrayRef<Expr*>())) {
+    Candidate.Viable = false;
+    Candidate.FailureKind = ovl_fail_enable_if;
+    Candidate.DeductionFailure.Data = FailedAttr;
+    return;
+  }
 }
 
 /// \brief Add overload candidates for overloaded operators that are
@@ -8109,6 +8242,47 @@
     }
   }
 
+  // Check for enable_if value-based overload resolution.
+  if (Cand1.Function && Cand2.Function &&
+      (Cand1.Function->hasAttr<EnableIfAttr>() ||
+       Cand2.Function->hasAttr<EnableIfAttr>())) {
+    // FIXME: The next several lines are just
+    // specific_attr_iterator<EnableIfAttr> but going in declaration order,
+    // instead of the reverse order in which they're stored in the AST.
+    AttrVec Cand1Attrs;
+    AttrVec::iterator Cand1E = Cand1Attrs.end();
+    if (Cand1.Function->hasAttrs()) {
+      Cand1Attrs = Cand1.Function->getAttrs();
+      Cand1E = std::remove_if(Cand1Attrs.begin(), Cand1Attrs.end(),
+                              IsNotEnableIfAttr);
+      std::reverse(Cand1Attrs.begin(), Cand1E);
+    }
+
+    AttrVec Cand2Attrs;
+    AttrVec::iterator Cand2E = Cand2Attrs.end();
+    if (Cand2.Function->hasAttrs()) {
+      Cand2Attrs = Cand2.Function->getAttrs();
+      Cand2E = std::remove_if(Cand2Attrs.begin(), Cand2Attrs.end(),
+                              IsNotEnableIfAttr);
+      std::reverse(Cand2Attrs.begin(), Cand2E);
+    }
+    for (AttrVec::iterator
+         Cand1I = Cand1Attrs.begin(), Cand2I = Cand2Attrs.begin();
+         Cand1I != Cand1E || Cand2I != Cand2E; ++Cand1I, ++Cand2I) {
+      if (Cand1I == Cand1E)
+        return false;
+      if (Cand2I == Cand2E)
+        return true;
+      llvm::FoldingSetNodeID Cand1ID, Cand2ID;
+      cast<EnableIfAttr>(*Cand1I)->getCond()->Profile(Cand1ID,
+                                                      S.getASTContext(), true);
+      cast<EnableIfAttr>(*Cand2I)->getCond()->Profile(Cand2ID,
+                                                      S.getASTContext(), true);
+      if (Cand1ID != Cand2ID)
+        return false;
+    }
+  }
+
   return false;
 }
 
@@ -8557,18 +8731,18 @@
   // at least / at most / exactly
   unsigned mode, modeCount;
   if (NumFormalArgs < MinParams) {
-    if (MinParams != FnTy->getNumArgs() ||
-        FnTy->isVariadic() || FnTy->isTemplateVariadic())
+    if (MinParams != FnTy->getNumParams() || FnTy->isVariadic() ||
+        FnTy->isTemplateVariadic())
       mode = 0; // "at least"
     else
       mode = 2; // "exactly"
     modeCount = MinParams;
   } else {
-    if (MinParams != FnTy->getNumArgs())
+    if (MinParams != FnTy->getNumParams())
       mode = 1; // "at most"
     else
       mode = 2; // "exactly"
-    modeCount = FnTy->getNumArgs();
+    modeCount = FnTy->getNumParams();
   }
 
   std::string Description;
@@ -8817,6 +8991,15 @@
       << (unsigned) FnKind << CalleeTarget << CallerTarget;
 }
 
+void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
+  FunctionDecl *Callee = Cand->Function;
+  EnableIfAttr *Attr = static_cast<EnableIfAttr*>(Cand->DeductionFailure.Data);
+
+  S.Diag(Callee->getLocation(),
+         diag::note_ovl_candidate_disabled_by_enable_if_attr)
+      << Attr->getCond()->getSourceRange() << Attr->getMessage();
+}
+
 /// Generates a 'note' diagnostic for an overload candidate.  We've
 /// already generated a primary error at the call site.
 ///
@@ -8880,6 +9063,9 @@
 
   case ovl_fail_bad_target:
     return DiagnoseBadTarget(S, Cand);
+
+  case ovl_fail_enable_if:
+    return DiagnoseFailedEnableIfAttr(S, Cand);
   }
 }
 
@@ -9160,15 +9346,14 @@
   }
 
   // Fill in the rest of the conversions.
-  unsigned NumArgsInProto = Proto->getNumArgs();
+  unsigned NumParams = Proto->getNumParams();
   for (; ConvIdx != ConvCount; ++ConvIdx, ++ArgIdx) {
-    if (ArgIdx < NumArgsInProto) {
-      Cand->Conversions[ConvIdx]
-        = TryCopyInitialization(S, Args[ArgIdx], Proto->getArgType(ArgIdx),
-                                SuppressUserConversions,
-                                /*InOverloadResolution=*/true,
-                                /*AllowObjCWritebackConversion=*/
-                                  S.getLangOpts().ObjCAutoRefCount);
+    if (ArgIdx < NumParams) {
+      Cand->Conversions[ConvIdx] = TryCopyInitialization(
+          S, Args[ArgIdx], Proto->getParamType(ArgIdx), SuppressUserConversions,
+          /*InOverloadResolution=*/true,
+          /*AllowObjCWritebackConversion=*/
+          S.getLangOpts().ObjCAutoRefCount);
       // Store the FixIt in the candidate if it exists.
       if (!Unfixable && Cand->Conversions[ConvIdx].isBad())
         Unfixable = !Cand->TryToFixBadConversion(ConvIdx, S);
@@ -9320,7 +9505,7 @@
   for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) {
     if (Cand->Specialization)
       Cands.push_back(Cand);
-    // Otherwise, this is a non matching builtin candidate.  We do not,
+    // Otherwise, this is a non-matching builtin candidate.  We do not,
     // in general, want to list every possible builtin candidate.
   }
 
@@ -9531,7 +9716,7 @@
       // If any candidate has a placeholder return type, trigger its deduction
       // now.
       if (S.getLangOpts().CPlusPlus1y &&
-          FunDecl->getResultType()->isUndeducedType() &&
+          FunDecl->getReturnType()->isUndeducedType() &&
           S.DeduceReturnType(FunDecl, SourceExpr->getLocStart(), Complain))
         return false;
 
@@ -9836,7 +10021,7 @@
   }
 
   if (Matched && getLangOpts().CPlusPlus1y &&
-      Matched->getResultType()->isUndeducedType() &&
+      Matched->getReturnType()->isUndeducedType() &&
       DeduceReturnType(Matched, ovl->getExprLoc(), Complain))
     return 0;
 
@@ -10198,7 +10383,7 @@
   LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
                  Sema::LookupOrdinaryName);
   FunctionCallFilterCCC Validator(SemaRef, Args.size(),
-                                  ExplicitTemplateArgs != 0);
+                                  ExplicitTemplateArgs != 0, false);
   NoTypoCorrectionCCC RejectAll;
   CorrectionCandidateCallback *CCC = AllowTypoCorrection ?
       (CorrectionCandidateCallback*)&Validator :
@@ -10280,7 +10465,7 @@
     // create a type dependent CallExpr. The goal is to postpone name lookup
     // to instantiation time to be able to search into type dependent base
     // classes.
-    if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() && 
+    if (getLangOpts().MSVCCompat && CurContext->isDependentContext() &&
         (isa<FunctionDecl>(CurContext) || isa<CXXRecordDecl>(CurContext))) {
       CallExpr *CE = new (Context) CallExpr(Context, Fn, Args,
                                             Context.DependentTy, VK_RValue,
@@ -10530,7 +10715,7 @@
         return ExprError();
 
       // Determine the result type.
-      QualType ResultTy = FnDecl->getResultType();
+      QualType ResultTy = FnDecl->getReturnType();
       ExprValueKind VK = Expr::getValueKindForType(ResultTy);
       ResultTy = ResultTy.getNonLValueExprType(Context);
 
@@ -10539,8 +10724,7 @@
         new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.take(), ArgsArray,
                                           ResultTy, VK, OpLoc, false);
 
-      if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
-                              FnDecl))
+      if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall, FnDecl))
         return ExprError();
 
       return MaybeBindToTemporary(TheCall);
@@ -10765,7 +10949,7 @@
           return ExprError();
 
         // Determine the result type.
-        QualType ResultTy = FnDecl->getResultType();
+        QualType ResultTy = FnDecl->getReturnType();
         ExprValueKind VK = Expr::getValueKindForType(ResultTy);
         ResultTy = ResultTy.getNonLValueExprType(Context);
 
@@ -10774,7 +10958,7 @@
                                             Args, ResultTy, VK, OpLoc,
                                             FPFeatures.fp_contract);
 
-        if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
+        if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
                                 FnDecl))
           return ExprError();
 
@@ -10981,7 +11165,7 @@
           return ExprError();
 
         // Determine the result type
-        QualType ResultTy = FnDecl->getResultType();
+        QualType ResultTy = FnDecl->getReturnType();
         ExprValueKind VK = Expr::getValueKindForType(ResultTy);
         ResultTy = ResultTy.getNonLValueExprType(Context);
 
@@ -10991,8 +11175,7 @@
                                             ResultTy, VK, RLoc,
                                             false);
 
-        if (CheckCallReturnType(FnDecl->getResultType(), LLoc, TheCall,
-                                FnDecl))
+        if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl))
           return ExprError();
 
         return MaybeBindToTemporary(TheCall);
@@ -11084,7 +11267,7 @@
 
     const FunctionProtoType *proto = fnType->castAs<FunctionProtoType>();
     QualType resultType = proto->getCallResultType(Context);
-    ExprValueKind valueKind = Expr::getValueKindForType(proto->getResultType());
+    ExprValueKind valueKind = Expr::getValueKindForType(proto->getReturnType());
 
     // Check that the object type isn't more qualified than the
     // member function we're calling.
@@ -11105,13 +11288,12 @@
         << qualsString
         << (qualsString.find(' ') == std::string::npos ? 1 : 2);
     }
-              
+
     CXXMemberCallExpr *call
       = new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
                                         resultType, valueKind, RParenLoc);
 
-    if (CheckCallReturnType(proto->getResultType(),
-                            op->getRHS()->getLocStart(),
+    if (CheckCallReturnType(proto->getReturnType(), op->getRHS()->getLocStart(),
                             call, 0))
       return ExprError();
 
@@ -11250,7 +11432,7 @@
     MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens());
   }
 
-  QualType ResultType = Method->getResultType();
+  QualType ResultType = Method->getReturnType();
   ExprValueKind VK = Expr::getValueKindForType(ResultType);
   ResultType = ResultType.getNonLValueExprType(Context);
 
@@ -11260,7 +11442,7 @@
                                     ResultType, VK, RParenLoc);
 
   // Check for a valid return type.
-  if (CheckCallReturnType(Method->getResultType(), MemExpr->getMemberLoc(),
+  if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
                           TheCall, Method))
     return ExprError();
 
@@ -11489,7 +11671,7 @@
   const FunctionProtoType *Proto =
     Method->getType()->getAs<FunctionProtoType>();
 
-  unsigned NumArgsInProto = Proto->getNumArgs();
+  unsigned NumParams = Proto->getNumParams();
 
   DeclarationNameInfo OpLocInfo(
                Context.DeclarationNames.getCXXOperatorName(OO_Call), LParenLoc);
@@ -11503,13 +11685,13 @@
 
   // Build the full argument list for the method call (the implicit object
   // parameter is placed at the beginning of the list).
-  llvm::OwningArrayPtr<Expr *> MethodArgs(new Expr*[Args.size() + 1]);
+  std::unique_ptr<Expr * []> MethodArgs(new Expr *[Args.size() + 1]);
   MethodArgs[0] = Object.get();
   std::copy(Args.begin(), Args.end(), &MethodArgs[1]);
 
   // Once we've built TheCall, all of the expressions are properly
   // owned.
-  QualType ResultTy = Method->getResultType();
+  QualType ResultTy = Method->getReturnType();
   ExprValueKind VK = Expr::getValueKindForType(ResultTy);
   ResultTy = ResultTy.getNonLValueExprType(Context);
 
@@ -11519,14 +11701,13 @@
                           ResultTy, VK, RParenLoc, false);
   MethodArgs.reset();
 
-  if (CheckCallReturnType(Method->getResultType(), LParenLoc, TheCall,
-                          Method))
+  if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
     return true;
 
   // We may have default arguments. If so, we need to allocate more
   // slots in the call for them.
-  if (Args.size() < NumArgsInProto)
-    TheCall->setNumArgs(Context, NumArgsInProto + 1);
+  if (Args.size() < NumParams)
+    TheCall->setNumArgs(Context, NumParams + 1);
 
   bool IsError = false;
 
@@ -11541,7 +11722,7 @@
   TheCall->setArg(0, Object.take());
 
   // Check the argument types.
-  for (unsigned i = 0; i != NumArgsInProto; i++) {
+  for (unsigned i = 0; i != NumParams; i++) {
     Expr *Arg;
     if (i < Args.size()) {
       Arg = Args[i];
@@ -11573,7 +11754,7 @@
   // If this is a variadic call, handle args passed through "...".
   if (Proto->isVariadic()) {
     // Promote the arguments (C99 6.5.2.2p7).
-    for (unsigned i = NumArgsInProto, e = Args.size(); i < e; i++) {
+    for (unsigned i = NumParams, e = Args.size(); i < e; i++) {
       ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod, 0);
       IsError |= Arg.isInvalid();
       TheCall->setArg(i + 1, Arg.take());
@@ -11692,15 +11873,14 @@
   if (FnExpr.isInvalid())
     return ExprError();
 
-  QualType ResultTy = Method->getResultType();
+  QualType ResultTy = Method->getReturnType();
   ExprValueKind VK = Expr::getValueKindForType(ResultTy);
   ResultTy = ResultTy.getNonLValueExprType(Context);
   CXXOperatorCallExpr *TheCall =
     new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr.take(),
                                       Base, ResultTy, VK, OpLoc, false);
 
-  if (CheckCallReturnType(Method->getResultType(), OpLoc, TheCall,
-                          Method))
+  if (CheckCallReturnType(Method->getReturnType(), OpLoc, TheCall, Method))
           return ExprError();
 
   return MaybeBindToTemporary(TheCall);
@@ -11761,7 +11941,7 @@
     ConvArgs[ArgIdx] = InputInit.take();
   }
 
-  QualType ResultTy = FD->getResultType();
+  QualType ResultTy = FD->getReturnType();
   ExprValueKind VK = Expr::getValueKindForType(ResultTy);
   ResultTy = ResultTy.getNonLValueExprType(Context);
 
@@ -11770,7 +11950,7 @@
                                      llvm::makeArrayRef(ConvArgs, Args.size()),
                                      ResultTy, VK, LitEndLoc, UDSuffixLoc);
 
-  if (CheckCallReturnType(FD->getResultType(), UDSuffixLoc, UDL, FD))
+  if (CheckCallReturnType(FD->getReturnType(), UDSuffixLoc, UDL, FD))
     return ExprError();
 
   if (CheckFunctionCall(FD, UDL, NULL))
@@ -11893,7 +12073,7 @@
         // Do nothing: static member functions aren't any different
         // from non-member functions.
       } else {
-        // Fix the sub expression, which really has to be an
+        // Fix the subexpression, which really has to be an
         // UnresolvedLookupExpr holding an overloaded member function
         // or template.
         Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
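
The SemaOverload.cpp changes above wire __attribute__((enable_if)) into overload
resolution: CheckEnableIf() re-evaluates each attribute condition against the
converted call arguments, failing candidates are marked ovl_fail_enable_if and
explained by DiagnoseFailedEnableIfAttr(), and isBetterOverloadCandidate() breaks
ties in favour of the candidate with the longer list of matching conditions. A
minimal source-level sketch of the behaviour this enables (the function name
'pick' and the bounds are invented for illustration):

// Guarded candidate: dropped from overload resolution when the enable_if
// condition, evaluated with the converted call arguments, does not fold to true.
int pick(int n)
    __attribute__((enable_if(n >= 0 && n < 16, "n must be in [0, 16)"))) {
  return n;
}

// Plain fallback candidate, viable via the int -> long conversion.
int pick(long n) { return -1; }

int main() {
  int a = pick(3);    // condition folds to true: the guarded int overload wins
  int b = pick(100);  // condition folds to false: candidate dropped, long used
  return (a == 3 && b == -1) ? 0 : 1;
}
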
diff --git a/lib/Sema/SemaPseudoObject.cpp b/lib/Sema/SemaPseudoObject.cpp
index af74f0d..de02fa9 100644
--- a/lib/Sema/SemaPseudoObject.cpp
+++ b/lib/Sema/SemaPseudoObject.cpp
@@ -235,7 +235,10 @@
     }
 
     /// Return true if assignments have a non-void result.
-    bool CanCaptureValueOfType(QualType ty) {
+    bool CanCaptureValue(Expr *exp) {
+      if (exp->isGLValue())
+        return true;
+      QualType ty = exp->getType();
       assert(!ty->isIncompleteType());
       assert(!ty->isDependentType());
 
@@ -280,10 +283,10 @@
     bool findSetter(bool warn=true);
     bool findGetter();
 
-    Expr *rebuildAndCaptureObject(Expr *syntacticBase);
-    ExprResult buildGet();
-    ExprResult buildSet(Expr *op, SourceLocation, bool);
-    ExprResult complete(Expr *SyntacticForm);
+    Expr *rebuildAndCaptureObject(Expr *syntacticBase) override;
+    ExprResult buildGet() override;
+    ExprResult buildSet(Expr *op, SourceLocation, bool) override;
+    ExprResult complete(Expr *SyntacticForm) override;
 
     bool isWeakProperty() const;
   };
@@ -311,13 +314,13 @@
                                        SourceLocation opLoc,
                                        BinaryOperatorKind opcode,
                                        Expr *LHS, Expr *RHS);
-   Expr *rebuildAndCaptureObject(Expr *syntacticBase);
-   
+   Expr *rebuildAndCaptureObject(Expr *syntacticBase) override;
+
    bool findAtIndexGetter();
    bool findAtIndexSetter();
-  
-   ExprResult buildGet();
-   ExprResult buildSet(Expr *op, SourceLocation, bool);
+
+   ExprResult buildGet() override;
+   ExprResult buildSet(Expr *op, SourceLocation, bool) override;
  };
 
  class MSPropertyOpBuilder : public PseudoOpBuilder {
@@ -328,9 +331,9 @@
      PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
      RefExpr(refExpr) {}
 
-   Expr *rebuildAndCaptureObject(Expr *);
-   ExprResult buildGet();
-   ExprResult buildSet(Expr *op, SourceLocation, bool);
+   Expr *rebuildAndCaptureObject(Expr *) override;
+   ExprResult buildGet() override;
+   ExprResult buildSet(Expr *op, SourceLocation, bool) override;
  };
 }
 
@@ -461,7 +464,7 @@
 
   // That's the postfix result.
   if (UnaryOperator::isPostfix(opcode) &&
-      (result.get()->isTypeDependent() || CanCaptureValueOfType(resultType))) {
+      (result.get()->isTypeDependent() || CanCaptureValue(result.get()))) {
     result = capture(result.take());
     setResultToLastSemantic();
   }
@@ -540,7 +543,7 @@
 
     T = Prop->getType();
   } else if (Getter) {
-    T = Getter->getResultType();
+    T = Getter->getReturnType();
   } else {
     return false;
   }
@@ -621,7 +624,7 @@
         if (ObjCPropertyDecl *prop1 = IFace->FindPropertyDeclaration(AltMember))
           if (prop != prop1 && (prop1->getSetterMethodDecl() == setter)) {
             S.Diag(RefExpr->getExprLoc(), diag::error_property_setter_ambiguous_use)
-              << prop->getName() << prop1->getName() << setter->getSelector();
+              << prop << prop1 << setter->getSelector();
             S.Diag(prop->getLocation(), diag::note_property_declare);
             S.Diag(prop1->getLocation(), diag::note_property_declare);
           }
@@ -762,7 +765,7 @@
     ObjCMessageExpr *msgExpr =
       cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
     Expr *arg = msgExpr->getArg(0);
-    if (CanCaptureValueOfType(arg->getType()))
+    if (CanCaptureValue(arg))
       msgExpr->setArg(0, captureValueAsResult(arg));
   }
 
@@ -813,7 +816,7 @@
   assert(Getter && "property has no setter and no getter!");
 
   // Only do this if the getter returns an l-value reference type.
-  QualType resultType = Getter->getResultType();
+  QualType resultType = Getter->getReturnType();
   if (!resultType->isLValueReferenceType()) return false;
 
   result = buildRValueOperation(op);
@@ -1011,8 +1014,6 @@
   
   // Look for a conversion to an integral, enumeration type, or
   // objective-C pointer type.
-  UnresolvedSet<4> ViableConversions;
-  UnresolvedSet<4> ExplicitConversions;
   std::pair<CXXRecordDecl::conversion_iterator,
             CXXRecordDecl::conversion_iterator> Conversions
     = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
@@ -1172,7 +1173,7 @@
              diag::note_parameter_type) << T;
       return false;
     }
-    QualType R = AtIndexGetter->getResultType();
+    QualType R = AtIndexGetter->getReturnType();
     if (!R->isObjCObjectPointerType()) {
       S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
              diag::err_objc_indexing_method_result_type) << R << arrayRef;
@@ -1239,18 +1240,15 @@
                          BaseT->isObjCQualifiedIdType());
 
   if (!AtIndexSetter && S.getLangOpts().DebuggerObjCLiteral) {
-    TypeSourceInfo *ResultTInfo = 0;
+    TypeSourceInfo *ReturnTInfo = 0;
     QualType ReturnType = S.Context.VoidTy;
-    AtIndexSetter = ObjCMethodDecl::Create(S.Context, SourceLocation(),
-                           SourceLocation(), AtIndexSetterSelector,
-                           ReturnType,
-                           ResultTInfo,
-                           S.Context.getTranslationUnitDecl(),
-                           true /*Instance*/, false/*isVariadic*/,
-                           /*isPropertyAccessor=*/false,
-                           /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
-                           ObjCMethodDecl::Required,
-                           false); 
+    AtIndexSetter = ObjCMethodDecl::Create(
+        S.Context, SourceLocation(), SourceLocation(), AtIndexSetterSelector,
+        ReturnType, ReturnTInfo, S.Context.getTranslationUnitDecl(),
+        true /*Instance*/, false /*isVariadic*/,
+        /*isPropertyAccessor=*/false,
+        /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+        ObjCMethodDecl::Required, false);
     SmallVector<ParmVarDecl *, 2> Params;
     ParmVarDecl *object = ParmVarDecl::Create(S.Context, AtIndexSetter,
                                                 SourceLocation(), SourceLocation(),
@@ -1373,7 +1371,7 @@
     ObjCMessageExpr *msgExpr =
       cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
     Expr *arg = msgExpr->getArg(0);
-    if (CanCaptureValueOfType(arg->getType()))
+    if (CanCaptureValue(arg))
       msgExpr->setArg(0, captureValueAsResult(arg));
   }
   
@@ -1395,8 +1393,8 @@
 
 ExprResult MSPropertyOpBuilder::buildGet() {
   if (!RefExpr->getPropertyDecl()->hasGetter()) {
-    S.Diag(RefExpr->getMemberLoc(), diag::err_no_getter_for_property)
-      << RefExpr->getPropertyDecl()->getName();
+    S.Diag(RefExpr->getMemberLoc(), diag::err_no_accessor_for_property)
+      << 0 /* getter */ << RefExpr->getPropertyDecl();
     return ExprError();
   }
 
@@ -1410,8 +1408,9 @@
     RefExpr->isArrow() ? tok::arrow : tok::period, SS, SourceLocation(),
     GetterName, 0, true);
   if (GetterExpr.isInvalid()) {
-    S.Diag(RefExpr->getMemberLoc(), diag::error_cannot_find_suitable_getter)
-      << RefExpr->getPropertyDecl()->getName();
+    S.Diag(RefExpr->getMemberLoc(),
+           diag::error_cannot_find_suitable_accessor) << 0 /* getter */
+      << RefExpr->getPropertyDecl();
     return ExprError();
   }
 
@@ -1424,8 +1423,8 @@
 ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
                                          bool captureSetValueAsResult) {
   if (!RefExpr->getPropertyDecl()->hasSetter()) {
-    S.Diag(RefExpr->getMemberLoc(), diag::err_no_setter_for_property)
-      << RefExpr->getPropertyDecl()->getName();
+    S.Diag(RefExpr->getMemberLoc(), diag::err_no_accessor_for_property)
+      << 1 /* setter */ << RefExpr->getPropertyDecl();
     return ExprError();
   }
 
@@ -1439,8 +1438,9 @@
     RefExpr->isArrow() ? tok::arrow : tok::period, SS, SourceLocation(),
     SetterName, 0, true);
   if (SetterExpr.isInvalid()) {
-    S.Diag(RefExpr->getMemberLoc(), diag::error_cannot_find_suitable_setter)
-      << RefExpr->getPropertyDecl()->getName();
+    S.Diag(RefExpr->getMemberLoc(),
+           diag::error_cannot_find_suitable_accessor) << 1 /* setter */
+      << RefExpr->getPropertyDecl();
     return ExprError();
   }
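
The MSPropertyOpBuilder hunks above fold the getter/setter diagnostics into the
shared err_no_accessor_for_property / error_cannot_find_suitable_accessor forms
and stream the property declaration itself rather than its name. A small sketch
of the source pattern these paths handle, assuming Microsoft extensions are
enabled (-fms-extensions); the struct and accessor names are invented:

struct Counter {
  int get_n() { return n_; }
  void put_n(int v) { n_ = v; }
  // Property with both accessors: reads/writes go through buildGet()/buildSet().
  __declspec(property(get = get_n, put = put_n)) int n;
  // Property declared without a setter.
  __declspec(property(get = get_n)) int read_only;
  int n_;
};

int main() {
  Counter c;
  c.n = 5;            // rewritten to c.put_n(5)
  int x = c.n;        // rewritten to c.get_n()
  // c.read_only = 1; // would be rejected: no setter declared for this property
  return x == 5 ? 0 : 1;
}
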
 
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index 9bd8678..2be5d5c 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -114,25 +114,38 @@
   }
 }
 
-/// \brief Diagnose unused '==' and '!=' as likely typos for '=' or '|='.
+/// \brief Diagnose unused comparisons, both builtin and overloaded operators.
+/// For '==' and '!=', suggest fixits for '=' or '|='.
 ///
 /// Adding a cast to void (or other expression wrappers) will prevent the
 /// warning from firing.
 static bool DiagnoseUnusedComparison(Sema &S, const Expr *E) {
   SourceLocation Loc;
-  bool IsNotEqual, CanAssign;
+  bool IsNotEqual, CanAssign, IsRelational;
 
   if (const BinaryOperator *Op = dyn_cast<BinaryOperator>(E)) {
-    if (Op->getOpcode() != BO_EQ && Op->getOpcode() != BO_NE)
+    if (!Op->isComparisonOp())
       return false;
 
+    IsRelational = Op->isRelationalOp();
     Loc = Op->getOperatorLoc();
     IsNotEqual = Op->getOpcode() == BO_NE;
     CanAssign = Op->getLHS()->IgnoreParenImpCasts()->isLValue();
   } else if (const CXXOperatorCallExpr *Op = dyn_cast<CXXOperatorCallExpr>(E)) {
-    if (Op->getOperator() != OO_EqualEqual &&
-        Op->getOperator() != OO_ExclaimEqual)
+    switch (Op->getOperator()) {
+    default:
       return false;
+    case OO_EqualEqual:
+    case OO_ExclaimEqual:
+      IsRelational = false;
+      break;
+    case OO_Less:
+    case OO_Greater:
+    case OO_GreaterEqual:
+    case OO_LessEqual:
+      IsRelational = true;
+      break;
+    }
 
     Loc = Op->getOperatorLoc();
     IsNotEqual = Op->getOperator() == OO_ExclaimEqual;
@@ -148,11 +161,11 @@
     return false;
 
   S.Diag(Loc, diag::warn_unused_comparison)
-    << (unsigned)IsNotEqual << E->getSourceRange();
+    << (unsigned)IsRelational << (unsigned)IsNotEqual << E->getSourceRange();
 
   // If the LHS is a plausible entity to assign to, provide a fixit hint to
   // correct common typos.
-  if (CanAssign) {
+  if (!IsRelational && CanAssign) {
     if (IsNotEqual)
       S.Diag(Loc, diag::note_inequality_comparison_to_or_assign)
         << FixItHint::CreateReplacement(Loc, "|=");
@@ -216,17 +229,17 @@
     // is written in a macro body, only warn if it has the warn_unused_result
     // attribute.
     if (const Decl *FD = CE->getCalleeDecl()) {
-      if (FD->getAttr<WarnUnusedResultAttr>()) {
+      if (FD->hasAttr<WarnUnusedResultAttr>()) {
         Diag(Loc, diag::warn_unused_result) << R1 << R2;
         return;
       }
       if (ShouldSuppress)
         return;
-      if (FD->getAttr<PureAttr>()) {
+      if (FD->hasAttr<PureAttr>()) {
         Diag(Loc, diag::warn_unused_call) << R1 << R2 << "pure";
         return;
       }
-      if (FD->getAttr<ConstAttr>()) {
+      if (FD->hasAttr<ConstAttr>()) {
         Diag(Loc, diag::warn_unused_call) << R1 << R2 << "const";
         return;
       }
@@ -240,7 +253,7 @@
       return;
     }
     const ObjCMethodDecl *MD = ME->getMethodDecl();
-    if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
+    if (MD && MD->hasAttr<WarnUnusedResultAttr>()) {
       Diag(Loc, diag::warn_unused_result) << R1 << R2;
       return;
     }
@@ -499,7 +512,6 @@
     // We don't diagnose this overflow, because it is implementation-defined
     // behavior.
     // FIXME: Introduce a second, default-ignored warning for this case?
-    llvm::APSInt OldVal(Val);
     Val.setIsSigned(NewSign);
   }
 }
@@ -589,41 +601,41 @@
         : ICEConvertDiagnoser(/*AllowScopedEnumerations*/true, false, true),
           Cond(Cond) {}
 
-    virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
-                                                 QualType T) {
+    SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+                                         QualType T) override {
       return S.Diag(Loc, diag::err_typecheck_statement_requires_integer) << T;
     }
 
-    virtual SemaDiagnosticBuilder diagnoseIncomplete(
-        Sema &S, SourceLocation Loc, QualType T) {
+    SemaDiagnosticBuilder diagnoseIncomplete(
+        Sema &S, SourceLocation Loc, QualType T) override {
       return S.Diag(Loc, diag::err_switch_incomplete_class_type)
                << T << Cond->getSourceRange();
     }
 
-    virtual SemaDiagnosticBuilder diagnoseExplicitConv(
-        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) {
+    SemaDiagnosticBuilder diagnoseExplicitConv(
+        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
       return S.Diag(Loc, diag::err_switch_explicit_conversion) << T << ConvTy;
     }
 
-    virtual SemaDiagnosticBuilder noteExplicitConv(
-        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) {
+    SemaDiagnosticBuilder noteExplicitConv(
+        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
       return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
         << ConvTy->isEnumeralType() << ConvTy;
     }
 
-    virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
-                                                    QualType T) {
+    SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+                                            QualType T) override {
       return S.Diag(Loc, diag::err_switch_multiple_conversions) << T;
     }
 
-    virtual SemaDiagnosticBuilder noteAmbiguous(
-        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) {
+    SemaDiagnosticBuilder noteAmbiguous(
+        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
       return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
       << ConvTy->isEnumeralType() << ConvTy;
     }
 
-    virtual SemaDiagnosticBuilder diagnoseConversion(
-        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) {
+    SemaDiagnosticBuilder diagnoseConversion(
+        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
       llvm_unreachable("conversion functions are permitted");
     }
   } SwitchDiagnoser(Cond);
@@ -660,6 +672,29 @@
   Val.setIsSigned(IsSigned);
 }
 
+/// Returns true if we should emit a diagnostic about this case expression not
+/// being a part of the enum used in the switch controlling expression.
+static bool ShouldDiagnoseSwitchCaseNotInEnum(const ASTContext &Ctx,
+                                              const EnumDecl *ED,
+                                              const Expr *CaseExpr) {
+  // Don't warn if the 'case' expression refers to a static const variable of
+  // the enum type.
+  CaseExpr = CaseExpr->IgnoreParenImpCasts();
+  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseExpr)) {
+    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      if (!VD->hasGlobalStorage())
+        return true;
+      QualType VarType = VD->getType();
+      if (!VarType.isConstQualified())
+        return true;
+      QualType EnumType = Ctx.getTypeDeclType(ED);
+      if (Ctx.hasSameUnqualifiedType(EnumType, VarType))
+        return false;
+    }
+  }
+  return true;
+}
+
 StmtResult
 Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                             Stmt *BodyStmt) {
@@ -993,11 +1028,10 @@
 
       // Gather all enum values, set their type and sort them,
       // allowing easier comparison with CaseVals.
-      for (EnumDecl::enumerator_iterator EDI = ED->enumerator_begin();
-           EDI != ED->enumerator_end(); ++EDI) {
+      for (auto *EDI : ED->enumerators()) {
         llvm::APSInt Val = EDI->getInitVal();
         AdjustAPSInt(Val, CondWidth, CondIsSigned);
-        EnumVals.push_back(std::make_pair(Val, *EDI));
+        EnumVals.push_back(std::make_pair(Val, EDI));
       }
       std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
       EnumValsTy::iterator EIend =
@@ -1009,9 +1043,12 @@
            CI != CaseVals.end(); CI++) {
         while (EI != EIend && EI->first < CI->first)
           EI++;
-        if (EI == EIend || EI->first > CI->first)
-          Diag(CI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
-            << CondTypeBeforePromotion;
+        if (EI == EIend || EI->first > CI->first) {
+          Expr *CaseExpr = CI->second->getLHS();
+          if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr))
+            Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+              << CondTypeBeforePromotion;
+        }
       }
       // See which of case ranges aren't in enum
       EI = EnumVals.begin();
@@ -1021,8 +1058,10 @@
           EI++;
 
         if (EI == EIend || EI->first != RI->first) {
-          Diag(RI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
-            << CondTypeBeforePromotion;
+          Expr *CaseExpr = RI->second->getLHS();
+          if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr))
+            Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+              << CondTypeBeforePromotion;
         }
 
         llvm::APSInt Hi =
@@ -1030,9 +1069,12 @@
         AdjustAPSInt(Hi, CondWidth, CondIsSigned);
         while (EI != EIend && EI->first < Hi)
           EI++;
-        if (EI == EIend || EI->first != Hi)
-          Diag(RI->second->getRHS()->getExprLoc(), diag::warn_not_in_enum)
-            << CondTypeBeforePromotion;
+        if (EI == EIend || EI->first != Hi) {
+          Expr *CaseExpr = RI->second->getRHS();
+          if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr))
+            Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+              << CondTypeBeforePromotion;
+        }
       }
 
       // Check which enum vals aren't in switch
@@ -1044,7 +1086,6 @@
 
       for (EI = EnumVals.begin(); EI != EIend; EI++){
         // Drop unneeded case values
-        llvm::APSInt CIVal;
         while (CI != CaseVals.end() && CI->first < EI->first)
           CI++;
 
@@ -1120,7 +1161,7 @@
     return;
 
   if (const EnumType *ET = DstType->getAs<EnumType>())
-    if (!Context.hasSameType(SrcType, DstType) &&
+    if (!Context.hasSameUnqualifiedType(SrcType, DstType) &&
         SrcType->isIntegerType()) {
       if (!SrcExpr->isTypeDependent() && !SrcExpr->isValueDependent() &&
           SrcExpr->isIntegerConstantExpr(Context)) {
@@ -1137,11 +1178,10 @@
 
         // Gather all enum values, set their type and sort them,
         // allowing easier comparison with rhs constant.
-        for (EnumDecl::enumerator_iterator EDI = ED->enumerator_begin();
-             EDI != ED->enumerator_end(); ++EDI) {
+        for (auto *EDI : ED->enumerators()) {
           llvm::APSInt Val = EDI->getInitVal();
           AdjustAPSInt(Val, DstWidth, DstIsSigned);
-          EnumVals.push_back(std::make_pair(Val, *EDI));
+          EnumVals.push_back(std::make_pair(Val, EDI));
         }
         if (EnumVals.empty())
           return;
@@ -1155,7 +1195,7 @@
           EI++;
         if (EI == EIend || EI->first != RhsVal) {
           Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
-          << DstType;
+              << DstType.getUnqualifiedType();
         }
       }
     }
@@ -1176,6 +1216,7 @@
   Expr *ConditionExpr = CondResult.take();
   if (!ConditionExpr)
     return StmtError();
+  CheckBreakContinueBinding(ConditionExpr);
 
   DiagnoseUnusedExprResult(Body);
 
@@ -1192,6 +1233,7 @@
                   Expr *Cond, SourceLocation CondRParen) {
   assert(Cond && "ActOnDoStmt(): missing expression");
 
+  CheckBreakContinueBinding(Cond);
   ExprResult CondResult = CheckBooleanCondition(Cond, DoLoc);
   if (CondResult.isInvalid())
     return StmtError();
@@ -1454,25 +1496,33 @@
     return false;
   }
 
-  // A visitor to determine if a continue statement is a subexpression.
-  class ContinueFinder : public EvaluatedExprVisitor<ContinueFinder> {
-    bool Found;
+  // A visitor to determine if a continue or break statement is a
+  // subexpression.
+  class BreakContinueFinder : public EvaluatedExprVisitor<BreakContinueFinder> {
+    SourceLocation BreakLoc;
+    SourceLocation ContinueLoc;
   public:
-    ContinueFinder(Sema &S, Stmt* Body) :
-        Inherited(S.Context),
-        Found(false) {
+    BreakContinueFinder(Sema &S, Stmt* Body) :
+        Inherited(S.Context) {
       Visit(Body);
     }
 
-    typedef EvaluatedExprVisitor<ContinueFinder> Inherited;
+    typedef EvaluatedExprVisitor<BreakContinueFinder> Inherited;
 
     void VisitContinueStmt(ContinueStmt* E) {
-      Found = true;
+      ContinueLoc = E->getContinueLoc();
     }
 
-    bool ContinueFound() { return Found; }
+    void VisitBreakStmt(BreakStmt* E) {
+      BreakLoc = E->getBreakLoc();
+    }
 
-  };  // end class ContinueFinder
+    bool ContinueFound() { return ContinueLoc.isValid(); }
+    bool BreakFound() { return BreakLoc.isValid(); }
+    SourceLocation GetContinueLoc() { return ContinueLoc; }
+    SourceLocation GetBreakLoc() { return BreakLoc; }
+
+  };  // end class BreakContinueFinder
 
   // Emit a warning when a loop increment/decrement appears twice per loop
   // iteration.  The conditions which trigger this warning are:
@@ -1501,11 +1551,11 @@
     if (!ProcessIterationStmt(S, LastStmt, LastIncrement, LastDRE)) return;
 
     // Check that the two statements are both increments or both decrements
-    // on the same varaible.
+    // on the same variable.
     if (LoopIncrement != LastIncrement ||
         LoopDRE->getDecl() != LastDRE->getDecl()) return;
 
-    if (ContinueFinder(S, Body).ContinueFound()) return;
+    if (BreakContinueFinder(S, Body).ContinueFound()) return;
 
     S.Diag(LastDRE->getLocation(), diag::warn_redundant_loop_iteration)
          << LastDRE->getDecl() << LastIncrement;
@@ -1515,6 +1565,25 @@
 
 } // end namespace
 
+
+void Sema::CheckBreakContinueBinding(Expr *E) {
+  if (!E || getLangOpts().CPlusPlus)
+    return;
+  BreakContinueFinder BCFinder(*this, E);
+  Scope *BreakParent = CurScope->getBreakParent();
+  if (BCFinder.BreakFound() && BreakParent) {
+    if (BreakParent->getFlags() & Scope::SwitchScope) {
+      Diag(BCFinder.GetBreakLoc(), diag::warn_break_binds_to_switch);
+    } else {
+      Diag(BCFinder.GetBreakLoc(), diag::warn_loop_ctrl_binds_to_inner)
+          << "break";
+    }
+  } else if (BCFinder.ContinueFound() && CurScope->getContinueParent()) {
+    Diag(BCFinder.GetContinueLoc(), diag::warn_loop_ctrl_binds_to_inner)
+        << "continue";
+  }
+}
+
 StmtResult
 Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                    Stmt *First, FullExprArg second, Decl *secondVar,
@@ -1525,19 +1594,21 @@
       // C99 6.8.5p3: The declaration part of a 'for' statement shall only
       // declare identifiers for objects having storage class 'auto' or
       // 'register'.
-      for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
-           DI!=DE; ++DI) {
-        VarDecl *VD = dyn_cast<VarDecl>(*DI);
+      for (auto *DI : DS->decls()) {
+        VarDecl *VD = dyn_cast<VarDecl>(DI);
         if (VD && VD->isLocalVarDecl() && !VD->hasLocalStorage())
           VD = 0;
         if (VD == 0) {
-          Diag((*DI)->getLocation(), diag::err_non_local_variable_decl_in_for);
-          (*DI)->setInvalidDecl();
+          Diag(DI->getLocation(), diag::err_non_local_variable_decl_in_for);
+          DI->setInvalidDecl();
         }
       }
     }
   }
 
+  CheckBreakContinueBinding(second.get());
+  CheckBreakContinueBinding(third.get());
+
   CheckForLoopConditionalStatement(*this, second.get(), third.get(), Body);
   CheckForRedundantIteration(*this, third.get(), Body);
 
@@ -2499,7 +2570,7 @@
 static bool hasDeducedReturnType(FunctionDecl *FD) {
   const FunctionProtoType *FPT =
       FD->getTypeSourceInfo()->getType()->castAs<FunctionProtoType>();
-  return FPT->getResultType()->isUndeducedType();
+  return FPT->getReturnType()->isUndeducedType();
 }
 
 /// ActOnCapScopeReturnStmt - Utility routine to type-check return statements
@@ -2518,7 +2589,7 @@
     // FIXME: Blocks might have a return type of 'auto' explicitly specified.
     FunctionDecl *FD = CurLambda->CallOperator;
     if (CurCap->ReturnType.isNull())
-      CurCap->ReturnType = FD->getResultType();
+      CurCap->ReturnType = FD->getReturnType();
 
     AutoType *AT = CurCap->ReturnType->getContainedAutoType();
     assert(AT && "lost auto type from lambda return type");
@@ -2526,7 +2597,7 @@
       FD->setInvalidDecl();
       return StmtError();
     }
-    CurCap->ReturnType = FnRetType = FD->getResultType();
+    CurCap->ReturnType = FnRetType = FD->getReturnType();
   } else if (CurCap->HasImplicitReturnType) {
     // For blocks/lambdas with implicit return types, we check each return
     // statement individually, and deduce the common return type when the block
@@ -2621,7 +2692,7 @@
       return StmtError();
     }
     RetValExp = Res.take();
-    CheckReturnStackAddr(RetValExp, FnRetType, ReturnLoc);
+    CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc);
   }
 
   if (RetValExp) {
@@ -2651,7 +2722,7 @@
                                             Expr *&RetExpr,
                                             AutoType *AT) {
   TypeLoc OrigResultType = FD->getTypeSourceInfo()->getTypeLoc().
-    IgnoreParens().castAs<FunctionProtoTypeLoc>().getResultLoc();
+    IgnoreParens().castAs<FunctionProtoTypeLoc>().getReturnLoc();
   QualType Deduced;
 
   if (RetExpr && isa<InitListExpr>(RetExpr)) {
@@ -2745,13 +2816,21 @@
 
   QualType FnRetType;
   QualType RelatedRetType;
+  const AttrVec *Attrs = 0;
+  bool isObjCMethod = false;
+
   if (const FunctionDecl *FD = getCurFunctionDecl()) {
-    FnRetType = FD->getResultType();
+    FnRetType = FD->getReturnType();
+    if (FD->hasAttrs())
+      Attrs = &FD->getAttrs();
     if (FD->isNoReturn())
       Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
         << FD->getDeclName();
   } else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
-    FnRetType = MD->getResultType();
+    FnRetType = MD->getReturnType();
+    isObjCMethod = true;
+    if (MD->hasAttrs())
+      Attrs = &MD->getAttrs();
     if (MD->hasRelatedResultType() && MD->getClassInterface()) {
       // In the implementation of a method with a related return type, the
       // type used to type-check the validity of return statements within the
@@ -2771,7 +2850,7 @@
         FD->setInvalidDecl();
         return StmtError();
       } else {
-        FnRetType = FD->getResultType();
+        FnRetType = FD->getReturnType();
       }
     }
   }
@@ -2803,8 +2882,14 @@
       } else if (!RetValExp->isTypeDependent()) {
         // C99 6.8.6.4p1 (ext_ since GCC warns)
         unsigned D = diag::ext_return_has_expr;
-        if (RetValExp->getType()->isVoidType())
-          D = diag::ext_return_has_void_expr;
+        if (RetValExp->getType()->isVoidType()) {
+          NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+          if (isa<CXXConstructorDecl>(CurDecl) ||
+              isa<CXXDestructorDecl>(CurDecl))
+            D = diag::err_ctor_dtor_returns_void;
+          else
+            D = diag::ext_return_has_void_expr;
+        }
         else {
           ExprResult Result = Owned(RetValExp);
           Result = IgnoredValueConversions(Result.take());
@@ -2814,9 +2899,15 @@
           RetValExp = ImpCastExprToType(RetValExp,
                                         Context.VoidTy, CK_ToVoid).take();
         }
-
+        // return of void in constructor/destructor is illegal in C++.
+        if (D == diag::err_ctor_dtor_returns_void) {
+          NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+          Diag(ReturnLoc, D)
+            << CurDecl->getDeclName() << isa<CXXDestructorDecl>(CurDecl)
+            << RetValExp->getSourceRange();
+        }
         // return (some void expression); is legal in C++.
-        if (D != diag::ext_return_has_void_expr ||
+        else if (D != diag::ext_return_has_void_expr ||
             !getLangOpts().CPlusPlus) {
           NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
 
@@ -2894,7 +2985,8 @@
         RetValExp = Res.takeAs<Expr>();
       }
 
-      CheckReturnStackAddr(RetValExp, FnRetType, ReturnLoc);
+      CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc, isObjCMethod, Attrs,
+                         getCurFunctionDecl());
     }
 
     if (RetValExp) {
@@ -3269,10 +3361,7 @@
   RecordDecl *Record = RSI->TheRecordDecl;
   Record->setInvalidDecl();
 
-  SmallVector<Decl*, 4> Fields;
-  for (RecordDecl::field_iterator I = Record->field_begin(),
-                                  E = Record->field_end(); I != E; ++I)
-    Fields.push_back(*I);
+  SmallVector<Decl*, 4> Fields(Record->fields());
   ActOnFields(/*Scope=*/0, Record->getLocation(), Record, Fields,
               SourceLocation(), SourceLocation(), /*AttributeList=*/0);
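
Among the SemaStmt.cpp changes, ShouldDiagnoseSwitchCaseNotInEnum() suppresses
the -Wswitch "case value not in enumerated type" warning when the case label
names a global const variable of the enum's own type. A short sketch of the
pattern that is now accepted silently (enum and variable names are invented
for the example):

enum Color { Red, Green, Blue };

// Global, const, and of the enum's own type: a case label naming this no
// longer triggers warn_not_in_enum, even though 3 is not an enumerator.
const Color Sentinel = static_cast<Color>(3);

// A const of a different type would still be diagnosed.
const int NotAColor = 42;

int classify(Color c) {
  switch (c) {
  case Red:
    return 0;
  case Sentinel:      // previously warned; now suppressed
    return 1;
  // case NotAColor:  // would still produce warn_not_in_enum
  default:
    return 2;
  }
}

int main() { return classify(Red); }
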
 
diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp
index eb0188a..9bb191d 100644
--- a/lib/Sema/SemaStmtAttr.cpp
+++ b/lib/Sema/SemaStmtAttr.cpp
@@ -12,7 +12,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Sema/SemaInternal.h"
-#include "TargetAttributesSema.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Lex/Lexer.h"
@@ -41,7 +40,8 @@
     S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_outside_switch);
     return 0;
   }
-  return ::new (S.Context) FallThroughAttr(A.getRange(), S.Context);
+  return ::new (S.Context) FallThroughAttr(A.getRange(), S.Context,
+                                           A.getAttributeSpellingListIndex());
 }
 
 
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 28603da..9be7189 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -10,8 +10,8 @@
 //===----------------------------------------------------------------------===/
 
 #include "TreeTransform.h"
-#include "clang/AST/ASTContext.h"
 #include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
 #include "clang/AST/DeclFriend.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/AST/Expr.h"
@@ -20,6 +20,7 @@
 #include "clang/AST/TypeVisitor.h"
 #include "clang/Basic/LangOptions.h"
 #include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
 #include "clang/Sema/DeclSpec.h"
 #include "clang/Sema/Lookup.h"
 #include "clang/Sema/ParsedTemplate.h"
@@ -193,8 +194,7 @@
     TemplateDecl *TD = cast<TemplateDecl>((*R.begin())->getUnderlyingDecl());
 
     if (SS.isSet() && !SS.isInvalid()) {
-      NestedNameSpecifier *Qualifier
-        = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+      NestedNameSpecifier *Qualifier = SS.getScopeRep();
       Template = Context.getQualifiedTemplateName(Qualifier,
                                                   hasTemplateKeyword, TD);
     } else {
@@ -968,7 +968,8 @@
           PrevDecl = (*Previous.begin())->getUnderlyingDecl();
       }
     }
-  } else if (PrevDecl && !isDeclInScope(PrevDecl, SemanticContext, S))
+  } else if (PrevDecl &&
+             !isDeclInScope(PrevDecl, SemanticContext, S, SS.isValid()))
     PrevDecl = PrevClassTemplate = 0;
 
   if (PrevClassTemplate) {
@@ -1487,6 +1488,9 @@
 
   unsigned Depth;
   bool Match;
+  SourceLocation MatchLoc;
+
+  DependencyChecker(unsigned Depth) : Depth(Depth), Match(false) {}
 
   DependencyChecker(TemplateParameterList *Params) : Match(false) {
     NamedDecl *ND = Params->getParam(0);
@@ -1500,14 +1504,19 @@
     }
   }
 
-  bool Matches(unsigned ParmDepth) {
+  bool Matches(unsigned ParmDepth, SourceLocation Loc = SourceLocation()) {
     if (ParmDepth >= Depth) {
       Match = true;
+      MatchLoc = Loc;
       return true;
     }
     return false;
   }
 
+  bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+    return !Matches(TL.getTypePtr()->getDepth(), TL.getNameLoc());
+  }
+
   bool VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
     return !Matches(T->getDepth());
   }
@@ -1515,21 +1524,28 @@
   bool TraverseTemplateName(TemplateName N) {
     if (TemplateTemplateParmDecl *PD =
           dyn_cast_or_null<TemplateTemplateParmDecl>(N.getAsTemplateDecl()))
-      if (Matches(PD->getDepth())) return false;
+      if (Matches(PD->getDepth()))
+        return false;
     return super::TraverseTemplateName(N);
   }
 
   bool VisitDeclRefExpr(DeclRefExpr *E) {
     if (NonTypeTemplateParmDecl *PD =
-          dyn_cast<NonTypeTemplateParmDecl>(E->getDecl())) {
-      if (PD->getDepth() == Depth) {
-        Match = true;
+          dyn_cast<NonTypeTemplateParmDecl>(E->getDecl()))
+      if (Matches(PD->getDepth(), E->getExprLoc()))
         return false;
-      }
-    }
     return super::VisitDeclRefExpr(E);
   }
-  
+
+  bool VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+    return TraverseType(T->getReplacementType());
+  }
+
+  bool
+  VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T) {
+    return TraverseTemplateArgument(T->getArgumentPack());
+  }
+
   bool TraverseInjectedClassNameType(const InjectedClassNameType *T) {
     return TraverseType(T->getInjectedSpecializationType());
   }
@@ -1959,7 +1975,8 @@
                                                           TemplateArgs);
 
   TemplateDecl *Template = Name.getAsTemplateDecl();
-  if (!Template || isa<FunctionTemplateDecl>(Template)) {
+  if (!Template || isa<FunctionTemplateDecl>(Template) ||
+      isa<VarTemplateDecl>(Template)) {
     // We might have a substituted template template parameter pack. If so,
     // build a template specialization type for it.
     if (Name.getAsSubstTemplateTemplateParmPack())
@@ -1974,17 +1991,15 @@
   // Check that the template argument list is well-formed for this
   // template.
   SmallVector<TemplateArgument, 4> Converted;
-  bool ExpansionIntoFixedList = false;
   if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
-                                false, Converted, &ExpansionIntoFixedList))
+                                false, Converted))
     return QualType();
 
   QualType CanonType;
 
   bool InstantiationDependent = false;
-  TypeAliasTemplateDecl *AliasTemplate = 0;
-  if (!ExpansionIntoFixedList &&
-      (AliasTemplate = dyn_cast<TypeAliasTemplateDecl>(Template))) {
+  if (TypeAliasTemplateDecl *AliasTemplate =
+          dyn_cast<TypeAliasTemplateDecl>(Template)) {
     // Find the canonical type for this type alias template specialization.
     TypeAliasDecl *Pattern = AliasTemplate->getTemplatedDecl();
     if (Pattern->isInvalidDecl())
@@ -2267,8 +2282,8 @@
 }
 
 static bool CheckTemplatePartialSpecializationArgs(
-    Sema &S, TemplateParameterList *TemplateParams,
-    SmallVectorImpl<TemplateArgument> &TemplateArgs);
+    Sema &S, SourceLocation NameLoc, TemplateParameterList *TemplateParams,
+    unsigned ExplicitArgs, SmallVectorImpl<TemplateArgument> &TemplateArgs);
 
 static bool CheckTemplateSpecializationScope(Sema &S, NamedDecl *Specialized,
                                              NamedDecl *PrevDecl,
@@ -2341,11 +2356,9 @@
 }
 
 DeclResult Sema::ActOnVarTemplateSpecialization(
-    Scope *S, VarTemplateDecl *VarTemplate, Declarator &D, TypeSourceInfo *DI,
-    SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
-    VarDecl::StorageClass SC, bool IsPartialSpecialization) {
-  assert(VarTemplate && "A variable template id without template?");
-
+    Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc,
+    TemplateParameterList *TemplateParams, VarDecl::StorageClass SC,
+    bool IsPartialSpecialization) {
   // D must be variable template id.
   assert(D.getName().getKind() == UnqualifiedId::IK_TemplateId &&
          "Variable template specialization is declared with a template it.");
@@ -2358,7 +2371,14 @@
                                      TemplateId->NumArgs);
   TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
   translateTemplateArguments(TemplateArgsPtr, TemplateArgs);
-  TemplateName Name(VarTemplate);
+  TemplateName Name = TemplateId->Template.get();
+
+  // The template-id must name a variable template.
+  VarTemplateDecl *VarTemplate =
+      dyn_cast<VarTemplateDecl>(Name.getAsTemplateDecl());
+  if (!VarTemplate)
+    return Diag(D.getIdentifierLoc(), diag::err_var_spec_no_template)
+             << IsPartialSpecialization;
 
   // Check for unexpanded parameter packs in any of the template arguments.
   for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
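
The hunk above replaces the old precondition (an assert that the caller already resolved a variable template) with a real diagnostic: the declarator's template-id must name a variable template, otherwise err_var_spec_no_template fires. A hedged sketch of the C++1y declarations involved (names are illustrative, not taken from the Clang test suite):

  template<typename T> T pi = T(3.14159);  // a variable template
  template<> float pi<float>;              // OK: specializes the variable template 'pi'

  template<typename T> int f() { return 0; }
  template<> int f<int>;                   // no parameter list, so this parses as a
                                           // variable specialization; 'f' is a function
                                           // template, so the new check rejects it
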
@@ -2396,7 +2416,8 @@
   // corresponds to these arguments.
   if (IsPartialSpecialization) {
     if (CheckTemplatePartialSpecializationArgs(
-            *this, VarTemplate->getTemplateParameters(), Converted))
+            *this, TemplateNameLoc, VarTemplate->getTemplateParameters(),
+            TemplateArgs.size(), Converted))
       return true;
 
     bool InstantiationDependent;
@@ -2494,7 +2515,7 @@
                 << Param->getDeclName();
           else
             Diag(Param->getLocation(), diag::note_partial_spec_unused_parameter)
-                << "<anonymous>";
+                << "(anonymous)";
         }
       }
     }
@@ -2589,11 +2610,10 @@
 
   // Check that the template argument list is well-formed for this template.
   SmallVector<TemplateArgument, 4> Converted;
-  bool ExpansionIntoFixedList = false;
   if (CheckTemplateArgumentList(
           Template, TemplateNameLoc,
           const_cast<TemplateArgumentListInfo &>(TemplateArgs), false,
-          Converted, &ExpansionIntoFixedList))
+          Converted))
     return true;
 
   // Find the variable template specialization declaration that
@@ -2652,11 +2672,6 @@
       }
     }
 
-    // If we're dealing with a member template where the template parameters
-    // have been instantiated, this provides the original template parameters
-    // from which the member template's parameters were instantiated.
-    SmallVector<const NamedDecl *, 4> InstantiatedTemplateParameters;
-
     if (Matched.size() >= 1) {
       SmallVector<MatchResult, 4>::iterator Best = Matched.begin();
       if (Matched.size() == 1) {
@@ -2779,10 +2794,13 @@
   assert(!R.isAmbiguous() && "ambiguous lookup when building templateid");
 
   // In C++1y, check variable template ids.
-  if (R.getAsSingle<VarTemplateDecl>()) {
-    return Owned(CheckVarTemplateId(SS, R.getLookupNameInfo(),
-                                    R.getAsSingle<VarTemplateDecl>(),
-                                    TemplateKWLoc, TemplateArgs));
+  bool InstantiationDependent;
+  if (R.getAsSingle<VarTemplateDecl>() &&
+      !TemplateSpecializationType::anyDependentTemplateArguments(
+           *TemplateArgs, InstantiationDependent)) {
+    return CheckVarTemplateId(SS, R.getLookupNameInfo(),
+                              R.getAsSingle<VarTemplateDecl>(),
+                              TemplateKWLoc, TemplateArgs);
   }
 
   // We don't want lookup warnings at this point.
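
The added anyDependentTemplateArguments guard means a variable template-id is only resolved to a concrete specialization once its arguments are non-dependent; inside a template it stays an unresolved expression until instantiation. A rough C++1y illustration:

  template<typename T> constexpr T zero = T();

  template<typename T> T reset() {
    return zero<T>;        // dependent here: the specialization zero<int> is only
  }                        // created when reset<int>() is instantiated

  int n = reset<int>();
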
@@ -2829,7 +2847,7 @@
 
   if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>()) {
     Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_class_template)
-      << (NestedNameSpecifier*) SS.getScopeRep()
+      << SS.getScopeRep()
       << NameInfo.getName() << SS.getRange();
     Diag(Temp->getLocation(), diag::note_referenced_class_template);
     return ExprError();
@@ -2903,8 +2921,7 @@
     }
   }
 
-  NestedNameSpecifier *Qualifier
-    = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+  NestedNameSpecifier *Qualifier = SS.getScopeRep();
 
   switch (Name.getKind()) {
   case UnqualifiedId::IK_Identifier:
@@ -2915,11 +2932,10 @@
   case UnqualifiedId::IK_OperatorFunctionId:
     Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
                                              Name.OperatorFunctionId.Operator));
-    return TNK_Dependent_template_name;
+    return TNK_Function_template;
 
   case UnqualifiedId::IK_LiteralOperatorId:
-    llvm_unreachable(
-            "We don't support these; Parse shouldn't have allowed propagation");
+    llvm_unreachable("literal operator id cannot have a dependent scope");
 
   default:
     break;
@@ -3555,11 +3571,7 @@
                                      SourceLocation TemplateLoc,
                                      TemplateArgumentListInfo &TemplateArgs,
                                      bool PartialTemplateArgs,
-                          SmallVectorImpl<TemplateArgument> &Converted,
-                                     bool *ExpansionIntoFixedList) {
-  if (ExpansionIntoFixedList)
-    *ExpansionIntoFixedList = false;
-
+                          SmallVectorImpl<TemplateArgument> &Converted) {
   TemplateParameterList *Params = Template->getTemplateParameters();
 
   SourceLocation RAngleLoc = TemplateArgs.getRAngleLoc();
@@ -3612,6 +3624,20 @@
                                 ArgumentPack.size(), Converted))
         return true;
 
+      if (TemplateArgs[ArgIdx].getArgument().isPackExpansion() &&
+          isa<TypeAliasTemplateDecl>(Template) &&
+          !(Param + 1 == ParamEnd && (*Param)->isTemplateParameterPack() &&
+            !getExpandedPackSize(*Param))) {
+        // Core issue 1430: we have a pack expansion as an argument to an
+        // alias template, and it's not part of a final parameter pack. This
+        // can't be canonicalized, so reject it now.
+        Diag(TemplateArgs[ArgIdx].getLocation(),
+             diag::err_alias_template_expansion_into_fixed_list)
+          << TemplateArgs[ArgIdx].getSourceRange();
+        Diag((*Param)->getLocation(), diag::note_template_param_here);
+        return true;
+      }
+
       // We're now done with this argument.
       ++ArgIdx;
 
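A minimal sketch of what the core issue 1430 check above rejects: a pack expansion used as an argument to an alias template whose corresponding parameters form a fixed list rather than a trailing parameter pack (names are illustrative):

  template<typename T, typename U> struct Pair {};
  template<typename T, typename U> using PairAlias = Pair<T, U>;

  template<typename ...Ts> using Rejected = PairAlias<Ts...>;  // error: expansion into
                                                               // a fixed parameter list

  template<typename ...Ts> struct Tuple {};
  template<typename ...Ts> using Accepted = Tuple<Ts...>;      // OK: Ts... corresponds
                                                               // to a trailing pack
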
@@ -3658,9 +3684,6 @@
                                              ArgumentPack.data(),
                                              ArgumentPack.size()));
           ArgumentPack.clear();
-        } else if (ExpansionIntoFixedList) {
-          // We have expanded a pack into a fixed list.
-          *ExpansionIntoFixedList = true;
         }
 
         return false;
@@ -3889,19 +3912,17 @@
 
 bool UnnamedLocalNoLinkageFinder::VisitFunctionProtoType(
                                                   const FunctionProtoType* T) {
-  for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
-                                         AEnd = T->arg_type_end();
-       A != AEnd; ++A) {
-    if (Visit(*A))
+  for (const auto &A : T->param_types()) {
+    if (Visit(A))
       return true;
   }
 
-  return Visit(T->getResultType());
+  return Visit(T->getReturnType());
 }
 
 bool UnnamedLocalNoLinkageFinder::VisitFunctionNoProtoType(
                                                const FunctionNoProtoType* T) {
-  return Visit(T->getResultType());
+  return Visit(T->getReturnType());
 }
 
 bool UnnamedLocalNoLinkageFinder::VisitUnresolvedUsingType(
@@ -4523,6 +4544,8 @@
   case NPV_NullPointer:
     S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
     Converted = TemplateArgument(ParamType, /*isNullPtr*/true);
+    if (S.Context.getTargetInfo().getCXXABI().isMicrosoft())
+      S.RequireCompleteType(Arg->getExprLoc(), ParamType, 0);
     return false;
   case NPV_NotNullPointer:
     break;
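
The RequireCompleteType call added above reflects the Microsoft C++ ABI, where the size and representation of a pointer-to-member depend on the inheritance model of the class; completing the class here lets that representation be pinned down even for a null argument. A hedged sketch of the situation (no diagnostic is implied; the DiagID of 0 only forces completion):

  struct S { int x; };
  template<int S::*P> struct Holder {};

  Holder<nullptr> h;   // a null member-pointer template argument; when targeting the
                       // Microsoft ABI, 'S' is required to be complete at this point so
                       // the layout of 'int S::*' can be chosen
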
@@ -4756,9 +4779,9 @@
         
       public:
         TmplArgICEDiagnoser(QualType T) : T(T) { }
-        
-        virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
-                                    SourceRange SR) {
+
+        void diagnoseNotICE(Sema &S, SourceLocation Loc,
+                            SourceRange SR) override {
           S.Diag(Loc, diag::err_template_arg_not_ice) << T << SR;
         }
       } Diagnoser(ArgType);
@@ -5450,18 +5473,19 @@
          (S->getFlags() & Scope::TemplateParamScope) != 0)
     S = S->getParent();
 
-  // C++ [temp]p2:
-  //   A template-declaration can appear only as a namespace scope or
-  //   class scope declaration.
+  // C++ [temp]p4:
+  //   A template [...] shall not have C linkage.
   DeclContext *Ctx = S->getEntity();
-  if (Ctx && isa<LinkageSpecDecl>(Ctx) &&
-      cast<LinkageSpecDecl>(Ctx)->getLanguage() != LinkageSpecDecl::lang_cxx)
+  if (Ctx && Ctx->isExternCContext())
     return Diag(TemplateParams->getTemplateLoc(), diag::err_template_linkage)
              << TemplateParams->getSourceRange();
 
   while (Ctx && isa<LinkageSpecDecl>(Ctx))
     Ctx = Ctx->getParent();
 
+  // C++ [temp]p2:
+  //   A template-declaration can appear only as a namespace scope or
+  //   class scope declaration.
   if (Ctx) {
     if (Ctx->isFileContext())
       return false;
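
As rewritten, the C-linkage check asks the DeclContext whether it is (transitively) an extern "C" context instead of inspecting only an immediately enclosing linkage-specification, so templates that merely sit inside an extern "C" region through another context are caught as well. Roughly:

  extern "C" {
    template<typename T> void f(T);     // error: templates must have C++ linkage

    extern "C++" {
      template<typename T> void g(T);   // OK: C++ linkage again
    }

    namespace N {
      template<typename T> void h(T);   // also has C linkage; the context walk now
    }                                   // reaches the extern "C" block and rejects it
  }
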
@@ -5598,13 +5622,37 @@
   //   A class template partial specialization may be declared or redeclared
   //   in any namespace scope in which its definition may be defined (14.5.1
   //   and 14.5.2).
-  bool ComplainedAboutScope = false;
-  DeclContext *SpecializedContext 
+  DeclContext *SpecializedContext
     = Specialized->getDeclContext()->getEnclosingNamespaceContext();
   DeclContext *DC = S.CurContext->getEnclosingNamespaceContext();
-  if ((!PrevDecl ||
-       getTemplateSpecializationKind(PrevDecl) == TSK_Undeclared ||
-       getTemplateSpecializationKind(PrevDecl) == TSK_ImplicitInstantiation)){
+
+  // Make sure that this redeclaration (or definition) occurs in an enclosing
+  // namespace.
+  // Note that HandleDeclarator() performs this check for explicit
+  // specializations of function templates, static data members, and member
+  // functions, so we skip the check here for those kinds of entities.
+  // FIXME: HandleDeclarator's diagnostics aren't quite as good, though.
+  // Should we refactor that check, so that it occurs later?
+  if (!DC->Encloses(SpecializedContext) &&
+      !(isa<FunctionTemplateDecl>(Specialized) ||
+        isa<FunctionDecl>(Specialized) ||
+        isa<VarTemplateDecl>(Specialized) ||
+        isa<VarDecl>(Specialized))) {
+    if (isa<TranslationUnitDecl>(SpecializedContext))
+      S.Diag(Loc, diag::err_template_spec_redecl_global_scope)
+        << EntityKind << Specialized;
+    else if (isa<NamespaceDecl>(SpecializedContext))
+      S.Diag(Loc, diag::err_template_spec_redecl_out_of_scope)
+        << EntityKind << Specialized
+        << cast<NamedDecl>(SpecializedContext);
+    else
+      llvm_unreachable("unexpected namespace context for specialization");
+
+    S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
+  } else if ((!PrevDecl ||
+              getTemplateSpecializationKind(PrevDecl) == TSK_Undeclared ||
+              getTemplateSpecializationKind(PrevDecl) ==
+                  TSK_ImplicitInstantiation)) {
     // C++ [temp.exp.spec]p2:
     //   An explicit specialization shall be declared in the namespace of which
     //   the template is a member, or, for member templates, in the namespace
@@ -5613,9 +5661,12 @@
     //   static data member of a class template shall be declared in the
     //   namespace of which the class template is a member.
     //
-    // C++0x [temp.expl.spec]p2:
+    // C++11 [temp.expl.spec]p2:
     //   An explicit specialization shall be declared in a namespace enclosing
     //   the specialized template.
+    // C++11 [temp.explicit]p3:
+    //   An explicit instantiation shall appear in an enclosing namespace of its
+    //   template.
     if (!DC->InEnclosingNamespaceSetOf(SpecializedContext)) {
       bool IsCPlusPlus11Extension = DC->Encloses(SpecializedContext);
       if (isa<TranslationUnitDecl>(SpecializedContext)) {
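
The wording quoted above amounts to: in C++11 an explicit specialization (or explicit instantiation) may be declared in any namespace that encloses the template's own namespace, not only in that namespace itself. For example (hypothetical names):

  namespace outer {
    namespace inner {
      template<typename T> struct X {};
    }
    template<> struct inner::X<int> {};          // OK in C++11: 'outer' encloses 'inner'
  }

  template<> struct outer::inner::X<float> {};   // OK: the global namespace encloses
                                                 // every namespace

  // Declaring the specialization in a namespace that does not enclose 'outer::inner'
  // (say, an unrelated namespace 'other') remains ill-formed.
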
@@ -5636,46 +5687,42 @@
       }
 
       S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
-      ComplainedAboutScope =
-        !(IsCPlusPlus11Extension && S.getLangOpts().CPlusPlus11);
     }
   }
 
-  // Make sure that this redeclaration (or definition) occurs in an enclosing
-  // namespace.
-  // Note that HandleDeclarator() performs this check for explicit
-  // specializations of function templates, static data members, and member
-  // functions, so we skip the check here for those kinds of entities.
-  // FIXME: HandleDeclarator's diagnostics aren't quite as good, though.
-  // Should we refactor that check, so that it occurs later?
-  if (!ComplainedAboutScope && !DC->Encloses(SpecializedContext) &&
-      !(isa<FunctionTemplateDecl>(Specialized) || isa<VarDecl>(Specialized) ||
-        isa<FunctionDecl>(Specialized))) {
-    if (isa<TranslationUnitDecl>(SpecializedContext))
-      S.Diag(Loc, diag::err_template_spec_redecl_global_scope)
-        << EntityKind << Specialized;
-    else if (isa<NamespaceDecl>(SpecializedContext))
-      S.Diag(Loc, diag::err_template_spec_redecl_out_of_scope)
-        << EntityKind << Specialized
-        << cast<NamedDecl>(SpecializedContext);
-
-    S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
-  }
-
-  // FIXME: check for specialization-after-instantiation errors and such.
-
   return false;
 }
 
+static SourceRange findTemplateParameter(unsigned Depth, Expr *E) {
+  if (!E->isInstantiationDependent())
+    return SourceLocation();
+  DependencyChecker Checker(Depth);
+  Checker.TraverseStmt(E);
+  if (Checker.Match && Checker.MatchLoc.isInvalid())
+    return E->getSourceRange();
+  return Checker.MatchLoc;
+}
+
+static SourceRange findTemplateParameter(unsigned Depth, TypeLoc TL) {
+  if (!TL.getType()->isDependentType())
+    return SourceLocation();
+  DependencyChecker Checker(Depth);
+  Checker.TraverseTypeLoc(TL);
+  if (Checker.Match && Checker.MatchLoc.isInvalid())
+    return TL.getSourceRange();
+  return Checker.MatchLoc;
+}
+
 /// \brief Subroutine of Sema::CheckTemplatePartialSpecializationArgs
 /// that checks non-type template partial specialization arguments.
 static bool CheckNonTypeTemplatePartialSpecializationArgs(
-    Sema &S, NonTypeTemplateParmDecl *Param, const TemplateArgument *Args,
-    unsigned NumArgs) {
+    Sema &S, SourceLocation TemplateNameLoc, NonTypeTemplateParmDecl *Param,
+    const TemplateArgument *Args, unsigned NumArgs, bool IsDefaultArgument) {
   for (unsigned I = 0; I != NumArgs; ++I) {
     if (Args[I].getKind() == TemplateArgument::Pack) {
       if (CheckNonTypeTemplatePartialSpecializationArgs(
-              S, Param, Args[I].pack_begin(), Args[I].pack_size()))
+              S, TemplateNameLoc, Param, Args[I].pack_begin(),
+              Args[I].pack_size(), IsDefaultArgument))
         return true;
 
       continue;
@@ -5713,22 +5760,43 @@
     //        shall not involve a template parameter of the partial
     //        specialization except when the argument expression is a
     //        simple identifier.
-    if (ArgExpr->isTypeDependent() || ArgExpr->isValueDependent()) {
-      S.Diag(ArgExpr->getLocStart(),
-           diag::err_dependent_non_type_arg_in_partial_spec)
-        << ArgExpr->getSourceRange();
+    SourceRange ParamUseRange =
+        findTemplateParameter(Param->getDepth(), ArgExpr);
+    if (ParamUseRange.isValid()) {
+      if (IsDefaultArgument) {
+        S.Diag(TemplateNameLoc,
+               diag::err_dependent_non_type_arg_in_partial_spec);
+        S.Diag(ParamUseRange.getBegin(),
+               diag::note_dependent_non_type_default_arg_in_partial_spec)
+          << ParamUseRange;
+      } else {
+        S.Diag(ParamUseRange.getBegin(),
+               diag::err_dependent_non_type_arg_in_partial_spec)
+          << ParamUseRange;
+      }
       return true;
     }
 
     //     -- The type of a template parameter corresponding to a
     //        specialized non-type argument shall not be dependent on a
     //        parameter of the specialization.
-    if (Param->getType()->isDependentType()) {
-      S.Diag(ArgExpr->getLocStart(),
-           diag::err_dependent_typed_non_type_arg_in_partial_spec)
-        << Param->getType()
-        << ArgExpr->getSourceRange();
-      S.Diag(Param->getLocation(), diag::note_template_param_here);
+    //
+    // FIXME: We need to delay this check until instantiation in some cases:
+    //
+    //   template<template<typename> class X> struct A {
+    //     template<typename T, X<T> N> struct B;
+    //     template<typename T> struct B<T, 0>;
+    //   };
+    //   template<typename> using X = int;
+    //   A<X>::B<int, 0> b;
+    ParamUseRange = findTemplateParameter(
+            Param->getDepth(), Param->getTypeSourceInfo()->getTypeLoc());
+    if (ParamUseRange.isValid()) {
+      S.Diag(IsDefaultArgument ? TemplateNameLoc : ArgExpr->getLocStart(),
+             diag::err_dependent_typed_non_type_arg_in_partial_spec)
+        << Param->getType() << ParamUseRange;
+      S.Diag(Param->getLocation(), diag::note_template_param_here)
+        << (IsDefaultArgument ? ParamUseRange : SourceRange());
       return true;
     }
   }
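
Together these two checks implement C++ [temp.class.spec]p9 for both written arguments and, via the IsDefaultArgument path, arguments that come from default template arguments (in which case the error points at the template name and a note points at the parameter use). The rule itself, in a classic form:

  template<int I, int J> struct A {};

  template<int I> struct A<I, 2>     {};   // OK: 'I' is a simple identifier
  template<int I> struct A<I + 1, 2> {};   // error: the non-type argument 'I + 1'
                                           // involves a parameter of the specialization

  template<typename T, T N> struct B {};
  template<typename T> struct B<T, 0> {};  // error: the type of the parameter matching
                                           // '0' depends on 'T', a parameter of the
                                           // partial specialization
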
@@ -5739,15 +5807,17 @@
 /// \brief Check the non-type template arguments of a class template
 /// partial specialization according to C++ [temp.class.spec]p9.
 ///
+/// \param TemplateNameLoc the location of the template name.
 /// \param TemplateParams the template parameters of the primary class
-/// template.
-///
+///        template.
+/// \param NumExplicit the number of explicitly-specified template arguments.
 /// \param TemplateArgs the template arguments of the class template
-/// partial specialization.
+///        partial specialization.
 ///
-/// \returns true if there was an error, false otherwise.
+/// \returns \c true if there was an error, \c false otherwise.
 static bool CheckTemplatePartialSpecializationArgs(
-    Sema &S, TemplateParameterList *TemplateParams,
+    Sema &S, SourceLocation TemplateNameLoc,
+    TemplateParameterList *TemplateParams, unsigned NumExplicit,
     SmallVectorImpl<TemplateArgument> &TemplateArgs) {
   const TemplateArgument *ArgList = TemplateArgs.data();
 
@@ -5757,7 +5827,8 @@
     if (!Param)
       continue;
 
-    if (CheckNonTypeTemplatePartialSpecializationArgs(S, Param, &ArgList[I], 1))
+    if (CheckNonTypeTemplatePartialSpecializationArgs(
+            S, TemplateNameLoc, Param, &ArgList[I], 1, I >= NumExplicit))
       return true;
   }
 
@@ -5904,7 +5975,8 @@
   // corresponds to these arguments.
   if (isPartialSpecialization) {
     if (CheckTemplatePartialSpecializationArgs(
-            *this, ClassTemplate->getTemplateParameters(), Converted))
+            *this, TemplateNameLoc, ClassTemplate->getTemplateParameters(),
+            TemplateArgs.size(), Converted))
       return true;
 
     bool InstantiationDependent;
@@ -6043,7 +6115,7 @@
           else
             Diag(Param->getLocation(),
                  diag::note_partial_spec_unused_parameter)
-              << "<anonymous>";
+              << "(anonymous)";
         }
       }
     }
@@ -6210,10 +6282,8 @@
   if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
     FD->setInlineSpecified(false);
 
-    for (FunctionDecl::param_iterator I = FD->param_begin(),
-                                      E = FD->param_end();
-         I != E; ++I)
-      (*I)->dropAttrs();
+    for (auto I : FD->params())
+      I->dropAttrs();
   }
 }
 
@@ -6509,8 +6579,8 @@
           const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
           FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
           EPI.TypeQuals |= Qualifiers::Const;
-          FT = Context.getFunctionType(FPT->getResultType(), FPT->getArgTypes(),
-                                       EPI);
+          FT = Context.getFunctionType(FPT->getReturnType(),
+                                       FPT->getParamTypes(), EPI);
         }
       }
 
@@ -6656,7 +6726,10 @@
            I != E; ++I) {
       NamedDecl *D = (*I)->getUnderlyingDecl();
       if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
-        if (Context.hasSameType(Function->getType(), Method->getType())) {
+        QualType Adjusted = Function->getType();
+        if (!hasExplicitCallingConv(Adjusted))
+          Adjusted = adjustCCAndNoReturn(Adjusted, Method->getType());
+        if (Context.hasSameType(Adjusted, Method->getType())) {
           Instantiation = Method;
           InstantiatedFrom = Method->getInstantiatedFromMemberFunction();
           MSInfo = Method->getMemberSpecializationInfo();
@@ -6877,8 +6950,8 @@
   //   name shall be a simple-template-id.
   //
   // C++98 has the same restriction, just worded differently.
-  for (NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
-       NNS; NNS = NNS->getPrefix())
+  for (NestedNameSpecifier *NNS = SS.getScopeRep(); NNS;
+       NNS = NNS->getPrefix())
     if (const Type *T = NNS->getAsType())
       if (isa<TemplateSpecializationType>(T))
         return true;
@@ -7102,7 +7175,8 @@
                         KWLoc, SS, Name, NameLoc, Attr, AS_none,
                         /*ModulePrivateLoc=*/SourceLocation(),
                         MultiTemplateParamsArg(), Owned, IsDependent,
-                        SourceLocation(), false, TypeResult());
+                        SourceLocation(), false, TypeResult(),
+                        /*IsTypeSpecifier*/false);
   assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
 
   if (!TagD)
@@ -7554,8 +7628,7 @@
   // This has to hold, because SS is expected to be defined.
   assert(Name && "Expected a name in a dependent tag");
 
-  NestedNameSpecifier *NNS
-    = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+  NestedNameSpecifier *NNS = SS.getScopeRep();
   if (!NNS)
     return true;
 
@@ -7641,8 +7714,7 @@
   if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
     // Construct a dependent template specialization type.
     assert(DTN && "dependent template has non-dependent name?");
-    assert(DTN->getQualifier()
-           == static_cast<NestedNameSpecifier*>(SS.getScopeRep()));
+    assert(DTN->getQualifier() == SS.getScopeRep());
     QualType T = Context.getDependentTemplateSpecializationType(ETK_Typename,
                                                           DTN->getQualifier(),
                                                           DTN->getIdentifier(),
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index 8d66ff6..8a0589a 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -783,7 +783,7 @@
       S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
       for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
         unsigned Depth, Index;
-        llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+        std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
         if (Depth == 0 && !SawIndices[Index]) {
           SawIndices[Index] = true;
           PackIndices.push_back(Index);
@@ -981,6 +981,17 @@
         Comparison.Qualifiers = ParamMoreQualified;
       else if (ArgQuals.isStrictSupersetOf(ParamQuals))
         Comparison.Qualifiers = ArgMoreQualified;
+      else if (ArgQuals.getObjCLifetime() != ParamQuals.getObjCLifetime() &&
+               ArgQuals.withoutObjCLifetime()
+                 == ParamQuals.withoutObjCLifetime()) {
+        // Prefer binding to non-__unsafe_unretained parameters.

+        if (ArgQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone &&
+            ParamQuals.getObjCLifetime())
+          Comparison.Qualifiers = ParamMoreQualified;
+        else if (ParamQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone &&
+                 ArgQuals.getObjCLifetime())
+          Comparison.Qualifiers = ArgMoreQualified;
+      }
       RefParamComparisons->push_back(Comparison);
     }
 
@@ -1364,19 +1375,17 @@
         return Sema::TDK_NonDeducedMismatch;
 
       // Check return types.
-      if (Sema::TemplateDeductionResult Result
-            = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
-                                            FunctionProtoParam->getResultType(),
-                                            FunctionProtoArg->getResultType(),
-                                            Info, Deduced, 0))
+      if (Sema::TemplateDeductionResult Result =
+              DeduceTemplateArgumentsByTypeMatch(
+                  S, TemplateParams, FunctionProtoParam->getReturnType(),
+                  FunctionProtoArg->getReturnType(), Info, Deduced, 0))
         return Result;
 
-      return DeduceTemplateArguments(S, TemplateParams,
-                                     FunctionProtoParam->arg_type_begin(),
-                                     FunctionProtoParam->getNumArgs(),
-                                     FunctionProtoArg->arg_type_begin(),
-                                     FunctionProtoArg->getNumArgs(),
-                                     Info, Deduced, SubTDF);
+      return DeduceTemplateArguments(
+          S, TemplateParams, FunctionProtoParam->param_type_begin(),
+          FunctionProtoParam->getNumParams(),
+          FunctionProtoArg->param_type_begin(),
+          FunctionProtoArg->getNumParams(), Info, Deduced, SubTDF);
     }
 
     case Type::InjectedClassName: {
@@ -1463,12 +1472,10 @@
 
             // Visit base classes
             CXXRecordDecl *Next = cast<CXXRecordDecl>(NextT->getDecl());
-            for (CXXRecordDecl::base_class_iterator Base = Next->bases_begin(),
-                                                 BaseEnd = Next->bases_end();
-                 Base != BaseEnd; ++Base) {
-              assert(Base->getType()->isRecordType() &&
+            for (const auto &Base : Next->bases()) {
+              assert(Base.getType()->isRecordType() &&
                      "Base class that isn't a record?");
-              ToVisit.push_back(Base->getType()->getAs<RecordType>());
+              ToVisit.push_back(Base.getType()->getAs<RecordType>());
             }
           }
 
@@ -1854,7 +1861,7 @@
       S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
       for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
         unsigned Depth, Index;
-        llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+        std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
         if (Depth == 0 && !SawIndices[Index]) {
           SawIndices[Index] = true;
           PackIndices.push_back(Index);
@@ -2274,8 +2281,8 @@
     return Result;
 
   SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
-  InstantiatingTemplate Inst(*this, Partial->getLocation(), Partial,
-                             DeducedArgs, Info);
+  InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs,
+                             Info);
   if (Inst.isInvalid())
     return TDK_InstantiationDepth;
 
@@ -2438,8 +2445,8 @@
     return Result;
 
   SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
-  InstantiatingTemplate Inst(*this, Partial->getLocation(), Partial,
-                             DeducedArgs, Info);
+  InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs,
+                             Info);
   if (Inst.isInvalid())
     return TDK_InstantiationDepth;
 
@@ -2498,11 +2505,8 @@
   if (ExplicitTemplateArgs.size() == 0) {
     // No arguments to substitute; just copy over the parameter types and
     // fill in the function type.
-    for (FunctionDecl::param_iterator P = Function->param_begin(),
-                                   PEnd = Function->param_end();
-         P != PEnd;
-         ++P)
-      ParamTypes.push_back((*P)->getType());
+    for (auto P : Function->params())
+      ParamTypes.push_back(P->getType());
 
     if (FunctionType)
       *FunctionType = Function->getType();
@@ -2524,8 +2528,8 @@
   // explicitly-specified template arguments against this function template,
   // and then substitute them into the function parameter types.
   SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
-  InstantiatingTemplate Inst(*this, FunctionTemplate->getLocation(),
-                             FunctionTemplate, DeducedArgs,
+  InstantiatingTemplate Inst(*this, Info.getLocation(), FunctionTemplate,
+                             DeducedArgs,
            ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution,
                              Info);
   if (Inst.isInvalid())
@@ -2604,11 +2608,11 @@
       
     CXXThisScopeRAII ThisScope(*this, ThisContext, ThisTypeQuals,
                                getLangOpts().CPlusPlus11);
-    
-    ResultType = SubstType(Proto->getResultType(),
-                   MultiLevelTemplateArgumentList(*ExplicitArgumentList),
-                   Function->getTypeSpecStartLoc(),
-                   Function->getDeclName());
+
+    ResultType =
+        SubstType(Proto->getReturnType(),
+                  MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+                  Function->getTypeSpecStartLoc(), Function->getDeclName());
     if (ResultType.isNull() || Trap.hasErrorOccurred())
       return TDK_SubstitutionFailure;
   }
@@ -2778,8 +2782,8 @@
   // Enter a new template instantiation context while we instantiate the
   // actual function declaration.
   SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
-  InstantiatingTemplate Inst(*this, FunctionTemplate->getLocation(),
-                             FunctionTemplate, DeducedArgs,
+  InstantiatingTemplate Inst(*this, Info.getLocation(), FunctionTemplate,
+                             DeducedArgs,
               ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
                              Info);
   if (Inst.isInvalid())
@@ -2976,8 +2980,8 @@
 static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
                                   FunctionDecl *Fn) {
   // We may need to deduce the return type of the function now.
-  if (S.getLangOpts().CPlusPlus1y && Fn->getResultType()->isUndeducedType() &&
-      S.DeduceReturnType(Fn, R.Expression->getExprLoc(), /*Diagnose*/false))
+  if (S.getLangOpts().CPlusPlus1y && Fn->getReturnType()->isUndeducedType() &&
+      S.DeduceReturnType(Fn, R.Expression->getExprLoc(), /*Diagnose*/ false))
     return QualType();
 
   if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
@@ -3406,7 +3410,7 @@
       collectUnexpandedParameterPacks(ParamPattern, Unexpanded);
       for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
         unsigned Depth, Index;
-        llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+        std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
         if (Depth == 0 && !SawIndices[Index]) {
           SawIndices[Index] = true;
           PackIndices.push_back(Index);
@@ -3589,7 +3593,7 @@
   // type so that we treat it as a non-deduced context in what follows.
   bool HasDeducedReturnType = false;
   if (getLangOpts().CPlusPlus1y && InOverloadResolution &&
-      Function->getResultType()->getContainedAutoType()) {
+      Function->getReturnType()->getContainedAutoType()) {
     FunctionType = SubstAutoType(FunctionType, Context.DependentTy);
     HasDeducedReturnType = true;
   }
@@ -3614,7 +3618,7 @@
   // If the function has a deduced return type, deduce it now, so we can check
   // that the deduced function type matches the requested type.
   if (HasDeducedReturnType &&
-      Specialization->getResultType()->isUndeducedType() &&
+      Specialization->getReturnType()->isUndeducedType() &&
       DeduceReturnType(Specialization, Info.getLocation(), false))
     return TDK_MiscellaneousDeductionFailure;
 
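The HasDeducedReturnType logic above lets a C++1y function template with an 'auto' return type be matched against a target function type: the undeduced return type is treated as a non-deduced context during matching, then the specialization's return type is deduced and compared. A small sketch:

  template<typename T> auto identity(T x) { return x; }

  int (*p)(int) = identity;   // T is deduced as 'int' from the parameter; identity<int>
                              // is then instantiated so 'auto' becomes 'int', and the
                              // resulting type is checked against 'int (int)'
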
@@ -3643,7 +3647,7 @@
 SubstAutoWithinFunctionReturnType(FunctionDecl *F, 
                                     QualType TypeToReplaceAutoWith, Sema &S) {
   assert(!TypeToReplaceAutoWith->getContainedAutoType());
-  QualType AutoResultType = F->getResultType();
+  QualType AutoResultType = F->getReturnType();
   assert(AutoResultType->getContainedAutoType()); 
   QualType DeducedResultType = S.SubstAutoType(AutoResultType, 
                                                TypeToReplaceAutoWith);
@@ -3668,7 +3672,7 @@
   assert(LambdaClass && LambdaClass->isGenericLambda()); 
   
   CXXMethodDecl *CallOpGeneric = LambdaClass->getLambdaCallOperator();
-  QualType CallOpResultType = CallOpGeneric->getResultType(); 
+  QualType CallOpResultType = CallOpGeneric->getReturnType();
   const bool GenericLambdaCallOperatorHasDeducedReturnType = 
       CallOpResultType->getContainedAutoType();
   
@@ -3685,15 +3689,15 @@
     return Result;
  
   // If we need to deduce the return type, do so (instantiates the callop).
-  if (GenericLambdaCallOperatorHasDeducedReturnType && 
-                CallOpSpecialized->getResultType()->isUndeducedType())
+  if (GenericLambdaCallOperatorHasDeducedReturnType &&
+      CallOpSpecialized->getReturnType()->isUndeducedType())
     S.DeduceReturnType(CallOpSpecialized, 
                        CallOpSpecialized->getPointOfInstantiation(),
                        /*Diagnose*/ true);
     
   // Check to see if the return type of the destination ptr-to-function
   // matches the return type of the call operator.
-  if (!S.Context.hasSameType(CallOpSpecialized->getResultType(), 
+  if (!S.Context.hasSameType(CallOpSpecialized->getReturnType(),
                              ReturnTypeOfDestFunctionPtr))
     return Sema::TDK_NonDeducedMismatch;
   // Since we have succeeded in matching the source and destination
@@ -3712,8 +3716,8 @@
     "If the call operator succeeded so should the invoker!");
   // Set the result type to match the corresponding call operator
   // specialization's result type.
-  if (GenericLambdaCallOperatorHasDeducedReturnType && 
-      InvokerSpecialized->getResultType()->isUndeducedType()) {
+  if (GenericLambdaCallOperatorHasDeducedReturnType &&
+      InvokerSpecialized->getReturnType()->isUndeducedType()) {
     // Be sure to get the type to replace 'auto' with and not
     // the full result type of the call op specialization 
     // to substitute into the 'auto' of the invoker and conversion
@@ -3722,9 +3726,9 @@
     //  int* (*fp)(int*) = [](auto* a) -> auto* { return a; };
     // We don't want to subst 'int*' into 'auto' to get int**.
 
-    QualType TypeToReplaceAutoWith = 
-        CallOpSpecialized->getResultType()->
-            getContainedAutoType()->getDeducedType();
+    QualType TypeToReplaceAutoWith = CallOpSpecialized->getReturnType()
+                                         ->getContainedAutoType()
+                                         ->getDeducedType();
     SubstAutoWithinFunctionReturnType(InvokerSpecialized,
         TypeToReplaceAutoWith, S);
     SubstAutoWithinFunctionReturnType(ConversionSpecialized, 
@@ -3740,7 +3744,7 @@
   FunctionProtoType::ExtProtoInfo EPI = InvokerFPT->getExtProtoInfo();
   EPI.TypeQuals = 0;
   InvokerSpecialized->setType(S.Context.getFunctionType(
-      InvokerFPT->getResultType(), InvokerFPT->getArgTypes(),EPI));
+      InvokerFPT->getReturnType(), InvokerFPT->getParamTypes(), EPI));
   return Sema::TDK_Success;
 }
 /// \brief Deduce template arguments for a templated conversion
@@ -3863,8 +3867,8 @@
         "Can only convert from lambda to ptr-to-function");
     const FunctionType *ToFunType = 
         A->getPointeeType().getTypePtr()->getAs<FunctionType>();
-    const QualType DestFunctionPtrReturnType = ToFunType->getResultType();
-    
+    const QualType DestFunctionPtrReturnType = ToFunType->getReturnType();
+
     // Create the corresponding specializations of the call operator and 
     // the static-invoker; and if the return type is auto, 
     // deduce the return type and check if it matches the 
@@ -4109,12 +4113,12 @@
 
 bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                             bool Diagnose) {
-  assert(FD->getResultType()->isUndeducedType());
+  assert(FD->getReturnType()->isUndeducedType());
 
   if (FD->getTemplateInstantiationPattern())
     InstantiateFunctionDefinition(Loc, FD);
 
-  bool StillUndeduced = FD->getResultType()->isUndeducedType();
+  bool StillUndeduced = FD->getReturnType()->isUndeducedType();
   if (StillUndeduced && Diagnose && !FD->isInvalidDecl()) {
     Diag(Loc, diag::err_auto_fn_used_before_defined) << FD;
     Diag(FD->getLocation(), diag::note_callee_decl) << FD;
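
Sema::DeduceReturnType is the on-demand helper for C++1y deduced return types: it instantiates the definition if one is available and otherwise reports err_auto_fn_used_before_defined. For example:

  auto g();                  // declaration only; the return type is not yet deduced

  int too_early() {
    return g();              // error: 'g' with a deduced return type cannot be used
  }                          // before it is defined

  auto g() { return 42; }    // after the definition, the return type is known
  int later() { return g(); }
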
@@ -4220,10 +4224,10 @@
         ++Skip1;
     }
 
-    Args1.insert(Args1.end(),
-                 Proto1->arg_type_begin() + Skip1, Proto1->arg_type_end());
-    Args2.insert(Args2.end(),
-                 Proto2->arg_type_begin() + Skip2, Proto2->arg_type_end());
+    Args1.insert(Args1.end(), Proto1->param_type_begin() + Skip1,
+                 Proto1->param_type_end());
+    Args2.insert(Args2.end(), Proto2->param_type_begin() + Skip2,
+                 Proto2->param_type_end());
 
     // C++ [temp.func.order]p5:
     //   The presence of unused ellipsis and default arguments has no effect on
@@ -4244,12 +4248,10 @@
   case TPOC_Conversion:
     //   - In the context of a call to a conversion operator, the return types
     //     of the conversion function templates are used.
-    if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
-                                           Proto2->getResultType(),
-                                           Proto1->getResultType(),
-                                           Info, Deduced, TDF_None,
-                                           /*PartialOrdering=*/true,
-                                           RefParamComparisons))
+    if (DeduceTemplateArgumentsByTypeMatch(
+            S, TemplateParams, Proto2->getReturnType(), Proto1->getReturnType(),
+            Info, Deduced, TDF_None,
+            /*PartialOrdering=*/true, RefParamComparisons))
       return false;
     break;
 
@@ -4293,9 +4295,8 @@
     break;
 
   case TPOC_Conversion:
-    ::MarkUsedTemplateParameters(S.Context, Proto2->getResultType(), false,
-                                 TemplateParams->getDepth(),
-                                 UsedParameters);
+    ::MarkUsedTemplateParameters(S.Context, Proto2->getReturnType(), false,
+                                 TemplateParams->getDepth(), UsedParameters);
     break;
 
   case TPOC_Other:
@@ -4613,8 +4614,7 @@
                                             /*RefParamComparisons=*/0);
   if (Better1) {
     SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end());
-    InstantiatingTemplate Inst(*this, PS2->getLocation(), PS2, DeducedArgs,
-                               Info);
+    InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info);
     Better1 = !::FinishTemplateArgumentDeduction(
         *this, PS2, PS1->getTemplateArgs(), Deduced, Info);
   }
@@ -4629,8 +4629,7 @@
   if (Better2) {
     SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
                                                  Deduced.end());
-    InstantiatingTemplate Inst(*this, PS1->getLocation(), PS1, DeducedArgs,
-                               Info);
+    InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info);
     Better2 = !::FinishTemplateArgumentDeduction(
         *this, PS1, PS2->getTemplateArgs(), Deduced, Info);
   }
@@ -4653,7 +4652,7 @@
   SmallVector<DeducedTemplateArgument, 4> Deduced;
   TemplateDeductionInfo Info(Loc);
 
-  assert(PS1->getSpecializedTemplate() == PS1->getSpecializedTemplate() &&
+  assert(PS1->getSpecializedTemplate() == PS2->getSpecializedTemplate() &&
          "the partial specializations being compared should specialize"
          " the same template.");
   TemplateName Name(PS1->getSpecializedTemplate());
@@ -4674,8 +4673,7 @@
   if (Better1) {
     SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
                                                  Deduced.end());
-    InstantiatingTemplate Inst(*this, PS2->getLocation(), PS2,
-                               DeducedArgs, Info);
+    InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info);
     Better1 = !::FinishTemplateArgumentDeduction(*this, PS2,
                                                  PS1->getTemplateArgs(),
                                                  Deduced, Info);
@@ -4691,8 +4689,7 @@
                                             /*RefParamComparisons=*/0);
   if (Better2) {
     SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end());
-    InstantiatingTemplate Inst(*this, PS1->getLocation(), PS1,
-                               DeducedArgs, Info);
+    InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info);
     Better2 = !::FinishTemplateArgumentDeduction(*this, PS1,
                                                  PS2->getTemplateArgs(),
                                                  Deduced, Info);
@@ -4874,10 +4871,10 @@
 
   case Type::FunctionProto: {
     const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
-    MarkUsedTemplateParameters(Ctx, Proto->getResultType(), OnlyDeduced,
-                               Depth, Used);
-    for (unsigned I = 0, N = Proto->getNumArgs(); I != N; ++I)
-      MarkUsedTemplateParameters(Ctx, Proto->getArgType(I), OnlyDeduced,
+    MarkUsedTemplateParameters(Ctx, Proto->getReturnType(), OnlyDeduced, Depth,
+                               Used);
+    for (unsigned I = 0, N = Proto->getNumParams(); I != N; ++I)
+      MarkUsedTemplateParameters(Ctx, Proto->getParamType(I), OnlyDeduced,
                                  Depth, Used);
     break;
   }
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index 8904f37..1ee3e4b 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -76,8 +76,18 @@
       // If this variable template specialization was instantiated from a
       // specialized member that is a variable template, we're done.
       assert(Spec->getSpecializedTemplate() && "No variable template?");
-      if (Spec->getSpecializedTemplate()->isMemberSpecialization())
-        return Result;
+      llvm::PointerUnion<VarTemplateDecl*,
+                         VarTemplatePartialSpecializationDecl*> Specialized
+                             = Spec->getSpecializedTemplateOrPartial();
+      if (VarTemplatePartialSpecializationDecl *Partial =
+              Specialized.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
+        if (Partial->isMemberSpecialization())
+          return Result;
+      } else {
+        VarTemplateDecl *Tmpl = Specialized.get<VarTemplateDecl *>();
+        if (Tmpl->isMemberSpecialization())
+          return Result;
+      }
     }
 
     // If we have a template template parameter with translation unit context,
@@ -190,50 +200,49 @@
   llvm_unreachable("Invalid InstantiationKind!");
 }
 
+void Sema::InstantiatingTemplate::Initialize(
+    ActiveTemplateInstantiation::InstantiationKind Kind,
+    SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
+    Decl *Entity, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
+    sema::TemplateDeductionInfo *DeductionInfo) {
+  SavedInNonInstantiationSFINAEContext =
+      SemaRef.InNonInstantiationSFINAEContext;
+  Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
+  if (!Invalid) {
+    ActiveTemplateInstantiation Inst;
+    Inst.Kind = Kind;
+    Inst.PointOfInstantiation = PointOfInstantiation;
+    Inst.Entity = Entity;
+    Inst.Template = Template;
+    Inst.TemplateArgs = TemplateArgs.data();
+    Inst.NumTemplateArgs = TemplateArgs.size();
+    Inst.DeductionInfo = DeductionInfo;
+    Inst.InstantiationRange = InstantiationRange;
+    SemaRef.InNonInstantiationSFINAEContext = false;
+    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+    if (!Inst.isInstantiationRecord())
+      ++SemaRef.NonInstantiationEntries;
+  }
+}
+
 Sema::InstantiatingTemplate::
 InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                       Decl *Entity,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                        SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation,
-                                    InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind = ActiveTemplateInstantiation::TemplateInstantiation;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Entity = Entity;
-    Inst.TemplateArgs = 0;
-    Inst.NumTemplateArgs = 0;
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  }
+  Initialize(ActiveTemplateInstantiation::TemplateInstantiation,
+             PointOfInstantiation, InstantiationRange, Entity);
 }
 
 Sema::InstantiatingTemplate::
 InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                       FunctionDecl *Entity, ExceptionSpecification,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                        SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation,
-                                    InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind = ActiveTemplateInstantiation::ExceptionSpecInstantiation;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Entity = Entity;
-    Inst.TemplateArgs = 0;
-    Inst.NumTemplateArgs = 0;
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  }
+  Initialize(ActiveTemplateInstantiation::ExceptionSpecInstantiation,
+             PointOfInstantiation, InstantiationRange, Entity);
 }
 
 Sema::InstantiatingTemplate::
@@ -241,24 +250,11 @@
                       TemplateDecl *Template,
                       ArrayRef<TemplateArgument> TemplateArgs,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                     SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation,
-                                    InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind
-      = ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Entity = Template;
-    Inst.TemplateArgs = TemplateArgs.data();
-    Inst.NumTemplateArgs = TemplateArgs.size();
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  }
+  Initialize(ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation,
+             PointOfInstantiation, InstantiationRange,
+             Template, nullptr, TemplateArgs);
 }
 
 Sema::InstantiatingTemplate::
@@ -268,26 +264,10 @@
                       ActiveTemplateInstantiation::InstantiationKind Kind,
                       sema::TemplateDeductionInfo &DeductionInfo,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                     SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind = Kind;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Entity = FunctionTemplate;
-    Inst.TemplateArgs = TemplateArgs.data();
-    Inst.NumTemplateArgs = TemplateArgs.size();
-    Inst.DeductionInfo = &DeductionInfo;
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-    
-    if (!Inst.isInstantiationRecord())
-      ++SemaRef.NonInstantiationEntries;
-  }
+  Initialize(Kind, PointOfInstantiation, InstantiationRange,
+             FunctionTemplate, nullptr, TemplateArgs, &DeductionInfo);
 }
 
 Sema::InstantiatingTemplate::
@@ -296,23 +276,11 @@
                       ArrayRef<TemplateArgument> TemplateArgs,
                       sema::TemplateDeductionInfo &DeductionInfo,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                     SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind = ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Entity = PartialSpec;
-    Inst.TemplateArgs = TemplateArgs.data();
-    Inst.NumTemplateArgs = TemplateArgs.size();
-    Inst.DeductionInfo = &DeductionInfo;
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  }
+  Initialize(ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
+             PointOfInstantiation, InstantiationRange,
+             PartialSpec, nullptr, TemplateArgs, &DeductionInfo);
 }
 
 Sema::InstantiatingTemplate::InstantiatingTemplate(
@@ -320,22 +288,11 @@
     VarTemplatePartialSpecializationDecl *PartialSpec,
     ArrayRef<TemplateArgument> TemplateArgs,
     sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
-    : SemaRef(SemaRef), SavedInNonInstantiationSFINAEContext(
-                            SemaRef.InNonInstantiationSFINAEContext) {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind =
-        ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Entity = PartialSpec;
-    Inst.TemplateArgs = TemplateArgs.data();
-    Inst.NumTemplateArgs = TemplateArgs.size();
-    Inst.DeductionInfo = &DeductionInfo;
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  }
+  : SemaRef(SemaRef)
+{
+  Initialize(ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
+             PointOfInstantiation, InstantiationRange,
+             PartialSpec, nullptr, TemplateArgs, &DeductionInfo);
 }
 
 Sema::InstantiatingTemplate::
@@ -343,47 +300,24 @@
                       ParmVarDecl *Param,
                       ArrayRef<TemplateArgument> TemplateArgs,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                     SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind
-      = ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Entity = Param;
-    Inst.TemplateArgs = TemplateArgs.data();
-    Inst.NumTemplateArgs = TemplateArgs.size();
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  }
+  Initialize(ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation,
+             PointOfInstantiation, InstantiationRange,
+             Param, nullptr, TemplateArgs);
 }
 
+
 Sema::InstantiatingTemplate::
 InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                       NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                       ArrayRef<TemplateArgument> TemplateArgs,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                     SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Template = Template;
-    Inst.Entity = Param;
-    Inst.TemplateArgs = TemplateArgs.data();
-    Inst.NumTemplateArgs = TemplateArgs.size();
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  }
+  Initialize(ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution,
+             PointOfInstantiation, InstantiationRange,
+             Param, Template, TemplateArgs);
 }
 
 Sema::InstantiatingTemplate::
@@ -391,23 +325,11 @@
                       NamedDecl *Template, TemplateTemplateParmDecl *Param,
                       ArrayRef<TemplateArgument> TemplateArgs,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                     SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
-  if (!Invalid) {
-    ActiveTemplateInstantiation Inst;
-    Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
-    Inst.PointOfInstantiation = PointOfInstantiation;
-    Inst.Template = Template;
-    Inst.Entity = Param;
-    Inst.TemplateArgs = TemplateArgs.data();
-    Inst.NumTemplateArgs = TemplateArgs.size();
-    Inst.InstantiationRange = InstantiationRange;
-    SemaRef.InNonInstantiationSFINAEContext = false;
-    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  }
+  Initialize(ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution,
+             PointOfInstantiation, InstantiationRange,
+             Param, Template, TemplateArgs);
 }
 
 Sema::InstantiatingTemplate::
@@ -415,25 +337,11 @@
                       TemplateDecl *Template, NamedDecl *Param,
                       ArrayRef<TemplateArgument> TemplateArgs,
                       SourceRange InstantiationRange)
-  : SemaRef(SemaRef),
-    SavedInNonInstantiationSFINAEContext(
-                                     SemaRef.InNonInstantiationSFINAEContext)
+  : SemaRef(SemaRef)
 {
-  Invalid = false;
-  
-  ActiveTemplateInstantiation Inst;
-  Inst.Kind = ActiveTemplateInstantiation::DefaultTemplateArgumentChecking;
-  Inst.PointOfInstantiation = PointOfInstantiation;
-  Inst.Template = Template;
-  Inst.Entity = Param;
-  Inst.TemplateArgs = TemplateArgs.data();
-  Inst.NumTemplateArgs = TemplateArgs.size();
-  Inst.InstantiationRange = InstantiationRange;
-  SemaRef.InNonInstantiationSFINAEContext = false;
-  SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-  
-  assert(!Inst.isInstantiationRecord());
-  ++SemaRef.NonInstantiationEntries;
+  Initialize(ActiveTemplateInstantiation::DefaultTemplateArgumentChecking,
+             PointOfInstantiation, InstantiationRange,
+             Param, Template, TemplateArgs);
 }
 
 void Sema::InstantiatingTemplate::Clear() {
@@ -789,7 +697,7 @@
         MultiLevelTemplateArgumentList &TemplateArgs
           = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
         unsigned Depth, Index;
-        llvm::tie(Depth, Index) = getDepthAndIndex(PartialPack);
+        std::tie(Depth, Index) = getDepthAndIndex(PartialPack);
         if (TemplateArgs.hasTemplateArgument(Depth, Index)) {
           Result = TemplateArgs(Depth, Index);
           TemplateArgs.setArgument(Depth, Index, TemplateArgument());
@@ -808,7 +716,7 @@
         MultiLevelTemplateArgumentList &TemplateArgs
         = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
         unsigned Depth, Index;
-        llvm::tie(Depth, Index) = getDepthAndIndex(PartialPack);
+        std::tie(Depth, Index) = getDepthAndIndex(PartialPack);
         TemplateArgs.setArgument(Depth, Index, Arg);
       }
     }
@@ -929,8 +837,6 @@
                               OldCallOperator->getDescribedFunctionTemplate();
         NewCallOperatorTemplate->setInstantiatedFromMemberTemplate(
                                                      OldCallOperatorTemplate);
-        // Mark the NewCallOperatorTemplate a specialization.  
-        NewCallOperatorTemplate->setMemberSpecialization();
       } else 
         // For a non-generic lambda we set the NewCallOperator to 
         // be an instantiation of the OldCallOperator.
@@ -940,7 +846,7 @@
       return inherited::TransformLambdaScope(E, NewCallOperator, 
           InitCaptureExprsAndTypes);
     }
-    TemplateParameterList *TransformTemplateParameterList(
+    TemplateParameterList *TransformTemplateParameterList(
                               TemplateParameterList *OrigTPL)  {
       if (!OrigTPL || !OrigTPL->size()) return OrigTPL;
          
@@ -1628,8 +1534,8 @@
     return false;
 
   FunctionProtoTypeLoc FP = TL.castAs<FunctionProtoTypeLoc>();
-  for (unsigned I = 0, E = FP.getNumArgs(); I != E; ++I) {
-    ParmVarDecl *P = FP.getArg(I);
+  for (unsigned I = 0, E = FP.getNumParams(); I != E; ++I) {
+    ParmVarDecl *P = FP.getParam(I);
 
     // This must be synthesized from a typedef.
     if (!P) continue;
@@ -1808,31 +1714,29 @@
                           const MultiLevelTemplateArgumentList &TemplateArgs) {
   bool Invalid = false;
   SmallVector<CXXBaseSpecifier*, 4> InstantiatedBases;
-  for (ClassTemplateSpecializationDecl::base_class_iterator
-         Base = Pattern->bases_begin(), BaseEnd = Pattern->bases_end();
-       Base != BaseEnd; ++Base) {
-    if (!Base->getType()->isDependentType()) {
-      if (const CXXRecordDecl *RD = Base->getType()->getAsCXXRecordDecl()) {
+  for (const auto Base : Pattern->bases()) {
+    if (!Base.getType()->isDependentType()) {
+      if (const CXXRecordDecl *RD = Base.getType()->getAsCXXRecordDecl()) {
         if (RD->isInvalidDecl())
           Instantiation->setInvalidDecl();
       }
-      InstantiatedBases.push_back(new (Context) CXXBaseSpecifier(*Base));
+      InstantiatedBases.push_back(new (Context) CXXBaseSpecifier(Base));
       continue;
     }
 
     SourceLocation EllipsisLoc;
     TypeSourceInfo *BaseTypeLoc;
-    if (Base->isPackExpansion()) {
+    if (Base.isPackExpansion()) {
       // This is a pack expansion. See whether we should expand it now, or
       // wait until later.
       SmallVector<UnexpandedParameterPack, 2> Unexpanded;
-      collectUnexpandedParameterPacks(Base->getTypeSourceInfo()->getTypeLoc(),
+      collectUnexpandedParameterPacks(Base.getTypeSourceInfo()->getTypeLoc(),
                                       Unexpanded);
       bool ShouldExpand = false;
       bool RetainExpansion = false;
       Optional<unsigned> NumExpansions;
-      if (CheckParameterPacksForExpansion(Base->getEllipsisLoc(), 
-                                          Base->getSourceRange(),
+      if (CheckParameterPacksForExpansion(Base.getEllipsisLoc(), 
+                                          Base.getSourceRange(),
                                           Unexpanded,
                                           TemplateArgs, ShouldExpand, 
                                           RetainExpansion,
@@ -1846,9 +1750,9 @@
         for (unsigned I = 0; I != *NumExpansions; ++I) {
             Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
           
-          TypeSourceInfo *BaseTypeLoc = SubstType(Base->getTypeSourceInfo(),
+          TypeSourceInfo *BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
                                                   TemplateArgs,
-                                              Base->getSourceRange().getBegin(),
+                                              Base.getSourceRange().getBegin(),
                                                   DeclarationName());
           if (!BaseTypeLoc) {
             Invalid = true;
@@ -1857,9 +1761,9 @@
           
           if (CXXBaseSpecifier *InstantiatedBase
                 = CheckBaseSpecifier(Instantiation,
-                                     Base->getSourceRange(),
-                                     Base->isVirtual(),
-                                     Base->getAccessSpecifierAsWritten(),
+                                     Base.getSourceRange(),
+                                     Base.isVirtual(),
+                                     Base.getAccessSpecifierAsWritten(),
                                      BaseTypeLoc,
                                      SourceLocation()))
             InstantiatedBases.push_back(InstantiatedBase);
@@ -1871,16 +1775,16 @@
       }
       
       // The resulting base specifier will (still) be a pack expansion.
-      EllipsisLoc = Base->getEllipsisLoc();
+      EllipsisLoc = Base.getEllipsisLoc();
       Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
-      BaseTypeLoc = SubstType(Base->getTypeSourceInfo(),
+      BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
                               TemplateArgs,
-                              Base->getSourceRange().getBegin(),
+                              Base.getSourceRange().getBegin(),
                               DeclarationName());
     } else {
-      BaseTypeLoc = SubstType(Base->getTypeSourceInfo(),
+      BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
                               TemplateArgs,
-                              Base->getSourceRange().getBegin(),
+                              Base.getSourceRange().getBegin(),
                               DeclarationName());
     }
     
@@ -1891,9 +1795,9 @@
 
     if (CXXBaseSpecifier *InstantiatedBase
           = CheckBaseSpecifier(Instantiation,
-                               Base->getSourceRange(),
-                               Base->isVirtual(),
-                               Base->getAccessSpecifierAsWritten(),
+                               Base.getSourceRange(),
+                               Base.isVirtual(),
+                               Base.getAccessSpecifierAsWritten(),
                                BaseTypeLoc,
                                EllipsisLoc))
       InstantiatedBases.push_back(InstantiatedBase);
@@ -2008,7 +1912,7 @@
     Spec->setTemplateSpecializationKind(TSK);
     Spec->setPointOfInstantiation(PointOfInstantiation);
   }
-  
+
   InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
   if (Inst.isInvalid())
     return true;
@@ -2030,7 +1934,12 @@
 
   // Start the definition of this instantiation.
   Instantiation->startDefinition();
-  
+
+  // The instantiation is visible here, even if it was first declared in an
+  // unimported module.
+  Instantiation->setHidden(false);
+
+  // FIXME: This loses the as-written tag kind for an explicit instantiation.
   Instantiation->setTagKind(Pattern->getTagKind());
 
   // Do substitution on the base class specifiers.
@@ -2045,9 +1954,7 @@
   LateInstantiatedAttrVec LateAttrs;
   Instantiator.enableLateAttributeInstantiation(&LateAttrs);
 
-  for (RecordDecl::decl_iterator Member = Pattern->decls_begin(),
-         MemberEnd = Pattern->decls_end();
-       Member != MemberEnd; ++Member) {
+  for (auto *Member : Pattern->decls()) {
     // Don't instantiate members not belonging in this semantic context.
     // e.g. for:
     // @code
@@ -2057,19 +1964,19 @@
     // @endcode
     // 'class B' has the template as lexical context but semantically it is
     // introduced in namespace scope.
-    if ((*Member)->getDeclContext() != Pattern)
+    if (Member->getDeclContext() != Pattern)
       continue;
 
-    if ((*Member)->isInvalidDecl()) {
+    if (Member->isInvalidDecl()) {
       Instantiation->setInvalidDecl();
       continue;
     }
 
-    Decl *NewMember = Instantiator.Visit(*Member);
+    Decl *NewMember = Instantiator.Visit(Member);
     if (NewMember) {
       if (FieldDecl *Field = dyn_cast<FieldDecl>(NewMember)) {
         Fields.push_back(Field);
-        FieldDecl *OldField = cast<FieldDecl>(*Member);
+        FieldDecl *OldField = cast<FieldDecl>(Member);
         if (OldField->getInClassInitializer())
           FieldsWithMemberInitializers.push_back(std::make_pair(OldField,
                                                                 Field));
@@ -2124,16 +2031,14 @@
       FieldDecl *NewField = FieldsWithMemberInitializers[I].second;
       Expr *OldInit = OldField->getInClassInitializer();
 
+      ActOnStartCXXInClassMemberInitializer();
       ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
                                             /*CXXDirectInit=*/false);
-      if (NewInit.isInvalid())
-        NewField->setInvalidDecl();
-      else {
-        Expr *Init = NewInit.take();
-        assert(Init && "no-argument initializer in class");
-        assert(!isa<ParenListExpr>(Init) && "call-style init in class");
-        ActOnCXXInClassMemberInitializer(NewField, Init->getLocStart(), Init);
-      }
+      Expr *Init = NewInit.take();
+      assert((!Init || !isa<ParenListExpr>(Init)) &&
+             "call-style init in class");
+      ActOnFinishCXXInClassMemberInitializer(NewField, Init->getLocStart(),
+                                             Init);
     }
   }
   // Instantiate late parsed attributes, and attach them to their decls.
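
The initializers being substituted here are in-class member initializers of a class template; a minimal hedged illustration (the names are invented for this note, not taken from the patch):

    template <typename T> struct Box {
      T value = T();   // in-class initializer, re-substituted for each specialization
    };

    Box<int> b;        // instantiating Box<int> runs the Start/Finish calls above
                       // to build the initializer 'int()' for Box<int>::value
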
@@ -2161,6 +2066,8 @@
 
   ActOnFinishDelayedMemberInitializers(Instantiation);
 
+  // FIXME: We should do something similar for explicit instantiations so they
+  // end up in the right module.
   if (TSK == TSK_ImplicitInstantiation) {
     Instantiation->setLocation(Pattern->getLocation());
     Instantiation->setLocStart(Pattern->getInnerLocStart());
@@ -2250,6 +2157,10 @@
   if (Inst.isInvalid())
     return true;
 
+  // The instantiation is visible here, even if it was first declared in an
+  // unimported module.
+  Instantiation->setHidden(false);
+
   // Enter the scope of this instantiation. We don't use
   // PushDeclContext because we don't have a scope.
   ContextRAII SavedContext(*this, Instantiation);
@@ -2354,8 +2265,7 @@
   // If we're dealing with a member template where the template parameters
   // have been instantiated, this provides the original template parameters
   // from which the member template's parameters were instantiated.
-  SmallVector<const NamedDecl *, 4> InstantiatedTemplateParameters;
-  
+
   if (Matched.size() >= 1) {
     SmallVectorImpl<MatchResult>::iterator Best = Matched.begin();
     if (Matched.size() == 1) {
@@ -2464,11 +2374,9 @@
        TSK == TSK_ExplicitInstantiationDeclaration ||
        (TSK == TSK_ImplicitInstantiation && Instantiation->isLocalClass())) &&
       "Unexpected template specialization kind!");
-  for (DeclContext::decl_iterator D = Instantiation->decls_begin(),
-                               DEnd = Instantiation->decls_end();
-       D != DEnd; ++D) {
+  for (auto *D : Instantiation->decls()) {
     bool SuppressNew = false;
-    if (FunctionDecl *Function = dyn_cast<FunctionDecl>(*D)) {
+    if (auto *Function = dyn_cast<FunctionDecl>(D)) {
       if (FunctionDecl *Pattern
             = Function->getInstantiatedFromMemberFunction()) {
         MemberSpecializationInfo *MSInfo 
@@ -2509,7 +2417,7 @@
                 std::make_pair(Function, PointOfInstantiation));
         }
       }
-    } else if (VarDecl *Var = dyn_cast<VarDecl>(*D)) {
+    } else if (auto *Var = dyn_cast<VarDecl>(D)) {
       if (isa<VarTemplateSpecializationDecl>(Var))
         continue;
 
@@ -2545,7 +2453,7 @@
           Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
         }
       }      
-    } else if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(*D)) {
+    } else if (auto *Record = dyn_cast<CXXRecordDecl>(D)) {
       // Always skip the injected-class-name, along with any
       // redeclarations of nested classes, since both would cause us
       // to try to instantiate the members of a class twice.
@@ -2602,7 +2510,7 @@
       if (Pattern)
         InstantiateClassMembers(PointOfInstantiation, Pattern, TemplateArgs, 
                                 TSK);
-    } else if (EnumDecl *Enum = dyn_cast<EnumDecl>(*D)) {
+    } else if (auto *Enum = dyn_cast<EnumDecl>(D)) {
       MemberSpecializationInfo *MSInfo = Enum->getMemberSpecializationInfo();
       assert(MSInfo && "No member specialization information?");
 
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 5c28e3b..316b574 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -12,6 +12,7 @@
 #include "clang/Sema/SemaInternal.h"
 #include "clang/AST/ASTConsumer.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/AST/DeclVisitor.h"
 #include "clang/AST/DependentDiagnostic.h"
@@ -129,14 +130,46 @@
   }
 }
 
+static void instantiateDependentEnableIfAttr(
+    Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+    const EnableIfAttr *A, const Decl *Tmpl, Decl *New) {
+  Expr *Cond = 0;
+  {
+    EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+    ExprResult Result = S.SubstExpr(A->getCond(), TemplateArgs);
+    if (Result.isInvalid())
+      return;
+    Cond = Result.takeAs<Expr>();
+  }
+  if (A->getCond()->isTypeDependent() && !Cond->isTypeDependent()) {
+    ExprResult Converted = S.PerformContextuallyConvertToBool(Cond);
+    if (Converted.isInvalid())
+      return;
+    Cond = Converted.take();
+  }
+
+  SmallVector<PartialDiagnosticAt, 8> Diags;
+  if (A->getCond()->isValueDependent() && !Cond->isValueDependent() &&
+      !Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(Tmpl),
+                                                Diags)) {
+    S.Diag(A->getLocation(), diag::err_enable_if_never_constant_expr);
+    for (int I = 0, N = Diags.size(); I != N; ++I)
+      S.Diag(Diags[I].first, Diags[I].second);
+    return;
+  }
+
+  EnableIfAttr *EIA = new (S.getASTContext())
+                        EnableIfAttr(A->getLocation(), S.getASTContext(), Cond,
+                                     A->getMessage(),
+                                     A->getSpellingListIndex());
+  New->addAttr(EIA);
+}
+
 void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                             const Decl *Tmpl, Decl *New,
                             LateInstantiatedAttrVec *LateAttrs,
                             LocalInstantiationScope *OuterMostScope) {
-  for (AttrVec::const_iterator i = Tmpl->attr_begin(), e = Tmpl->attr_end();
-       i != e; ++i) {
-    const Attr *TmplAttr = *i;
-
+  for (const auto *TmplAttr : Tmpl->attrs()) {
     // FIXME: This should be generalized to more than just the AlignedAttr.
     const AlignedAttr *Aligned = dyn_cast<AlignedAttr>(TmplAttr);
     if (Aligned && Aligned->isAlignmentDependent()) {
@@ -144,6 +177,13 @@
       continue;
     }
 
+    const EnableIfAttr *EnableIf = dyn_cast<EnableIfAttr>(TmplAttr);
+    if (EnableIf && EnableIf->getCond()->isValueDependent()) {
+      instantiateDependentEnableIfAttr(*this, TemplateArgs, EnableIf, Tmpl,
+                                       New);
+      continue;
+    }
+
     assert(!TmplAttr->isPackExpansion());
     if (TmplAttr->isLateParsed() && LateAttrs) {
       // Late parsed attributes must be instantiated and attached after the
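
A hedged sketch of the kind of declaration the new dependent-enable_if path handles: a Clang enable_if attribute whose condition depends on a template parameter and can only be checked after substitution (names below are illustrative):

    template <int N>
    void resize(int amount)
        __attribute__((enable_if(N > 0, "N must be positive")));

    void test() {
      resize<4>(2);    // instantiated condition is '4 > 0'; the attribute is satisfied
      // resize<0>(2); // instantiated condition is false; the call would not resolve
    }
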
@@ -476,7 +516,7 @@
 
   if (DI->getType()->isVariablyModifiedType()) {
     SemaRef.Diag(D->getLocation(), diag::err_property_is_variably_modified)
-    << D->getName();
+      << D;
     Invalid = true;
   } else if (DI->getType()->isInstantiationDependentType())  {
     DI = SemaRef.SubstType(DI, TemplateArgs,
@@ -499,11 +539,9 @@
     SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType());
   }
 
-  MSPropertyDecl *Property = new (SemaRef.Context)
-      MSPropertyDecl(Owner, D->getLocation(),
-                     D->getDeclName(), DI->getType(), DI,
-                     D->getLocStart(),
-                     D->getGetterId(), D->getSetterId());
+  MSPropertyDecl *Property = MSPropertyDecl::Create(
+      SemaRef.Context, Owner, D->getLocation(), D->getDeclName(), DI->getType(),
+      DI, D->getLocStart(), D->getGetterId(), D->getSetterId());
 
   SemaRef.InstantiateAttrs(TemplateArgs, D, Property, LateAttrs,
                            StartingScope);
@@ -522,10 +560,8 @@
     new (SemaRef.Context)NamedDecl*[D->getChainingSize()];
 
   int i = 0;
-  for (IndirectFieldDecl::chain_iterator PI =
-       D->chain_begin(), PE = D->chain_end();
-       PI != PE; ++PI) {
-    NamedDecl *Next = SemaRef.FindInstantiatedDecl(D->getLocation(), *PI,
+  for (auto *PI : D->chain()) {
+    NamedDecl *Next = SemaRef.FindInstantiatedDecl(D->getLocation(), PI,
                                               TemplateArgs);
     if (!Next)
       return 0;
@@ -650,6 +686,8 @@
 
   Enum->setInstantiationOfMemberEnum(D, TSK_ImplicitInstantiation);
   Enum->setAccess(D->getAccess());
+  // Forward the mangling number from the template to the instantiated decl.
+  SemaRef.Context.setManglingNumber(Enum, SemaRef.Context.getManglingNumber(D));
   if (SubstQualifier(D, Enum)) return 0;
   Owner->addDecl(Enum);
 
@@ -693,9 +731,7 @@
   SmallVector<Decl*, 4> Enumerators;
 
   EnumConstantDecl *LastEnumConst = 0;
-  for (EnumDecl::enumerator_iterator EC = Pattern->enumerator_begin(),
-         ECEnd = Pattern->enumerator_end();
-       EC != ECEnd; ++EC) {
+  for (auto *EC : Pattern->enumerators()) {
     // The specified value for the enumerator.
     ExprResult Value = SemaRef.Owned((Expr *)0);
     if (Expr *UninstValue = EC->getInitExpr()) {
@@ -725,7 +761,7 @@
     }
 
     if (EnumConst) {
-      SemaRef.InstantiateAttrs(TemplateArgs, *EC, EnumConst);
+      SemaRef.InstantiateAttrs(TemplateArgs, EC, EnumConst);
 
       EnumConst->setAccess(Enum->getAccess());
       Enum->addDecl(EnumConst);
@@ -736,7 +772,7 @@
           !Enum->isScoped()) {
         // If the enumeration is within a function or method, record the enum
         // constant as a local.
-        SemaRef.CurrentInstantiationScope->InstantiatedLocal(*EC, EnumConst);
+        SemaRef.CurrentInstantiationScope->InstantiatedLocal(EC, EnumConst);
       }
     }
   }
@@ -991,8 +1027,9 @@
 
   VarTemplateDecl *Inst = VarTemplateDecl::Create(
       SemaRef.Context, DC, D->getLocation(), D->getIdentifier(), InstParams,
-      VarInst, PrevVarTemplate);
+      VarInst);
   VarInst->setDescribedVarTemplate(Inst);
+  Inst->setPreviousDecl(PrevVarTemplate);
 
   Inst->setAccess(D->getAccess());
   if (!PrevVarTemplate)
@@ -1135,19 +1172,20 @@
   if (D->isLocalClass())
     SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Record);
 
+  // Forward the mangling number from the template to the instantiated decl.
+  SemaRef.Context.setManglingNumber(Record,
+                                    SemaRef.Context.getManglingNumber(D));
+
   Owner->addDecl(Record);
 
   // DR1484 clarifies that the members of a local class are instantiated as part
   // of the instantiation of their enclosing entity.
   if (D->isCompleteDefinition() && D->isLocalClass()) {
-    if (SemaRef.InstantiateClass(D->getLocation(), Record, D, TemplateArgs,
-                                 TSK_ImplicitInstantiation,
-                                 /*Complain=*/true)) {
-      llvm_unreachable("InstantiateClass shouldn't fail here!");
-    } else {
-      SemaRef.InstantiateClassMembers(D->getLocation(), Record, TemplateArgs,
-                                      TSK_ImplicitInstantiation);
-    }
+    SemaRef.InstantiateClass(D->getLocation(), Record, D, TemplateArgs,
+                             TSK_ImplicitInstantiation,
+                             /*Complain=*/true);
+    SemaRef.InstantiateClassMembers(D->getLocation(), Record, TemplateArgs,
+                                    TSK_ImplicitInstantiation);
   }
   return Record;
 }
@@ -1170,15 +1208,14 @@
 
   FunctionProtoType::ExtProtoInfo NewEPI = NewFunc->getExtProtoInfo();
   NewEPI.ExtInfo = OrigFunc->getExtInfo();
-  return Context.getFunctionType(NewFunc->getResultType(),
-                                 NewFunc->getArgTypes(), NewEPI);
+  return Context.getFunctionType(NewFunc->getReturnType(),
+                                 NewFunc->getParamTypes(), NewEPI);
 }
 
 /// Normal class members are of more specific types and therefore
 /// don't make it here.  This function serves two purposes:
 ///   1) instantiating function templates
 ///   2) substituting friend declarations
-/// FIXME: preserve function definitions in case #2
 Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
                                        TemplateParameterList *TemplateParams) {
   // Check whether there is already a function template specialization for
@@ -1388,73 +1425,56 @@
     PrincipalDecl->setObjectOfFriendDecl();
     DC->makeDeclVisibleInContext(PrincipalDecl);
 
-    bool queuedInstantiation = false;
+    bool QueuedInstantiation = false;
 
-    // C++98 [temp.friend]p5: When a function is defined in a friend function
-    //   declaration in a class template, the function is defined at each
-    //   instantiation of the class template. The function is defined even if it
-    //   is never used.
-    // C++11 [temp.friend]p4: When a function is defined in a friend function
-    //   declaration in a class template, the function is instantiated when the
-    //   function is odr-used.
-    //
-    // If -Wc++98-compat is enabled, we go through the motions of checking for a
-    // redefinition, but don't instantiate the function.
-    if ((!SemaRef.getLangOpts().CPlusPlus11 ||
-         SemaRef.Diags.getDiagnosticLevel(
-             diag::warn_cxx98_compat_friend_redefinition,
-             Function->getLocation())
-           != DiagnosticsEngine::Ignored) &&
-        D->isThisDeclarationADefinition()) {
+    // C++11 [temp.friend]p4 (DR329):
+    //   When a function is defined in a friend function declaration in a class
+    //   template, the function is instantiated when the function is odr-used.
+    //   The same restrictions on multiple declarations and definitions that
+    //   apply to non-template function declarations and definitions also apply
+    //   to these implicit definitions.
+    if (D->isThisDeclarationADefinition()) {
       // Check for a function body.
       const FunctionDecl *Definition = 0;
       if (Function->isDefined(Definition) &&
           Definition->getTemplateSpecializationKind() == TSK_Undeclared) {
-        SemaRef.Diag(Function->getLocation(),
-                     SemaRef.getLangOpts().CPlusPlus11 ?
-                       diag::warn_cxx98_compat_friend_redefinition :
-                       diag::err_redefinition) << Function->getDeclName();
+        SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
+            << Function->getDeclName();
         SemaRef.Diag(Definition->getLocation(), diag::note_previous_definition);
-        if (!SemaRef.getLangOpts().CPlusPlus11)
-          Function->setInvalidDecl();
       }
       // Check for redefinitions due to other instantiations of this or
       // a similar friend function.
-      else for (FunctionDecl::redecl_iterator R = Function->redecls_begin(),
-                                           REnd = Function->redecls_end();
-                R != REnd; ++R) {
-        if (*R == Function)
+      else for (auto R : Function->redecls()) {
+        if (R == Function)
           continue;
-        switch (R->getFriendObjectKind()) {
-        case Decl::FOK_None:
-          if (!SemaRef.getLangOpts().CPlusPlus11 &&
-              !queuedInstantiation && R->isUsed(false)) {
-            if (MemberSpecializationInfo *MSInfo
-                = Function->getMemberSpecializationInfo()) {
-              if (MSInfo->getPointOfInstantiation().isInvalid()) {
-                SourceLocation Loc = R->getLocation(); // FIXME
-                MSInfo->setPointOfInstantiation(Loc);
-                SemaRef.PendingLocalImplicitInstantiations.push_back(
-                                                 std::make_pair(Function, Loc));
-                queuedInstantiation = true;
-              }
+
+        // If some prior declaration of this function has been used, we need
+        // to instantiate its definition.
+        if (!QueuedInstantiation && R->isUsed(false)) {
+          if (MemberSpecializationInfo *MSInfo =
+                  Function->getMemberSpecializationInfo()) {
+            if (MSInfo->getPointOfInstantiation().isInvalid()) {
+              SourceLocation Loc = R->getLocation(); // FIXME
+              MSInfo->setPointOfInstantiation(Loc);
+              SemaRef.PendingLocalImplicitInstantiations.push_back(
+                                               std::make_pair(Function, Loc));
+              QueuedInstantiation = true;
             }
           }
-          break;
-        default:
-          if (const FunctionDecl *RPattern
-              = R->getTemplateInstantiationPattern())
+        }
+
+        // If some prior declaration of this function was a friend with an
+        // uninstantiated definition, reject it.
+        if (R->getFriendObjectKind()) {
+          if (const FunctionDecl *RPattern =
+                  R->getTemplateInstantiationPattern()) {
             if (RPattern->isDefined(RPattern)) {
-              SemaRef.Diag(Function->getLocation(),
-                           SemaRef.getLangOpts().CPlusPlus11 ?
-                             diag::warn_cxx98_compat_friend_redefinition :
-                             diag::err_redefinition)
+              SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
                 << Function->getDeclName();
               SemaRef.Diag(R->getLocation(), diag::note_previous_definition);
-              if (!SemaRef.getLangOpts().CPlusPlus11)
-                Function->setInvalidDecl();
               break;
             }
+          }
         }
       }
     }
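
Roughly the situation the err_redefinition paths above reject (an illustrative example, not taken from the patch): a friend function defined inside a class template whose instantiation collides with an existing definition.

    void f() {}                    // ordinary definition of f

    template <typename T> struct A {
      friend void f() {}           // friend defined in the class template
    };

    A<int> a;                      // instantiating A<int> introduces a second
                                   // definition of f: "redefinition of 'f'"
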
@@ -2149,7 +2169,7 @@
   }
 
   if (!NewUD->isInvalidDecl() &&
-      SemaRef.CheckUsingDeclQualifier(D->getUsingLoc(), SS,
+      SemaRef.CheckUsingDeclQualifier(D->getUsingLoc(), SS, NameInfo,
                                       D->getLocation()))
     NewUD->setInvalidDecl();
 
@@ -2170,9 +2190,7 @@
   bool isFunctionScope = Owner->isFunctionOrMethod();
 
   // Process the shadow decls.
-  for (UsingDecl::shadow_iterator I = D->shadow_begin(), E = D->shadow_end();
-         I != E; ++I) {
-    UsingShadowDecl *Shadow = *I;
+  for (auto *Shadow : D->shadows()) {
     NamedDecl *InstTarget =
         cast_or_null<NamedDecl>(SemaRef.FindInstantiatedDecl(
             Shadow->getLocation(), Shadow->getTargetDecl(), TemplateArgs));
@@ -2288,10 +2306,8 @@
 Decl *TemplateDeclInstantiator::VisitOMPThreadPrivateDecl(
                                      OMPThreadPrivateDecl *D) {
   SmallVector<Expr *, 5> Vars;
-  for (ArrayRef<Expr *>::iterator I = D->varlist_begin(),
-                                  E = D->varlist_end();
-       I != E; ++I) {
-    Expr *Var = SemaRef.SubstExpr(*I, TemplateArgs).take();
+  for (auto *I : D->varlists()) {
+    Expr *Var = SemaRef.SubstExpr(I, TemplateArgs).take();
     assert(isa<DeclRefExpr>(Var) && "threadprivate arg is not a DeclRefExpr");
     Vars.push_back(Var);
   }
@@ -2299,6 +2315,9 @@
   OMPThreadPrivateDecl *TD =
     SemaRef.CheckOMPThreadPrivateDecl(D->getLocation(), Vars);
 
+  TD->setAccess(AS_public);
+  Owner->addDecl(TD);
+
   return TD;
 }
 
@@ -2476,11 +2495,10 @@
 
   // Check that the template argument list is well-formed for this template.
   SmallVector<TemplateArgument, 4> Converted;
-  bool ExpansionIntoFixedList = false;
   if (SemaRef.CheckTemplateArgumentList(
           VarTemplate, VarTemplate->getLocStart(),
           const_cast<TemplateArgumentListInfo &>(VarTemplateArgsInfo), false,
-          Converted, &ExpansionIntoFixedList))
+          Converted))
     return 0;
 
   // Find the variable template specialization declaration that
@@ -2887,9 +2905,9 @@
       TypeLoc NewTL = NewTInfo->getTypeLoc().IgnoreParens();
       FunctionProtoTypeLoc NewProtoLoc = NewTL.castAs<FunctionProtoTypeLoc>();
       unsigned NewIdx = 0;
-      for (unsigned OldIdx = 0, NumOldParams = OldProtoLoc.getNumArgs();
+      for (unsigned OldIdx = 0, NumOldParams = OldProtoLoc.getNumParams();
            OldIdx != NumOldParams; ++OldIdx) {
-        ParmVarDecl *OldParam = OldProtoLoc.getArg(OldIdx);
+        ParmVarDecl *OldParam = OldProtoLoc.getParam(OldIdx);
         LocalInstantiationScope *Scope = SemaRef.CurrentInstantiationScope;
 
         Optional<unsigned> NumArgumentsInExpansion;
@@ -2900,14 +2918,14 @@
         if (!NumArgumentsInExpansion) {
           // Simple case: normal parameter, or a parameter pack that's
           // instantiated to a (still-dependent) parameter pack.
-          ParmVarDecl *NewParam = NewProtoLoc.getArg(NewIdx++);
+          ParmVarDecl *NewParam = NewProtoLoc.getParam(NewIdx++);
           Params.push_back(NewParam);
           Scope->InstantiatedLocal(OldParam, NewParam);
         } else {
           // Parameter pack expansion: make the instantiation an argument pack.
           Scope->MakeInstantiatedLocalArgPack(OldParam);
           for (unsigned I = 0; I != *NumArgumentsInExpansion; ++I) {
-            ParmVarDecl *NewParam = NewProtoLoc.getArg(NewIdx++);
+            ParmVarDecl *NewParam = NewProtoLoc.getParam(NewIdx++);
             Params.push_back(NewParam);
             Scope->InstantiatedLocalPackArg(OldParam, NewParam);
           }
@@ -2919,11 +2937,12 @@
       // the function parameters themselves.
       const FunctionProtoType *OldProto =
           cast<FunctionProtoType>(OldProtoLoc.getType());
-      for (unsigned i = 0, i_end = OldProtoLoc.getNumArgs(); i != i_end; ++i) {
-        ParmVarDecl *OldParam = OldProtoLoc.getArg(i);
+      for (unsigned i = 0, i_end = OldProtoLoc.getNumParams(); i != i_end;
+           ++i) {
+        ParmVarDecl *OldParam = OldProtoLoc.getParam(i);
         if (!OldParam) {
           Params.push_back(SemaRef.BuildParmVarDeclForTypedef(
-              D, D->getLocation(), OldProto->getArgType(i)));
+              D, D->getLocation(), OldProto->getParamType(i)));
           continue;
         }
 
@@ -2969,6 +2988,14 @@
       // Simple case: not a parameter pack.
       assert(FParamIdx < Function->getNumParams());
       ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+      // If the parameter's type is not dependent, update it to match the type
+      // in the pattern. They can differ in top-level cv-qualifiers, and we want
+      // the pattern's type here. If the type is dependent, they can't differ,
+      // per core issue 1668.
+      // FIXME: Updating the type to work around this is at best fragile.
+      if (!PatternDecl->getType()->isDependentType())
+        FunctionParam->setType(PatternParam->getType());
+
       FunctionParam->setDeclName(PatternParam->getDeclName());
       Scope.InstantiatedLocal(PatternParam, FunctionParam);
       ++FParamIdx;
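
The top-level cv-qualifier mismatch mentioned in the comment can arise when a template's declaration and its definition (the pattern) spell a parameter type differently; a hedged illustration:

    template <typename T> void f(int x);          // declared with parameter type 'int'
    template <typename T> void f(const int x) {}  // pattern adds a top-level 'const'

    void use() { f<void>(0); }  // when the definition is instantiated, the
                                // parameter's type is reset to the pattern's
                                // 'const int' before substituting the body
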
@@ -2983,6 +3010,9 @@
            "should only be called when all template arguments are known");
     for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg) {
       ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+      if (!PatternDecl->getType()->isDependentType())
+        FunctionParam->setType(PatternParam->getType());
+
       FunctionParam->setDeclName(PatternParam->getDeclName());
       Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
       ++FParamIdx;
@@ -3102,19 +3132,13 @@
     }
   }
 
-  // Rebuild the function type
-  const FunctionProtoType *NewProto
-    = New->getType()->getAs<FunctionProtoType>();
-  assert(NewProto && "Template instantiation without function prototype?");
-
-  FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
+  FunctionProtoType::ExtProtoInfo EPI;
   EPI.ExceptionSpecType = Proto->getExceptionSpecType();
   EPI.NumExceptions = Exceptions.size();
   EPI.Exceptions = Exceptions.data();
   EPI.NoexceptExpr = NoexceptExpr;
 
-  New->setType(SemaRef.Context.getFunctionType(NewProto->getResultType(),
-                                               NewProto->getArgTypes(), EPI));
+  SemaRef.UpdateExceptionSpec(New, EPI);
 }
 
 void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
@@ -3128,10 +3152,9 @@
   if (Inst.isInvalid()) {
     // We hit the instantiation depth limit. Clear the exception specification
     // so that our callers don't have to cope with EST_Uninstantiated.
-    FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+    FunctionProtoType::ExtProtoInfo EPI;
     EPI.ExceptionSpecType = EST_None;
-    Decl->setType(Context.getFunctionType(Proto->getResultType(),
-                                          Proto->getArgTypes(), EPI));
+    UpdateExceptionSpec(Decl, EPI);
     return;
   }
 
@@ -3161,6 +3184,10 @@
   if (Tmpl->isDeleted())
     New->setDeletedAsWritten();
 
+  // Forward the mangling number from the template to the instantiated decl.
+  SemaRef.Context.setManglingNumber(New,
+                                    SemaRef.Context.getManglingNumber(Tmpl));
+
   // If we are performing substituting explicitly-specified template arguments
   // or deduced template arguments into a function template and we reach this
   // point, we are now past the point where SFINAE applies and have committed
@@ -3211,7 +3238,7 @@
       EPI.ExceptionSpecDecl = New;
       EPI.ExceptionSpecTemplate = ExceptionSpecTemplate;
       New->setType(SemaRef.Context.getFunctionType(
-          NewProto->getResultType(), NewProto->getArgTypes(), EPI));
+          NewProto->getReturnType(), NewProto->getParamTypes(), EPI));
     } else {
       ::InstantiateExceptionSpec(SemaRef, New, Proto, TemplateArgs);
     }
@@ -3339,10 +3366,10 @@
   //   initializer or return value, and class template specializations, other
   //   explicit instantiation declarations have the effect of suppressing the
   //   implicit instantiation of the entity to which they refer.
-  if (Function->getTemplateSpecializationKind()
-        == TSK_ExplicitInstantiationDeclaration &&
+  if (Function->getTemplateSpecializationKind() ==
+          TSK_ExplicitInstantiationDeclaration &&
       !PatternDecl->isInlined() &&
-      !PatternDecl->getResultType()->getContainedAutoType())
+      !PatternDecl->getReturnType()->getContainedAutoType())
     return;
 
   if (PatternDecl->isInlined())
@@ -3413,6 +3440,9 @@
 
     PerformDependentDiagnostics(PatternDecl, TemplateArgs);
 
+    if (auto *Listener = getASTMutationListener())
+      Listener->FunctionDefinitionInstantiated(Function);
+
     savedContext.pop();
   }
 
@@ -3467,7 +3497,18 @@
   // or may not be the declaration in the class; if it's in the class, we want
   // to instantiate a member in the class (a declaration), and if it's outside,
   // we want to instantiate a definition.
-  FromVar = FromVar->getFirstDecl();
+  //
+  // If we're instantiating an explicitly-specialized member template or member
+  // partial specialization, don't do this. The member specialization completely
+  // replaces the original declaration in this case.
+  bool IsMemberSpec = false;
+  if (VarTemplatePartialSpecializationDecl *PartialSpec =
+          dyn_cast<VarTemplatePartialSpecializationDecl>(FromVar))
+    IsMemberSpec = PartialSpec->isMemberSpecialization();
+  else if (VarTemplateDecl *FromTemplate = FromVar->getDescribedVarTemplate())
+    IsMemberSpec = FromTemplate->isMemberSpecialization();
+  if (!IsMemberSpec)
+    FromVar = FromVar->getFirstDecl();
 
   MultiLevelTemplateArgumentList MultiLevelList(TemplateArgList);
   TemplateDeclInstantiator Instantiator(*this, FromVar->getDeclContext(),
@@ -3552,16 +3593,15 @@
 
   InstantiateAttrs(TemplateArgs, OldVar, NewVar, LateAttrs, StartingScope);
 
-  if (NewVar->hasAttrs())
-    CheckAlignasUnderalignment(NewVar);
-
   LookupResult Previous(
       *this, NewVar->getDeclName(), NewVar->getLocation(),
       NewVar->isLocalExternDecl() ? Sema::LookupRedeclarationWithLinkage
                                   : Sema::LookupOrdinaryName,
       Sema::ForRedeclaration);
 
-  if (NewVar->isLocalExternDecl() && OldVar->getPreviousDecl()) {
+  if (NewVar->isLocalExternDecl() && OldVar->getPreviousDecl() &&
+      (!OldVar->getPreviousDecl()->getDeclContext()->isDependentContext() ||
+       OldVar->getPreviousDecl()->getDeclContext()==OldVar->getDeclContext())) {
     // We have a previous declaration. Use that one, so we merge with the
     // right type.
     if (NamedDecl *NewPrev = FindInstantiatedDecl(
@@ -3589,9 +3629,16 @@
     NewVar->setInstantiationOfStaticDataMember(OldVar,
                                                TSK_ImplicitInstantiation);
 
+  // Forward the mangling number from the template to the instantiated decl.
+  Context.setManglingNumber(NewVar, Context.getManglingNumber(OldVar));
+  Context.setStaticLocalNumber(NewVar, Context.getStaticLocalNumber(OldVar));
+
   // Delay instantiation of the initializer for variable templates until a
-  // definition of the variable is needed.
-  if (!isa<VarTemplateSpecializationDecl>(NewVar) && !InstantiatingVarTemplate)
+  // definition of the variable is needed. We need it right away if the type
+  // contains 'auto'.
+  if ((!isa<VarTemplateSpecializationDecl>(NewVar) &&
+       !InstantiatingVarTemplate) ||
+      NewVar->getType()->isUndeducedType())
     InstantiateVariableInitializer(NewVar, OldVar, TemplateArgs);
 
   // Diagnose unused local variables with dependent types, where the diagnostic
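
The 'auto' case added above is, for instance, a variable template whose type has to be deduced from its initializer, so instantiating the initializer cannot be deferred (illustrative):

    template <typename T> auto zero = T{};  // declared type of zero<T> is deduced

    int n = zero<int>;  // instantiating zero<int> needs its initializer right away
                        // to deduce the type (here, 'int')
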
@@ -3951,11 +3998,7 @@
   bool AnyErrors = Tmpl->isInvalidDecl();
 
   // Instantiate all the initializers.
-  for (CXXConstructorDecl::init_const_iterator Inits = Tmpl->init_begin(),
-                                            InitsEnd = Tmpl->init_end();
-       Inits != InitsEnd; ++Inits) {
-    CXXCtorInitializer *Init = *Inits;
-
+  for (const auto *Init : Tmpl->inits()) {
     // Only instantiate written initializers, let Sema re-construct implicit
     // ones.
     if (!Init->isWritten())
@@ -4339,7 +4382,7 @@
   // find an instantiated decl for (T y) when the ParentDC for y is
   // the translation unit.  
   //   e.g. template <class T> void Foo(auto (*p)(T y) -> decltype(y())) {} 
-  //   float baz(float(*)()) { return 0.0; }
+  //   float baz(float(*)()) { return 0.0; }
   //   Foo(baz);
   // The better fix here is perhaps to ensure that a ParmVarDecl, by the time
   // it gets here, always has a FunctionOrMethod as its ParentDC??
@@ -4550,14 +4593,6 @@
 /// \brief Performs template instantiation for all implicit template
 /// instantiations we have seen until this point.
 void Sema::PerformPendingInstantiations(bool LocalOnly) {
-  // Load pending instantiations from the external source.
-  if (!LocalOnly && ExternalSource) {
-    SmallVector<PendingImplicitInstantiation, 4> Pending;
-    ExternalSource->ReadPendingInstantiations(Pending);
-    PendingInstantiations.insert(PendingInstantiations.begin(),
-                                 Pending.begin(), Pending.end());
-  }
-
   while (!PendingLocalImplicitInstantiations.empty() ||
          (!LocalOnly && !PendingInstantiations.empty())) {
     PendingImplicitInstantiation Inst;
@@ -4624,10 +4659,7 @@
 
 void Sema::PerformDependentDiagnostics(const DeclContext *Pattern,
                        const MultiLevelTemplateArgumentList &TemplateArgs) {
-  for (DeclContext::ddiag_iterator I = Pattern->ddiag_begin(),
-         E = Pattern->ddiag_end(); I != E; ++I) {
-    DependentDiagnostic *DD = *I;
-
+  for (auto DD : Pattern->ddiags()) {
     switch (DD->getKind()) {
     case DependentDiagnostic::Access:
       HandleDependentAccessCheck(*DD, TemplateArgs);
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index 78aa7f8..de850e3 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -10,6 +10,7 @@
 //===----------------------------------------------------------------------===/
 
 #include "clang/Sema/Sema.h"
+#include "TypeLocBuilder.h"
 #include "clang/AST/Expr.h"
 #include "clang/AST/RecursiveASTVisitor.h"
 #include "clang/AST/TypeLoc.h"
@@ -18,7 +19,6 @@
 #include "clang/Sema/ScopeInfo.h"
 #include "clang/Sema/SemaInternal.h"
 #include "clang/Sema/Template.h"
-#include "TypeLocBuilder.h"
 
 using namespace clang;
 
@@ -554,8 +554,8 @@
       if (isa<ParmVarDecl>(ND))
         IsFunctionParameterPack = true;
       else
-        llvm::tie(Depth, Index) = getDepthAndIndex(ND);        
-      
+        std::tie(Depth, Index) = getDepthAndIndex(ND);
+
       Name = ND->getIdentifier();
     }
     
@@ -599,7 +599,7 @@
       if (NamedDecl *PartialPack 
                     = CurrentInstantiationScope->getPartiallySubstitutedPack()){
         unsigned PartialDepth, PartialIndex;
-        llvm::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack);
+        std::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack);
         if (PartialDepth == Depth && PartialIndex == Index)
           RetainExpansion = true;
       }
@@ -669,8 +669,8 @@
         Result = Size;
         continue;
       }
-      
-      llvm::tie(Depth, Index) = getDepthAndIndex(ND);        
+
+      std::tie(Depth, Index) = getDepthAndIndex(ND);
     }
     if (Depth >= TemplateArgs.getNumLevels() ||
         !TemplateArgs.hasTemplateArgument(Depth, Index))
@@ -730,14 +730,6 @@
   case TST_auto:
   case TST_decltype_auto:
   case TST_unknown_anytype:
-  case TST_image1d_t:
-  case TST_image1d_array_t:
-  case TST_image1d_buffer_t:
-  case TST_image2d_t:
-  case TST_image2d_array_t:
-  case TST_image3d_t:
-  case TST_sampler_t:
-  case TST_event_t:
   case TST_error:
     break;
   }
@@ -775,7 +767,7 @@
 // Callback to only accept typo corrections that refer to parameter packs.
 class ParameterPackValidatorCCC : public CorrectionCandidateCallback {
  public:
-  virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+  bool ValidateCandidate(const TypoCorrection &candidate) override {
     NamedDecl *ND = candidate.getCorrectionDecl();
     return ND && ND->isParameterPack();
   }
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index aa7459d..e273cb6 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Sema/SemaInternal.h"
+#include "TypeLocBuilder.h"
 #include "clang/AST/ASTConsumer.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/ASTMutationListener.h"
@@ -21,7 +22,6 @@
 #include "clang/AST/Expr.h"
 #include "clang/AST/TypeLoc.h"
 #include "clang/AST/TypeLocVisitor.h"
-#include "clang/Basic/OpenCL.h"
 #include "clang/Basic/PartialDiagnostic.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/Lex/Preprocessor.h"
@@ -34,7 +34,6 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "TypeLocBuilder.h"
 
 using namespace clang;
 
@@ -728,13 +727,15 @@
       Result = Context.WCharTy;
     else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) {
       S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
-        << DS.getSpecifierName(DS.getTypeSpecType());
+        << DS.getSpecifierName(DS.getTypeSpecType(),
+                               Context.getPrintingPolicy());
       Result = Context.getSignedWCharType();
     } else {
       assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
         "Unknown TSS value");
       S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
-        << DS.getSpecifierName(DS.getTypeSpecType());
+        << DS.getSpecifierName(DS.getTypeSpecType(),
+                               Context.getPrintingPolicy());
       Result = Context.getUnsignedWCharType();
     }
     break;
@@ -1058,38 +1059,6 @@
     }
     break;
 
-  case DeclSpec::TST_image1d_t:
-    Result = Context.OCLImage1dTy;
-    break;
-
-  case DeclSpec::TST_image1d_array_t:
-    Result = Context.OCLImage1dArrayTy;
-    break;
-
-  case DeclSpec::TST_image1d_buffer_t:
-    Result = Context.OCLImage1dBufferTy;
-    break;
-
-  case DeclSpec::TST_image2d_t:
-    Result = Context.OCLImage2dTy;
-    break;
-
-  case DeclSpec::TST_image2d_array_t:
-    Result = Context.OCLImage2dArrayTy;
-    break;
-
-  case DeclSpec::TST_image3d_t:
-    Result = Context.OCLImage3dTy;
-    break;
-
-  case DeclSpec::TST_sampler_t:
-    Result = Context.OCLSamplerTy;
-    break;
-
-  case DeclSpec::TST_event_t:
-    Result = Context.OCLEventTy;
-    break;
-
   case DeclSpec::TST_error:
     Result = Context.IntTy;
     declarator.setInvalidType(true);
@@ -1148,17 +1117,33 @@
       }
     }
 
-    // C++ [dcl.ref]p1:
+    // C++11 [dcl.ref]p1:
     //   Cv-qualified references are ill-formed except when the
-    //   cv-qualifiers are introduced through the use of a typedef
-    //   (7.1.3) or of a template type argument (14.3), in which
-    //   case the cv-qualifiers are ignored.
-    // FIXME: Shouldn't we be checking SCS_typedef here?
+    //   cv-qualifiers are introduced through the use of a typedef-name
+    //   or decltype-specifier, in which case the cv-qualifiers are ignored.
+    //
+    // There don't appear to be any other contexts in which a cv-qualified
+    // reference type could be formed, so the 'ill-formed' clause here appears
+    // to never happen.
     if (DS.getTypeSpecType() == DeclSpec::TST_typename &&
         TypeQuals && Result->isReferenceType()) {
-      TypeQuals &= ~DeclSpec::TQ_const;
-      TypeQuals &= ~DeclSpec::TQ_volatile;
-      TypeQuals &= ~DeclSpec::TQ_atomic;
+      // If this occurs outside a template instantiation, warn the user about
+      // it; they probably didn't mean to specify a redundant qualifier.
+      typedef std::pair<DeclSpec::TQ, SourceLocation> QualLoc;
+      QualLoc Quals[] = {
+        QualLoc(DeclSpec::TQ_const, DS.getConstSpecLoc()),
+        QualLoc(DeclSpec::TQ_volatile, DS.getVolatileSpecLoc()),
+        QualLoc(DeclSpec::TQ_atomic, DS.getAtomicSpecLoc())
+      };
+      for (unsigned I = 0, N = llvm::array_lengthof(Quals); I != N; ++I) {
+        if (S.ActiveTemplateInstantiations.empty()) {
+          if (TypeQuals & Quals[I].first)
+            S.Diag(Quals[I].second, diag::warn_typecheck_reference_qualifiers)
+              << DeclSpec::getSpecifierName(Quals[I].first) << Result
+              << FixItHint::CreateRemoval(Quals[I].second);
+        }
+        TypeQuals &= ~Quals[I].first;
+      }
     }
 
     // C90 6.5.3 constraints: "The same type qualifier shall not appear more
@@ -1427,10 +1412,10 @@
   public:
     VLADiagnoser() : Sema::VerifyICEDiagnoser(true) {}
 
-    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+    void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
     }
 
-    virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) {
+    void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) override {
       S.Diag(Loc, diag::ext_vla_folded_to_constant) << SR;
     }
   } Diagnoser;
@@ -1487,6 +1472,13 @@
                                diag::err_array_of_abstract_type))
       return QualType();
 
+    // Mentioning a member pointer type for an array type causes us to lock in
+    // an inheritance model, even if it's inside an unused typedef.
+    if (Context.getTargetInfo().getCXXABI().isMicrosoft())
+      if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
+        if (!MPTy->getClass()->isDependentType())
+          RequireCompleteType(Loc, T, 0);
+
   } else {
     // C99 6.7.5.2p1: If the element type is an incomplete or function type,
     // reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
@@ -1614,6 +1606,7 @@
       // Prohibit the use of non-POD types in VLAs.
       QualType BaseT = Context.getBaseElementType(T);
       if (!T->isDependentType() &&
+          !RequireCompleteType(Loc, BaseT, 0) &&
           !BaseT.isPODType(Context) &&
           !BaseT->isObjCLifetimeType()) {
         Diag(Loc, diag::err_vla_non_pod)
@@ -1769,13 +1762,13 @@
   //   with reference type, or "cv void."
   if (T->isReferenceType()) {
     Diag(Loc, diag::err_illegal_decl_mempointer_to_reference)
-      << (Entity? Entity.getAsString() : "type name") << T;
+      << getPrintableNameForEntity(Entity) << T;
     return QualType();
   }
 
   if (T->isVoidType()) {
     Diag(Loc, diag::err_illegal_decl_mempointer_to_void)
-      << (Entity? Entity.getAsString() : "type name");
+      << getPrintableNameForEntity(Entity);
     return QualType();
   }
 
@@ -1784,35 +1777,10 @@
     return QualType();
   }
 
-  // C++ allows the class type in a member pointer to be an incomplete type.
-  // In the Microsoft ABI, the size of the member pointer can vary
-  // according to the class type, which means that we really need a
-  // complete type if possible, which means we need to instantiate templates.
-  //
-  // If template instantiation fails or the type is just incomplete, we have to
-  // add an extra slot to the member pointer.  Yes, this does cause problems
-  // when passing pointers between TUs that disagree about the size.
-  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
-    CXXRecordDecl *RD = Class->getAsCXXRecordDecl();
-    if (RD && !RD->hasAttr<MSInheritanceAttr>()) {
-      // Lock in the inheritance model on the first use of a member pointer.
-      // Otherwise we may disagree about the size at different points in the TU.
-      // FIXME: MSVC picks a model on the first use that needs to know the size,
-      // rather than on the first mention of the type, e.g. typedefs.
-      if (RequireCompleteType(Loc, Class, 0) && !RD->isBeingDefined()) {
-        // We know it doesn't have an attribute and it's incomplete, so use the
-        // unspecified inheritance model.  If we're in the record body, we can
-        // figure out the inheritance model.
-        for (CXXRecordDecl::redecl_iterator I = RD->redecls_begin(),
-             E = RD->redecls_end(); I != E; ++I) {
-          I->addAttr(::new (Context) UnspecifiedInheritanceAttr(
-              RD->getSourceRange(), Context));
-        }
-      }
-    }
-  }
-
-  // FIXME: Adjust member function pointer calling conventions.
+  // Adjust the default free function calling convention to the default method
+  // calling convention.
+  if (T->isFunctionType())
+    adjustMemberFunctionCC(T, /*IsStatic=*/false);
 
   return Context.getMemberPointerType(T, Class.getTypePtr());
 }
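
The calling-convention adjustment matters on targets whose default member calling convention differs from the free-function default, e.g. 32-bit Windows (illustrative):

    struct S { void f(int); };

    // On 32-bit Windows the pointee function type written here defaults to
    // __cdecl; the adjustment above switches it to the default member
    // convention (__thiscall) so that '&S::f' initializes it without an
    // explicit calling-convention attribute.
    void (S::*pmf)(int) = &S::f;
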
@@ -2375,11 +2343,11 @@
     return;
 
   // An initializer for a non-class type can have at most one argument.
-  if (!RT->isRecordType() && FTI.NumArgs > 1)
+  if (!RT->isRecordType() && FTI.NumParams > 1)
     return;
 
   // An initializer for a reference must have exactly one argument.
-  if (RT->isReferenceType() && FTI.NumArgs != 1)
+  if (RT->isReferenceType() && FTI.NumParams != 1)
     return;
 
   // Only warn if this declarator is declaring a function at block scope, and
@@ -2399,9 +2367,9 @@
   SourceRange ParenRange(DeclType.Loc, DeclType.EndLoc);
 
   S.Diag(DeclType.Loc,
-         FTI.NumArgs ? diag::warn_parens_disambiguated_as_function_declaration
-                     : diag::warn_empty_parens_are_function_decl)
-    << ParenRange;
+         FTI.NumParams ? diag::warn_parens_disambiguated_as_function_declaration
+                       : diag::warn_empty_parens_are_function_decl)
+      << ParenRange;
 
   // If the declaration looks like:
   //   T var1,
@@ -2422,11 +2390,11 @@
     }
   }
 
-  if (FTI.NumArgs > 0) {
+  if (FTI.NumParams > 0) {
     // For a declaration with parameters, eg. "T var(T());", suggest adding parens
     // around the first parameter to turn the declaration into a variable
     // declaration.
-    SourceRange Range = FTI.ArgInfo[0].Param->getSourceRange();
+    SourceRange Range = FTI.Params[0].Param->getSourceRange();
     SourceLocation B = Range.getBegin();
     SourceLocation E = S.PP.getLocForEndOfToken(Range.getEnd());
     // FIXME: Maybe we should suggest adding braces instead of parens
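
The two diagnostics involved here cover the classic vexing-parse declarations, for example (illustrative):

    struct Widget { Widget(); };

    void demo() {
      Widget w1();          // empty parentheses: declares a function returning Widget
      Widget w2(Widget());  // parentheses disambiguated as a function declaration
                            // taking an unnamed function-pointer parameter
    }
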
@@ -2625,7 +2593,6 @@
       }
       T = S.BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name);
 
-      Qualifiers Quals;
       if (DeclType.Ref.HasRestrict)
         T = S.BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict);
       break;
@@ -2727,11 +2694,13 @@
       if (!D.isInvalidType()) {
         // trailing-return-type is only required if we're declaring a function,
         // and not, for instance, a pointer to a function.
-        if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
+        if (D.getDeclSpec().containsPlaceholderType() &&
             !FTI.hasTrailingReturnType() && chunkIndex == 0 &&
             !S.getLangOpts().CPlusPlus1y) {
           S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
-               diag::err_auto_missing_trailing_return);
+                 D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto
+                     ? diag::err_auto_missing_trailing_return
+                     : diag::err_deduced_return_type);
           T = Context.IntTy;
           D.setInvalidType(true);
         } else if (FTI.hasTrailingReturnType()) {
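
Before C++1y, a placeholder return type requires a trailing return type at the outermost declarator, which is what this diagnostic enforces (illustrative):

    auto good() -> int;  // OK in C++11: trailing return type is supplied
    auto bad();          // C++11 error: 'auto' return without trailing return type
                         // (accepted in C++1y, where the return type is deduced
                         // from the definition)
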
@@ -2882,14 +2851,14 @@
 
       FunctionType::ExtInfo EI(getCCForDeclaratorChunk(S, D, FTI, chunkIndex));
 
-      if (!FTI.NumArgs && !FTI.isVariadic && !LangOpts.CPlusPlus) {
+      if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus) {
         // Simple void foo(), where the incoming T is the result type.
         T = Context.getFunctionNoProtoType(T, EI);
       } else {
         // We allow a zero-parameter variadic function in C if the
         // function is marked with the "overloadable" attribute. Scan
         // for this attribute now.
-        if (!FTI.NumArgs && FTI.isVariadic && !LangOpts.CPlusPlus) {
+        if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus) {
           bool Overloadable = false;
           for (const AttributeList *Attrs = D.getAttributes();
                Attrs; Attrs = Attrs->getNext()) {
@@ -2903,10 +2872,11 @@
             S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_arg);
         }
 
-        if (FTI.NumArgs && FTI.ArgInfo[0].Param == 0) {
+        if (FTI.NumParams && FTI.Params[0].Param == 0) {
           // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function
           // definition.
-          S.Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration);
+          S.Diag(FTI.Params[0].IdentLoc,
+                 diag::err_ident_list_in_fn_declaration);
           D.setInvalidType(true);
           // Recover by creating a K&R-style function type.
           T = Context.getFunctionNoProtoType(T, EI);
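Illustrative C declarations for the branches above (not taken from the tree):

    void f(...);                                /* err_ellipsis_first_arg             */
    void g(...) __attribute__((overloadable));  /* accepted: zero-parameter variadic  */
    int  h(x, y);                               /* identifier list outside a definition:
                                                   err_ident_list_in_fn_declaration   */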
@@ -2925,14 +2895,14 @@
         // Otherwise, we have a function with an argument list that is
         // potentially variadic.
         SmallVector<QualType, 16> ArgTys;
-        ArgTys.reserve(FTI.NumArgs);
+        ArgTys.reserve(FTI.NumParams);
 
-        SmallVector<bool, 16> ConsumedArguments;
-        ConsumedArguments.reserve(FTI.NumArgs);
-        bool HasAnyConsumedArguments = false;
+        SmallVector<bool, 16> ConsumedParameters;
+        ConsumedParameters.reserve(FTI.NumParams);
+        bool HasAnyConsumedParameters = false;
 
-        for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
-          ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
+        for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) {
+          ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
           QualType ArgTy = Param->getType();
           assert(!ArgTy.isNull() && "Couldn't parse type?");
 
@@ -2943,14 +2913,13 @@
             // If this is something like 'float(int, void)', reject it.  'void'
             // is an incomplete type (C99 6.2.5p19) and function decls cannot
             // have arguments of incomplete type.
-            if (FTI.NumArgs != 1 || FTI.isVariadic) {
+            if (FTI.NumParams != 1 || FTI.isVariadic) {
               S.Diag(DeclType.Loc, diag::err_void_only_param);
               ArgTy = Context.IntTy;
               Param->setType(ArgTy);
-            } else if (FTI.ArgInfo[i].Ident) {
+            } else if (FTI.Params[i].Ident) {
               // Reject, but continue to parse 'int(void abc)'.
-              S.Diag(FTI.ArgInfo[i].IdentLoc,
-                   diag::err_param_with_void_type);
+              S.Diag(FTI.Params[i].IdentLoc, diag::err_param_with_void_type);
               ArgTy = Context.IntTy;
               Param->setType(ArgTy);
             } else {
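The 'void' parameter cases handled here, spelled out as C source (illustrative only):

    float f1(int, void);   /* err_void_only_param: 'void' must be the only parameter      */
    int   f2(void abc);    /* err_param_with_void_type: a 'void' parameter cannot be named */
    int   f3(void);        /* OK */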
@@ -2990,15 +2959,15 @@
 
           if (LangOpts.ObjCAutoRefCount) {
             bool Consumed = Param->hasAttr<NSConsumedAttr>();
-            ConsumedArguments.push_back(Consumed);
-            HasAnyConsumedArguments |= Consumed;
+            ConsumedParameters.push_back(Consumed);
+            HasAnyConsumedParameters |= Consumed;
           }
 
           ArgTys.push_back(ArgTy);
         }
 
-        if (HasAnyConsumedArguments)
-          EPI.ConsumedArguments = ConsumedArguments.data();
+        if (HasAnyConsumedParameters)
+          EPI.ConsumedParameters = ConsumedParameters.data();
 
         SmallVector<QualType, 4> Exceptions;
         SmallVector<ParsedType, 2> DynamicExceptions;
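A hedged Objective-C ARC sketch of what the NSConsumedAttr scan records (the function name is made up):

    // Objective-C, compiled with ARC.
    void takeOwnership(__attribute__((ns_consumed)) id obj);
    // HasAnyConsumedParameters becomes true and EPI.ConsumedParameters records
    // {true} for the single parameter, so callers transfer ownership of 'obj'.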
@@ -3040,8 +3009,7 @@
         D.setInvalidType(true);
       } else if (S.isDependentScopeSpecifier(SS) ||
                  dyn_cast_or_null<CXXRecordDecl>(S.computeDeclContext(SS))) {
-        NestedNameSpecifier *NNS
-          = static_cast<NestedNameSpecifier*>(SS.getScopeRep());
+        NestedNameSpecifier *NNS = SS.getScopeRep();
         NestedNameSpecifier *NNSPrefix = NNS->getPrefix();
         switch (NNS->getKind()) {
         case NestedNameSpecifier::Identifier:
@@ -3165,7 +3133,7 @@
       EPI.TypeQuals = 0;
       EPI.RefQualifier = RQ_None;
 
-      T = Context.getFunctionType(FnTy->getResultType(), FnTy->getArgTypes(),
+      T = Context.getFunctionType(FnTy->getReturnType(), FnTy->getParamTypes(),
                                   EPI);
       // Rebuild any parens around the identifier in the function type.
       for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
@@ -3476,10 +3444,17 @@
   }
 
   TL.setAttrNameLoc(attrs->getLoc());
-  if (TL.hasAttrExprOperand() && attrs->isArgExpr(0))
+  if (TL.hasAttrExprOperand()) {
+    assert(attrs->isArgExpr(0) && "mismatched attribute operand kind");
     TL.setAttrExprOperand(attrs->getArgAsExpr(0));
-  else if (TL.hasAttrEnumOperand() && attrs->isArgIdent(0))
-    TL.setAttrEnumOperandLoc(attrs->getArgAsIdent(0)->Loc);
+  } else if (TL.hasAttrEnumOperand()) {
+    assert((attrs->isArgIdent(0) || attrs->isArgExpr(0)) &&
+           "unexpected attribute operand kind");
+    if (attrs->isArgIdent(0))
+      TL.setAttrEnumOperandLoc(attrs->getArgAsIdent(0)->Loc);
+    else
+      TL.setAttrEnumOperandLoc(attrs->getArgAsExpr(0)->getExprLoc());
+  }
 
   // FIXME: preserve this information to here.
   if (TL.hasAttrOperand())
@@ -3682,6 +3657,9 @@
     void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
       fillAttributedTypeLoc(TL, Chunk.getAttrs());
     }
+    void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
+      // Nothing to fill in: adjusted types add no locations of their own.
+    }
     void VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
       assert(Chunk.Kind == DeclaratorChunk::BlockPointer);
       TL.setCaretLoc(Chunk.Loc);
@@ -3763,9 +3741,9 @@
       const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun;
       TL.setLParenLoc(FTI.getLParenLoc());
       TL.setRParenLoc(FTI.getRParenLoc());
-      for (unsigned i = 0, e = TL.getNumArgs(), tpi = 0; i != e; ++i) {
-        ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
-        TL.setArg(tpi++, Param);
+      for (unsigned i = 0, e = TL.getNumParams(), tpi = 0; i != e; ++i) {
+        ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
+        TL.setParam(tpi++, Param);
       }
       // FIXME: exception specs
     }
@@ -3837,6 +3815,10 @@
       CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
     }
 
+    // FIXME: Ordering here?
+    while (AdjustedTypeLoc TL = CurrTL.getAs<AdjustedTypeLoc>())
+      CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+
     DeclaratorLocFiller(Context, D.getTypeObject(i)).Visit(CurrTL);
     CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
   }
@@ -3936,44 +3918,60 @@
     return;
   }
 
-  // Check the attribute arguments.
-  if (Attr.getNumArgs() != 1) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-      << Attr.getName() << 1;
-    Attr.setInvalid();
-    return;
-  }
-  Expr *ASArgExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
-  llvm::APSInt addrSpace(32);
-  if (ASArgExpr->isTypeDependent() || ASArgExpr->isValueDependent() ||
-      !ASArgExpr->isIntegerConstantExpr(addrSpace, S.Context)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
-      << Attr.getName() << AANT_ArgumentIntegerConstant
-      << ASArgExpr->getSourceRange();
-    Attr.setInvalid();
-    return;
-  }
-
-  // Bounds checking.
-  if (addrSpace.isSigned()) {
-    if (addrSpace.isNegative()) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_address_space_negative)
+  unsigned ASIdx;
+  if (Attr.getKind() == AttributeList::AT_AddressSpace) {
+    // Check the attribute arguments.
+    if (Attr.getNumArgs() != 1) {
+      S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+        << Attr.getName() << 1;
+      Attr.setInvalid();
+      return;
+    }
+    Expr *ASArgExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+    llvm::APSInt addrSpace(32);
+    if (ASArgExpr->isTypeDependent() || ASArgExpr->isValueDependent() ||
+        !ASArgExpr->isIntegerConstantExpr(addrSpace, S.Context)) {
+      S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+        << Attr.getName() << AANT_ArgumentIntegerConstant
         << ASArgExpr->getSourceRange();
       Attr.setInvalid();
       return;
     }
-    addrSpace.setIsSigned(false);
-  }
-  llvm::APSInt max(addrSpace.getBitWidth());
-  max = Qualifiers::MaxAddressSpace;
-  if (addrSpace > max) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_address_space_too_high)
-      << int(Qualifiers::MaxAddressSpace) << ASArgExpr->getSourceRange();
-    Attr.setInvalid();
-    return;
-  }
 
-  unsigned ASIdx = static_cast<unsigned>(addrSpace.getZExtValue());
+    // Bounds checking.
+    if (addrSpace.isSigned()) {
+      if (addrSpace.isNegative()) {
+        S.Diag(Attr.getLoc(), diag::err_attribute_address_space_negative)
+          << ASArgExpr->getSourceRange();
+        Attr.setInvalid();
+        return;
+      }
+      addrSpace.setIsSigned(false);
+    }
+    llvm::APSInt max(addrSpace.getBitWidth());
+    max = Qualifiers::MaxAddressSpace;
+    if (addrSpace > max) {
+      S.Diag(Attr.getLoc(), diag::err_attribute_address_space_too_high)
+        << int(Qualifiers::MaxAddressSpace) << ASArgExpr->getSourceRange();
+      Attr.setInvalid();
+      return;
+    }
+    ASIdx = static_cast<unsigned>(addrSpace.getZExtValue());
+  } else {
+    // The keyword-based type attributes imply which address space to use.
+    switch (Attr.getKind()) {
+    case AttributeList::AT_OpenCLGlobalAddressSpace:
+      ASIdx = LangAS::opencl_global; break;
+    case AttributeList::AT_OpenCLLocalAddressSpace:
+      ASIdx = LangAS::opencl_local; break;
+    case AttributeList::AT_OpenCLConstantAddressSpace:
+      ASIdx = LangAS::opencl_constant; break;
+    default:
+      assert(Attr.getKind() == AttributeList::AT_OpenCLPrivateAddressSpace);
+      ASIdx = 0; break;
+    }
+  }
+
   Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
 }
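What the reworked handler accepts; the numeric form keeps the old bounds checks, while the OpenCL keywords (OpenCL source assumed) map directly to LangAS values:

    __attribute__((address_space(1))) int *p;    // ASIdx = 1 after the bounds check
    __attribute__((address_space(-1))) int *q;   // err_attribute_address_space_negative
    __global   int *g;                           // ASIdx = LangAS::opencl_global
    __constant int *c;                           // ASIdx = LangAS::opencl_constant
    __private  int *v;                           // ASIdx = 0 (default address space)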
 
@@ -4586,73 +4584,38 @@
   return true;
 }
 
+bool Sema::hasExplicitCallingConv(QualType &T) {
+  QualType R = T.IgnoreParens();
+  while (const AttributedType *AT = dyn_cast<AttributedType>(R)) {
+    if (AT->isCallingConv())
+      return true;
+    R = AT->getModifiedType().IgnoreParens();
+  }
+  return false;
+}
+
 void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic) {
-  const FunctionType *FT = T->castAs<FunctionType>();
+  FunctionTypeUnwrapper Unwrapped(*this, T);
+  const FunctionType *FT = Unwrapped.get();
   bool IsVariadic = (isa<FunctionProtoType>(FT) &&
                      cast<FunctionProtoType>(FT)->isVariadic());
-  CallingConv CC = FT->getCallConv();
 
   // Only adjust types with the default convention.  For example, on Windows we
   // should adjust a __cdecl type to __thiscall for instance methods, and a
   // __thiscall type to __cdecl for static methods.
-  CallingConv DefaultCC =
+  CallingConv CurCC = FT->getCallConv();
+  CallingConv FromCC =
       Context.getDefaultCallingConvention(IsVariadic, IsStatic);
-  if (CC != DefaultCC)
+  CallingConv ToCC = Context.getDefaultCallingConvention(IsVariadic, !IsStatic);
+  if (CurCC != FromCC || FromCC == ToCC)
     return;
 
-  // Check if there was an explicit attribute, but only look through parens.
-  // The intent is to look for an attribute on the current declarator, but not
-  // one that came from a typedef.
-  QualType R = T.IgnoreParens();
-  while (const AttributedType *AT = dyn_cast<AttributedType>(R)) {
-    if (AT->isCallingConv())
-      return;
-    R = AT->getModifiedType().IgnoreParens();
-  }
-
-  // FIXME: This loses sugar.  This should probably be fixed with an implicit
-  // AttributedType node that adjusts the convention.
-  CC = Context.getDefaultCallingConvention(IsVariadic, !IsStatic);
-  FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(CC));
-  FunctionTypeUnwrapper Unwrapped(*this, T);
-  T = Unwrapped.wrap(*this, FT);
-}
-
-/// Handle OpenCL image access qualifiers: read_only, write_only, read_write
-static void HandleOpenCLImageAccessAttribute(QualType& CurType,
-                                             const AttributeList &Attr,
-                                             Sema &S) {
-  // Check the attribute arguments.
-  if (Attr.getNumArgs() != 1) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-      << Attr.getName() << 1;
-    Attr.setInvalid();
+  if (hasExplicitCallingConv(T))
     return;
-  }
-  Expr *sizeExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
-  llvm::APSInt arg(32);
-  if (sizeExpr->isTypeDependent() || sizeExpr->isValueDependent() ||
-      !sizeExpr->isIntegerConstantExpr(arg, S.Context)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
-      << Attr.getName() << AANT_ArgumentIntegerConstant
-      << sizeExpr->getSourceRange();
-    Attr.setInvalid();
-    return;
-  }
-  unsigned iarg = static_cast<unsigned>(arg.getZExtValue());
-  switch (iarg) {
-  case CLIA_read_only:
-  case CLIA_write_only:
-  case CLIA_read_write:
-    // Implemented in a separate patch
-    break;
-  default:
-    // Implemented in a separate patch
-    S.Diag(Attr.getLoc(), diag::err_attribute_invalid_size)
-      << sizeExpr->getSourceRange();
-    Attr.setInvalid();
-    break;
-  }
+
+  FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(ToCC));
+  QualType Wrapped = Unwrapped.wrap(*this, FT);
+  T = Context.getAdjustedType(T, Wrapped);
 }
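A sketch of the adjustment on an MSVC x86 target, where the instance-method default is __thiscall and the free-function default is __cdecl (declarations are illustrative):

    struct S { void m(); };
    void (S::*pm)() = &S::m;       // pointee written with the __cdecl default, adjusted
                                   // to __thiscall; getAdjustedType() keeps the written
                                   // type as sugar
    void (__cdecl S::*pc)() = 0;   // explicit convention: hasExplicitCallingConv()
                                   // suppresses the adjustment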
 
 /// HandleVectorSizeAttribute - this attribute is only applicable to integral
@@ -4757,17 +4720,24 @@
 }
 
 static bool isPermittedNeonBaseType(QualType &Ty,
-                                    VectorType::VectorKind VecKind,
-                                    bool IsAArch64) {
+                                    VectorType::VectorKind VecKind, Sema &S) {
   const BuiltinType *BTy = Ty->getAs<BuiltinType>();
   if (!BTy)
     return false;
 
+  llvm::Triple Triple = S.Context.getTargetInfo().getTriple();
+
+  // Signed poly is mathematically wrong, but has been baked into some ABIs by
+  // now.
+  bool IsPolyUnsigned = Triple.getArch() == llvm::Triple::aarch64 ||
+                        Triple.getArch() == llvm::Triple::aarch64_be ||
+                        Triple.getArch() == llvm::Triple::arm64;
   if (VecKind == VectorType::NeonPolyVector) {
-    if (IsAArch64) {
+    if (IsPolyUnsigned) {
       // AArch64 polynomial vectors are unsigned and support poly64.
       return BTy->getKind() == BuiltinType::UChar ||
              BTy->getKind() == BuiltinType::UShort ||
+             BTy->getKind() == BuiltinType::ULong ||
              BTy->getKind() == BuiltinType::ULongLong;
     } else {
       // AArch32 polynomial vectors are signed.
@@ -4778,7 +4748,11 @@
 
   // Non-polynomial vector types: the usual suspects are allowed, as well as
   // float64_t on AArch64.
-  if (IsAArch64 && BTy->getKind() == BuiltinType::Double)
+  bool Is64Bit = Triple.getArch() == llvm::Triple::aarch64 ||
+                 Triple.getArch() == llvm::Triple::aarch64_be ||
+                 Triple.getArch() == llvm::Triple::arm64;
+
+  if (Is64Bit && BTy->getKind() == BuiltinType::Double)
     return true;
 
   return BTy->getKind() == BuiltinType::SChar ||
@@ -4787,6 +4761,8 @@
          BTy->getKind() == BuiltinType::UShort ||
          BTy->getKind() == BuiltinType::Int ||
          BTy->getKind() == BuiltinType::UInt ||
+         BTy->getKind() == BuiltinType::Long ||
+         BTy->getKind() == BuiltinType::ULong ||
          BTy->getKind() == BuiltinType::LongLong ||
          BTy->getKind() == BuiltinType::ULongLong ||
          BTy->getKind() == BuiltinType::Float ||
@@ -4828,10 +4804,7 @@
     return;
   }
   // Only certain element types are supported for Neon vectors.
-  llvm::Triple::ArchType Arch =
-        S.Context.getTargetInfo().getTriple().getArch();
-  if (!isPermittedNeonBaseType(CurType, VecKind,
-                               Arch == llvm::Triple::aarch64)) {
+  if (!isPermittedNeonBaseType(CurType, VecKind, S)) {
     S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
     Attr.setInvalid();
     return;
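Illustrative typedefs the relaxed check now admits, assuming an AArch64 target for the 64-bit element cases:

    typedef __attribute__((neon_vector_type(2)))     double        float64x2_t;
    typedef __attribute__((neon_vector_type(2)))     long          int64x2_t;   // Long/ULong now permitted
    typedef __attribute__((neon_polyvector_type(2))) unsigned long poly64x2_t;  // unsigned poly on AArch64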
@@ -4911,6 +4884,10 @@
       // it it breaks large amounts of Linux software.
       attr.setUsedAsTypeAttr();
       break;
+    case AttributeList::AT_OpenCLPrivateAddressSpace:
+    case AttributeList::AT_OpenCLGlobalAddressSpace:
+    case AttributeList::AT_OpenCLLocalAddressSpace:
+    case AttributeList::AT_OpenCLConstantAddressSpace:
     case AttributeList::AT_AddressSpace:
       HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
       attr.setUsedAsTypeAttr();
@@ -4939,13 +4916,11 @@
       attr.setUsedAsTypeAttr();
       break;
     case AttributeList::AT_OpenCLImageAccess:
-      HandleOpenCLImageAccessAttribute(type, attr, state.getSema());
+      // FIXME: Some type checking should presumably happen here, but the
+      // original handler's checking was entirely superfluous.
       attr.setUsedAsTypeAttr();
       break;
 
-    case AttributeList::AT_Win64:
-      attr.setUsedAsTypeAttr();
-      break;
     MS_TYPE_ATTRS_CASELIST:
       if (!handleMSPointerTypeQualifierAttr(state, attr, type))
         attr.setUsedAsTypeAttr();
@@ -5058,7 +5033,7 @@
     TypeDiagnoserDiag(unsigned DiagID)
       : Sema::TypeDiagnoser(DiagID == 0), DiagID(DiagID) {}
 
-    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
       if (Suppressed) return;
       S.Diag(Loc, DiagID) << T;
     }
@@ -5132,6 +5107,46 @@
       }
     }
 
+    // We lock in the inheritance model once somebody has asked us to ensure
+    // that a pointer-to-member type is complete.
+    if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+      if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
+        if (!MPTy->getClass()->isDependentType()) {
+          RequireCompleteType(Loc, QualType(MPTy->getClass(), 0), 0);
+
+          CXXRecordDecl *RD = MPTy->getMostRecentCXXRecordDecl();
+          if (!RD->hasAttr<MSInheritanceAttr>()) {
+            MSInheritanceAttr::Spelling InheritanceModel;
+
+            switch (MSPointerToMemberRepresentationMethod) {
+            case LangOptions::PPTMK_BestCase:
+              InheritanceModel = RD->calculateInheritanceModel();
+              break;
+            case LangOptions::PPTMK_FullGeneralitySingleInheritance:
+              InheritanceModel = MSInheritanceAttr::Keyword_single_inheritance;
+              break;
+            case LangOptions::PPTMK_FullGeneralityMultipleInheritance:
+              InheritanceModel =
+                  MSInheritanceAttr::Keyword_multiple_inheritance;
+              break;
+            case LangOptions::PPTMK_FullGeneralityVirtualInheritance:
+              InheritanceModel =
+                  MSInheritanceAttr::Keyword_unspecified_inheritance;
+              break;
+            }
+
+            RD->addAttr(MSInheritanceAttr::CreateImplicit(
+                getASTContext(), InheritanceModel,
+                /*BestCase=*/MSPointerToMemberRepresentationMethod ==
+                    LangOptions::PPTMK_BestCase,
+                ImplicitMSInheritanceAttrLoc.isValid()
+                    ? ImplicitMSInheritanceAttrLoc
+                    : RD->getSourceRange()));
+          }
+        }
+      }
+    }
+
     return false;
   }
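A sketch of the lock-in on the Microsoft C++ ABI (names are illustrative):

    struct S { int x; };
    int S::*pm = &S::x;   // completing 'int S::*' attaches an implicit MSInheritanceAttr
                          // to S, chosen from MSPointerToMemberRepresentationMethod:
                          // best case by default, or one of the full-generality models
                          // (e.g. selected via #pragma pointers_to_members).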
 
@@ -5310,29 +5325,26 @@
   if (RD->getNumVBases()) {
     Diag(RD->getLocation(), diag::note_non_literal_virtual_base)
       << getLiteralDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
-    for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
-           E = RD->vbases_end(); I != E; ++I)
-      Diag(I->getLocStart(),
-           diag::note_constexpr_virtual_base_here) << I->getSourceRange();
+    for (const auto &I : RD->vbases())
+      Diag(I.getLocStart(), diag::note_constexpr_virtual_base_here)
+          << I.getSourceRange();
   } else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor() &&
              !RD->hasTrivialDefaultConstructor()) {
     Diag(RD->getLocation(), diag::note_non_literal_no_constexpr_ctors) << RD;
   } else if (RD->hasNonLiteralTypeFieldsOrBases()) {
-    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
-         E = RD->bases_end(); I != E; ++I) {
-      if (!I->getType()->isLiteralType(Context)) {
-        Diag(I->getLocStart(),
+    for (const auto &I : RD->bases()) {
+      if (!I.getType()->isLiteralType(Context)) {
+        Diag(I.getLocStart(),
              diag::note_non_literal_base_class)
-          << RD << I->getType() << I->getSourceRange();
+          << RD << I.getType() << I.getSourceRange();
         return true;
       }
     }
-    for (CXXRecordDecl::field_iterator I = RD->field_begin(),
-         E = RD->field_end(); I != E; ++I) {
+    for (const auto *I : RD->fields()) {
       if (!I->getType()->isLiteralType(Context) ||
           I->getType().isVolatileQualified()) {
         Diag(I->getLocation(), diag::note_non_literal_field)
-          << RD << *I << I->getType()
+          << RD << I << I->getType()
           << I->getType().isVolatileQualified();
         return true;
       }
@@ -5368,7 +5380,7 @@
     return T;
   NestedNameSpecifier *NNS;
   if (SS.isValid())
-    NNS = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+    NNS = SS.getScopeRep();
   else {
     if (Keyword == ETK_None)
       return T;
diff --git a/lib/Sema/TargetAttributesSema.cpp b/lib/Sema/TargetAttributesSema.cpp
deleted file mode 100644
index 45067de..0000000
--- a/lib/Sema/TargetAttributesSema.cpp
+++ /dev/null
@@ -1,353 +0,0 @@
-//===-- TargetAttributesSema.cpp - Encapsulate target attributes-*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains semantic analysis implementation for target-specific
-// attributes.
-//
-//===----------------------------------------------------------------------===//
-
-#include "TargetAttributesSema.h"
-#include "clang/AST/DeclCXX.h"
-#include "clang/Basic/TargetInfo.h"
-#include "clang/Sema/SemaInternal.h"
-#include "llvm/ADT/Triple.h"
-
-using namespace clang;
-
-TargetAttributesSema::~TargetAttributesSema() {}
-bool TargetAttributesSema::ProcessDeclAttribute(Scope *scope, Decl *D,
-                                    const AttributeList &Attr, Sema &S) const {
-  return false;
-}
-
-static void HandleARMInterruptAttr(Decl *d,
-                                   const AttributeList &Attr, Sema &S) {
-  // Check the attribute arguments.
-  if (Attr.getNumArgs() > 1) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
-        << 1;
-    return;
-  }
-
-  StringRef Str;
-  SourceLocation ArgLoc;
-
-  if (Attr.getNumArgs() == 0)
-    Str = "";
-  else if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &ArgLoc))
-    return;
-
-  ARMInterruptAttr::InterruptType Kind;
-  if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
-        << Attr.getName() << Str << ArgLoc;
-    return;
-  }
-
-  unsigned Index = Attr.getAttributeSpellingListIndex();
-  d->addAttr(::new (S.Context)
-             ARMInterruptAttr(Attr.getLoc(), S.Context, Kind, Index));
-}
-
-namespace {
-  class ARMAttributesSema : public TargetAttributesSema {
-  public:
-    ARMAttributesSema() { }
-    bool ProcessDeclAttribute(Scope *scope, Decl *D,
-                              const AttributeList &Attr, Sema &S) const {
-      if (Attr.getName()->getName() == "interrupt") {
-        HandleARMInterruptAttr(D, Attr, S);
-        return true;
-      }
-      return false;
-    }
-  };
-}
-
-static void HandleMSP430InterruptAttr(Decl *d,
-                                      const AttributeList &Attr, Sema &S) {
-    // Check the attribute arguments.
-    if (Attr.getNumArgs() != 1) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-        << Attr.getName() << 1;
-      return;
-    }
-
-    // FIXME: Check for decl - it should be void ()(void).
-
-    Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
-    llvm::APSInt NumParams(32);
-    if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
-        << Attr.getName() << AANT_ArgumentIntegerConstant
-        << NumParamsExpr->getSourceRange();
-      return;
-    }
-
-    unsigned Num = NumParams.getLimitedValue(255);
-    if ((Num & 1) || Num > 30) {
-      S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
-        << "interrupt" << (int)NumParams.getSExtValue()
-        << NumParamsExpr->getSourceRange();
-      return;
-    }
-
-    d->addAttr(::new (S.Context) MSP430InterruptAttr(Attr.getLoc(), S.Context, Num));
-    d->addAttr(::new (S.Context) UsedAttr(Attr.getLoc(), S.Context));
-  }
-
-namespace {
-  class MSP430AttributesSema : public TargetAttributesSema {
-  public:
-    MSP430AttributesSema() { }
-    bool ProcessDeclAttribute(Scope *scope, Decl *D,
-                              const AttributeList &Attr, Sema &S) const {
-      if (Attr.getName()->getName() == "interrupt") {
-        HandleMSP430InterruptAttr(D, Attr, S);
-        return true;
-      }
-      return false;
-    }
-  };
-}
-
-static void HandleX86ForceAlignArgPointerAttr(Decl *D,
-                                              const AttributeList& Attr,
-                                              Sema &S) {
-  // Check the attribute arguments.
-  if (Attr.getNumArgs() != 0) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-      << Attr.getName() << 0;
-    return;
-  }
-
-  // If we try to apply it to a function pointer, don't warn, but don't
-  // do anything, either. It doesn't matter anyway, because there's nothing
-  // special about calling a force_align_arg_pointer function.
-  ValueDecl *VD = dyn_cast<ValueDecl>(D);
-  if (VD && VD->getType()->isFunctionPointerType())
-    return;
-  // Also don't warn on function pointer typedefs.
-  TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
-  if (TD && (TD->getUnderlyingType()->isFunctionPointerType() ||
-             TD->getUnderlyingType()->isFunctionType()))
-    return;
-  // Attribute can only be applied to function types.
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << /* function */0;
-    return;
-  }
-
-  D->addAttr(::new (S.Context) X86ForceAlignArgPointerAttr(Attr.getRange(),
-                                                           S.Context));
-}
-
-DLLImportAttr *Sema::mergeDLLImportAttr(Decl *D, SourceRange Range,
-                                        unsigned AttrSpellingListIndex) {
-  if (D->hasAttr<DLLExportAttr>()) {
-    Diag(Range.getBegin(), diag::warn_attribute_ignored) << "dllimport";
-    return NULL;
-  }
-
-  if (D->hasAttr<DLLImportAttr>())
-    return NULL;
-
-  if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
-    if (VD->hasDefinition()) {
-      // dllimport cannot be applied to definitions.
-      Diag(D->getLocation(), diag::warn_attribute_invalid_on_definition)
-        << "dllimport";
-      return NULL;
-    }
-  }
-
-  return ::new (Context) DLLImportAttr(Range, Context,
-                                       AttrSpellingListIndex);
-}
-
-static void HandleDLLImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
-  // check the attribute arguments.
-  if (Attr.getNumArgs() != 0) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-      << Attr.getName() << 0;
-    return;
-  }
-
-  // Attribute can be applied only to functions or variables.
-  FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
-  if (!FD && !isa<VarDecl>(D)) {
-    // Apparently Visual C++ thinks it is okay to not emit a warning
-    // in this case, so only emit a warning when -fms-extensions is not
-    // specified.
-    if (!S.getLangOpts().MicrosoftExt)
-      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-        << Attr.getName() << 2 /*variable and function*/;
-    return;
-  }
-
-  // Currently, the dllimport attribute is ignored for inlined functions.
-  // Warning is emitted.
-  if (FD && FD->isInlineSpecified()) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
-    return;
-  }
-
-  unsigned Index = Attr.getAttributeSpellingListIndex();
-  DLLImportAttr *NewAttr = S.mergeDLLImportAttr(D, Attr.getRange(), Index);
-  if (NewAttr)
-    D->addAttr(NewAttr);
-}
-
-DLLExportAttr *Sema::mergeDLLExportAttr(Decl *D, SourceRange Range,
-                                        unsigned AttrSpellingListIndex) {
-  if (DLLImportAttr *Import = D->getAttr<DLLImportAttr>()) {
-    Diag(Import->getLocation(), diag::warn_attribute_ignored) << "dllimport";
-    D->dropAttr<DLLImportAttr>();
-  }
-
-  if (D->hasAttr<DLLExportAttr>())
-    return NULL;
-
-  return ::new (Context) DLLExportAttr(Range, Context,
-                                       AttrSpellingListIndex);
-}
-
-static void HandleDLLExportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
-  // check the attribute arguments.
-  if (Attr.getNumArgs() != 0) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-      << Attr.getName() << 0;
-    return;
-  }
-
-  // Attribute can be applied only to functions or variables.
-  FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
-  if (!FD && !isa<VarDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
-      << Attr.getName() << 2 /*variable and function*/;
-    return;
-  }
-
-  // Currently, the dllexport attribute is ignored for inlined functions, unless
-  // the -fkeep-inline-functions flag has been used. Warning is emitted;
-  if (FD && FD->isInlineSpecified()) {
-    // FIXME: ... unless the -fkeep-inline-functions flag has been used.
-    S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllexport";
-    return;
-  }
-
-  unsigned Index = Attr.getAttributeSpellingListIndex();
-  DLLExportAttr *NewAttr = S.mergeDLLExportAttr(D, Attr.getRange(), Index);
-  if (NewAttr)
-    D->addAttr(NewAttr);
-}
-
-namespace {
-  class X86AttributesSema : public TargetAttributesSema {
-  public:
-    X86AttributesSema() { }
-    bool ProcessDeclAttribute(Scope *scope, Decl *D,
-                              const AttributeList &Attr, Sema &S) const {
-      const llvm::Triple &Triple(S.Context.getTargetInfo().getTriple());
-      if (Triple.getOS() == llvm::Triple::Win32 ||
-          Triple.getOS() == llvm::Triple::MinGW32) {
-        switch (Attr.getKind()) {
-        case AttributeList::AT_DLLImport: HandleDLLImportAttr(D, Attr, S);
-                                          return true;
-        case AttributeList::AT_DLLExport: HandleDLLExportAttr(D, Attr, S);
-                                          return true;
-        default:                          break;
-        }
-      }
-      if (Triple.getArch() != llvm::Triple::x86_64 &&
-          (Attr.getName()->getName() == "force_align_arg_pointer" ||
-           Attr.getName()->getName() == "__force_align_arg_pointer__")) {
-        HandleX86ForceAlignArgPointerAttr(D, Attr, S);
-        return true;
-      }
-      return false;
-    }
-  };
-}
-
-static void HandleMips16Attr(Decl *D, const AttributeList &Attr, Sema &S) {
-  // check the attribute arguments.
-  if (Attr.getNumArgs()) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-      << Attr.getName() << 0;
-    return;
-  }
-  // Attribute can only be applied to function types.
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << /* function */0;
-    return;
-  }
-  D->addAttr(::new (S.Context) Mips16Attr(Attr.getRange(), S.Context,
-                                          Attr.getAttributeSpellingListIndex()));
-}
-
-static void HandleNoMips16Attr(Decl *D, const AttributeList &Attr, Sema &S) {
-  // check the attribute arguments.
-  if (Attr.getNumArgs()) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
-      << Attr.getName() << 0;
-    return;
-  }
-  // Attribute can only be applied to function types.
-  if (!isa<FunctionDecl>(D)) {
-    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
-      << Attr.getName() << /* function */0;
-    return;
-  }
-  D->addAttr(::new (S.Context)
-             NoMips16Attr(Attr.getRange(), S.Context,
-                          Attr.getAttributeSpellingListIndex()));
-}
-
-namespace {
-  class MipsAttributesSema : public TargetAttributesSema {
-  public:
-    MipsAttributesSema() { }
-    bool ProcessDeclAttribute(Scope *scope, Decl *D, const AttributeList &Attr,
-                              Sema &S) const {
-      if (Attr.getName()->getName() == "mips16") {
-        HandleMips16Attr(D, Attr, S);
-        return true;
-      } else if (Attr.getName()->getName() == "nomips16") {
-        HandleNoMips16Attr(D, Attr, S);
-        return true;
-      }
-      return false;
-    }
-  };
-}
-
-const TargetAttributesSema &Sema::getTargetAttributesSema() const {
-  if (TheTargetAttributesSema)
-    return *TheTargetAttributesSema;
-
-  const llvm::Triple &Triple(Context.getTargetInfo().getTriple());
-  switch (Triple.getArch()) {
-  case llvm::Triple::arm:
-  case llvm::Triple::thumb:
-    return *(TheTargetAttributesSema = new ARMAttributesSema);
-  case llvm::Triple::msp430:
-    return *(TheTargetAttributesSema = new MSP430AttributesSema);
-  case llvm::Triple::x86:
-  case llvm::Triple::x86_64:
-    return *(TheTargetAttributesSema = new X86AttributesSema);
-  case llvm::Triple::mips:
-  case llvm::Triple::mipsel:
-    return *(TheTargetAttributesSema = new MipsAttributesSema);
-  default:
-    return *(TheTargetAttributesSema = new TargetAttributesSema);
-  }
-}
diff --git a/lib/Sema/TargetAttributesSema.h b/lib/Sema/TargetAttributesSema.h
deleted file mode 100644
index 410c900..0000000
--- a/lib/Sema/TargetAttributesSema.h
+++ /dev/null
@@ -1,27 +0,0 @@
-//===--- TargetAttributesSema.h - Semantic Analysis For Target Attributes -===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_SEMA_TARGETSEMA_H
-#define CLANG_SEMA_TARGETSEMA_H
-
-namespace clang {
-  class Scope;
-  class Decl;
-  class AttributeList;
-  class Sema;
-
-  class TargetAttributesSema {
-  public:
-    virtual ~TargetAttributesSema();
-    virtual bool ProcessDeclAttribute(Scope *scope, Decl *D,
-                                      const AttributeList &Attr, Sema &S) const;
-  };
-}
-
-#endif
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index 4351c25..f78fca7 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -607,6 +607,7 @@
   ExprResult TransformAddressOfOperand(Expr *E);
   ExprResult TransformDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E,
                                                 bool IsAddressOfOperand);
+  StmtResult TransformOMPExecutableDirective(OMPExecutableDirective *S);
 
 // FIXME: We use LLVM_ATTRIBUTE_NOINLINE because inlining causes a ridiculous
 // amount of stack usage with clang.
@@ -951,9 +952,8 @@
           break;
         }
         default:
-          // FIXME: Would be nice to highlight just the source range.
           SemaRef.Diag(IdLoc, diag::err_not_tag_in_scope)
-            << Kind << Id << DC;
+              << Kind << Id << DC << QualifierLoc.getSourceRange();
           break;
       }
       return QualType();
@@ -1286,16 +1286,51 @@
     return getSema().BuildObjCAtThrowStmt(AtLoc, Operand);
   }
 
-  /// \brief Build a new OpenMP parallel directive.
+  /// \brief Build a new OpenMP executable directive.
   ///
   /// By default, performs semantic analysis to build the new statement.
   /// Subclasses may override this routine to provide different behavior.
-  StmtResult RebuildOMPParallelDirective(ArrayRef<OMPClause *> Clauses,
-                                         Stmt *AStmt,
-                                         SourceLocation StartLoc,
-                                         SourceLocation EndLoc) {
-    return getSema().ActOnOpenMPParallelDirective(Clauses, AStmt,
-                                                  StartLoc, EndLoc);
+  StmtResult RebuildOMPExecutableDirective(OpenMPDirectiveKind Kind,
+                                           ArrayRef<OMPClause *> Clauses,
+                                           Stmt *AStmt,
+                                           SourceLocation StartLoc,
+                                           SourceLocation EndLoc) {
+    return getSema().ActOnOpenMPExecutableDirective(Kind, Clauses, AStmt,
+                                                    StartLoc, EndLoc);
+  }
+
+  /// \brief Build a new OpenMP 'if' clause.
+  ///
+  /// By default, performs semantic analysis to build the new statement.
+  /// Subclasses may override this routine to provide different behavior.
+  OMPClause *RebuildOMPIfClause(Expr *Condition,
+                                SourceLocation StartLoc,
+                                SourceLocation LParenLoc,
+                                SourceLocation EndLoc) {
+    return getSema().ActOnOpenMPIfClause(Condition, StartLoc,
+                                         LParenLoc, EndLoc);
+  }
+
+  /// \brief Build a new OpenMP 'num_threads' clause.
+  ///
+  /// By default, performs semantic analysis to build the new statement.
+  /// Subclasses may override this routine to provide different behavior.
+  OMPClause *RebuildOMPNumThreadsClause(Expr *NumThreads,
+                                        SourceLocation StartLoc,
+                                        SourceLocation LParenLoc,
+                                        SourceLocation EndLoc) {
+    return getSema().ActOnOpenMPNumThreadsClause(NumThreads, StartLoc,
+                                                 LParenLoc, EndLoc);
+  }
+
+  /// \brief Build a new OpenMP 'safelen' clause.
+  ///
+  /// By default, performs semantic analysis to build the new statement.
+  /// Subclasses may override this routine to provide different behavior.
+  OMPClause *RebuildOMPSafelenClause(Expr *Len, SourceLocation StartLoc,
+                                     SourceLocation LParenLoc,
+                                     SourceLocation EndLoc) {
+    return getSema().ActOnOpenMPSafelenClause(Len, StartLoc, LParenLoc, EndLoc);
   }
 
   /// \brief Build a new OpenMP 'default' clause.
@@ -1335,6 +1370,10 @@
                                                    EndLoc);
   }
 
+  /// \brief Build a new OpenMP 'shared' clause.
+  ///
+  /// By default, performs semantic analysis to build the new statement.
+  /// Subclasses may override this routine to provide different behavior.
   OMPClause *RebuildOMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
@@ -1343,6 +1382,18 @@
                                              EndLoc);
   }
 
+  /// \brief Build a new OpenMP 'copyin' clause.
+  ///
+  /// By default, performs semantic analysis to build the new statement.
+  /// Subclasses may override this routine to provide different behavior.
+  OMPClause *RebuildOMPCopyinClause(ArrayRef<Expr *> VarList,
+                                    SourceLocation StartLoc,
+                                    SourceLocation LParenLoc,
+                                    SourceLocation EndLoc) {
+    return getSema().ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc,
+                                             EndLoc);
+  }
+
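The new rebuild hooks correspond to OpenMP source such as the following (compiled with -fopenmp; 'counter' is assumed to be threadprivate):

    #pragma omp parallel if(use_threads) num_threads(4) copyin(counter)
      do_work();

    #pragma omp simd safelen(8)
      for (int i = 0; i < n; ++i)
        a[i] = b[i] + c[i];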
   /// \brief Rebuild the operand to an Objective-C \@synchronized statement.
   ///
   /// By default, performs semantic analysis to build the new statement.
@@ -2135,29 +2186,6 @@
                                     Operand);
   }
 
-  /// \brief Build a new unary type trait expression.
-  ///
-  /// By default, performs semantic analysis to build the new expression.
-  /// Subclasses may override this routine to provide different behavior.
-  ExprResult RebuildUnaryTypeTrait(UnaryTypeTrait Trait,
-                                   SourceLocation StartLoc,
-                                   TypeSourceInfo *T,
-                                   SourceLocation RParenLoc) {
-    return getSema().BuildUnaryTypeTrait(Trait, StartLoc, T, RParenLoc);
-  }
-
-  /// \brief Build a new binary type trait expression.
-  ///
-  /// By default, performs semantic analysis to build the new expression.
-  /// Subclasses may override this routine to provide different behavior.
-  ExprResult RebuildBinaryTypeTrait(BinaryTypeTrait Trait,
-                                    SourceLocation StartLoc,
-                                    TypeSourceInfo *LhsT,
-                                    TypeSourceInfo *RhsT,
-                                    SourceLocation RParenLoc) {
-    return getSema().BuildBinaryTypeTrait(Trait, StartLoc, LhsT, RhsT, RParenLoc);
-  }
-
   /// \brief Build a new type trait expression.
   ///
   /// By default, performs semantic analysis to build the new expression.
@@ -2555,11 +2583,9 @@
                                        CK_BuiltinFnToFnPtr).take();
 
     // Build the CallExpr
-    ExprResult TheCall = SemaRef.Owned(
-      new (SemaRef.Context) CallExpr(SemaRef.Context, Callee, SubExprs,
-                                     Builtin->getCallResultType(),
-                            Expr::getValueKindForType(Builtin->getResultType()),
-                                     RParenLoc));
+    ExprResult TheCall = SemaRef.Owned(new (SemaRef.Context) CallExpr(
+        SemaRef.Context, Callee, SubExprs, Builtin->getCallResultType(),
+        Expr::getValueKindForType(Builtin->getReturnType()), RParenLoc));
 
     // Type-check the __builtin_shufflevector expression.
     return SemaRef.SemaBuiltinShuffleVector(cast<CallExpr>(TheCall.take()));
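The rebuilt call corresponds to source like this (the vector typedef and function are illustrative):

    typedef float float4 __attribute__((ext_vector_type(4)));
    float4 interleave_lo(float4 a, float4 b) {
      // lane indices refer to the concatenation of 'a' and 'b'
      return __builtin_shufflevector(a, b, 0, 4, 1, 5);
    }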
@@ -2657,6 +2683,10 @@
                                              QualType ObjectType,
                                              NamedDecl *FirstQualifierInScope,
                                              CXXScopeSpec &SS);
+
+  TypeSourceInfo *TransformTSIInObjectScope(TypeLoc TL, QualType ObjectType,
+                                            NamedDecl *FirstQualifierInScope,
+                                            CXXScopeSpec &SS);
 };
 
 template<typename Derived>
@@ -3564,52 +3594,14 @@
                                                    QualType ObjectType,
                                                    NamedDecl *UnqualLookup,
                                                    CXXScopeSpec &SS) {
-  QualType T = TL.getType();
-  if (getDerived().AlreadyTransformed(T))
+  if (getDerived().AlreadyTransformed(TL.getType()))
     return TL;
 
-  TypeLocBuilder TLB;
-  QualType Result;
-
-  if (isa<TemplateSpecializationType>(T)) {
-    TemplateSpecializationTypeLoc SpecTL =
-        TL.castAs<TemplateSpecializationTypeLoc>();
-
-    TemplateName Template =
-      getDerived().TransformTemplateName(SS,
-                                         SpecTL.getTypePtr()->getTemplateName(),
-                                         SpecTL.getTemplateNameLoc(),
-                                         ObjectType, UnqualLookup);
-    if (Template.isNull())
-      return TypeLoc();
-
-    Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
-                                                              Template);
-  } else if (isa<DependentTemplateSpecializationType>(T)) {
-    DependentTemplateSpecializationTypeLoc SpecTL =
-        TL.castAs<DependentTemplateSpecializationTypeLoc>();
-
-    TemplateName Template
-      = getDerived().RebuildTemplateName(SS,
-                                         *SpecTL.getTypePtr()->getIdentifier(),
-                                         SpecTL.getTemplateNameLoc(),
-                                         ObjectType, UnqualLookup);
-    if (Template.isNull())
-      return TypeLoc();
-
-    Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
-                                                                       SpecTL,
-                                                                     Template,
-                                                                       SS);
-  } else {
-    // Nothing special needs to be done for these.
-    Result = getDerived().TransformType(TLB, TL);
-  }
-
-  if (Result.isNull())
-    return TypeLoc();
-
-  return TLB.getTypeSourceInfo(SemaRef.Context, Result)->getTypeLoc();
+  TypeSourceInfo *TSI =
+      TransformTSIInObjectScope(TL, ObjectType, UnqualLookup, SS);
+  if (TSI)
+    return TSI->getTypeLoc();
+  return TypeLoc();
 }
 
 template<typename Derived>
@@ -3618,16 +3610,23 @@
                                                    QualType ObjectType,
                                                    NamedDecl *UnqualLookup,
                                                    CXXScopeSpec &SS) {
-  // FIXME: Painfully copy-paste from the above!
-
-  QualType T = TSInfo->getType();
-  if (getDerived().AlreadyTransformed(T))
+  if (getDerived().AlreadyTransformed(TSInfo->getType()))
     return TSInfo;
 
+  return TransformTSIInObjectScope(TSInfo->getTypeLoc(), ObjectType,
+                                   UnqualLookup, SS);
+}
+
+template <typename Derived>
+TypeSourceInfo *TreeTransform<Derived>::TransformTSIInObjectScope(
+    TypeLoc TL, QualType ObjectType, NamedDecl *UnqualLookup,
+    CXXScopeSpec &SS) {
+  QualType T = TL.getType();
+  assert(!getDerived().AlreadyTransformed(T));
+
   TypeLocBuilder TLB;
   QualType Result;
 
-  TypeLoc TL = TSInfo->getTypeLoc();
   if (isa<TemplateSpecializationType>(T)) {
     TemplateSpecializationTypeLoc SpecTL =
         TL.castAs<TemplateSpecializationTypeLoc>();
@@ -3693,6 +3692,13 @@
   return TransformTypeSpecType(TLB, T);
 }
 
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformAdjustedType(TypeLocBuilder &TLB,
+                                                       AdjustedTypeLoc TL) {
+  // Adjustments applied during transformation are handled elsewhere.
+  return getDerived().TransformType(TLB, TL.getOriginalLoc());
+}
+
 template<typename Derived>
 QualType TreeTransform<Derived>::TransformDecayedType(TypeLocBuilder &TLB,
                                                       DecayedTypeLoc TL) {
@@ -3861,6 +3867,14 @@
       return QualType();
   }
 
+  // If we had to adjust the pointee type when building a member pointer, make
+  // sure to push TypeLoc info for it.
+  const MemberPointerType *MPT = Result->getAs<MemberPointerType>();
+  if (MPT && PointeeType != MPT->getPointeeType()) {
+    assert(isa<AdjustedType>(MPT->getPointeeType()));
+    TLB.push<AdjustedTypeLoc>(MPT->getPointeeType());
+  }
+
   MemberPointerTypeLoc NewTL = TLB.push<MemberPointerTypeLoc>(Result);
   NewTL.setSigilLoc(TL.getSigilLoc());
   NewTL.setClassTInfo(NewClsTInfo);
@@ -3966,7 +3980,9 @@
       return QualType();
   }
 
-  VariableArrayTypeLoc NewTL = TLB.push<VariableArrayTypeLoc>(Result);
+  // We might have a constant size array now, but fortunately it has the same
+  // location layout.
+  ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result);
   NewTL.setLBracketLoc(TL.getLBracketLoc());
   NewTL.setRBracketLoc(TL.getRBracketLoc());
   NewTL.setSizeExpr(Size);
@@ -4391,11 +4407,9 @@
   QualType ResultType;
 
   if (T->hasTrailingReturn()) {
-    if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
-                                                 TL.getParmArray(),
-                                                 TL.getNumArgs(),
-                                             TL.getTypePtr()->arg_type_begin(),
-                                                 ParamTypes, &ParamDecls))
+    if (getDerived().TransformFunctionTypeParams(
+            TL.getBeginLoc(), TL.getParmArray(), TL.getNumParams(),
+            TL.getTypePtr()->param_type_begin(), ParamTypes, &ParamDecls))
       return QualType();
 
     {
@@ -4407,31 +4421,29 @@
       //   declarator.
       Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, ThisTypeQuals);
 
-      ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
+      ResultType = getDerived().TransformType(TLB, TL.getReturnLoc());
       if (ResultType.isNull())
         return QualType();
     }
   }
   else {
-    ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
+    ResultType = getDerived().TransformType(TLB, TL.getReturnLoc());
     if (ResultType.isNull())
       return QualType();
 
-    if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
-                                                 TL.getParmArray(),
-                                                 TL.getNumArgs(),
-                                             TL.getTypePtr()->arg_type_begin(),
-                                                 ParamTypes, &ParamDecls))
+    if (getDerived().TransformFunctionTypeParams(
+            TL.getBeginLoc(), TL.getParmArray(), TL.getNumParams(),
+            TL.getTypePtr()->param_type_begin(), ParamTypes, &ParamDecls))
       return QualType();
   }
 
   // FIXME: Need to transform the exception-specification too.
 
   QualType Result = TL.getType();
-  if (getDerived().AlwaysRebuild() ||
-      ResultType != T->getResultType() ||
-      T->getNumArgs() != ParamTypes.size() ||
-      !std::equal(T->arg_type_begin(), T->arg_type_end(), ParamTypes.begin())) {
+  if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType() ||
+      T->getNumParams() != ParamTypes.size() ||
+      !std::equal(T->param_type_begin(), T->param_type_end(),
+                  ParamTypes.begin())) {
     Result = getDerived().RebuildFunctionProtoType(ResultType, ParamTypes,
                                                    T->getExtProtoInfo());
     if (Result.isNull())
@@ -4443,8 +4455,8 @@
   NewTL.setLParenLoc(TL.getLParenLoc());
   NewTL.setRParenLoc(TL.getRParenLoc());
   NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
-  for (unsigned i = 0, e = NewTL.getNumArgs(); i != e; ++i)
-    NewTL.setArg(i, ParamDecls[i]);
+  for (unsigned i = 0, e = NewTL.getNumParams(); i != e; ++i)
+    NewTL.setParam(i, ParamDecls[i]);
 
   return Result;
 }
@@ -4454,13 +4466,12 @@
                                                  TypeLocBuilder &TLB,
                                                  FunctionNoProtoTypeLoc TL) {
   const FunctionNoProtoType *T = TL.getTypePtr();
-  QualType ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
+  QualType ResultType = getDerived().TransformType(TLB, TL.getReturnLoc());
   if (ResultType.isNull())
     return QualType();
 
   QualType Result = TL.getType();
-  if (getDerived().AlwaysRebuild() ||
-      ResultType != T->getResultType())
+  if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType())
     Result = getDerived().RebuildFunctionNoProtoType(ResultType);
 
   FunctionNoProtoTypeLoc NewTL = TLB.push<FunctionNoProtoTypeLoc>(Result);
@@ -5284,13 +5295,12 @@
   bool SubStmtInvalid = false;
   bool SubStmtChanged = false;
   SmallVector<Stmt*, 8> Statements;
-  for (CompoundStmt::body_iterator B = S->body_begin(), BEnd = S->body_end();
-       B != BEnd; ++B) {
-    StmtResult Result = getDerived().TransformStmt(*B);
+  for (auto *B : S->body()) {
+    StmtResult Result = getDerived().TransformStmt(B);
     if (Result.isInvalid()) {
       // Immediately fail if this was a DeclStmt, since it's very
       // likely that this will cause problems for future statements.
-      if (isa<DeclStmt>(*B))
+      if (isa<DeclStmt>(B))
         return StmtError();
 
       // Otherwise, just keep processing substatements and fail later.
@@ -5298,7 +5308,7 @@
       continue;
     }
 
-    SubStmtChanged = SubStmtChanged || Result.get() != *B;
+    SubStmtChanged = SubStmtChanged || Result.get() != B;
     Statements.push_back(Result.takeAs<Stmt>());
   }
 
@@ -5695,14 +5705,12 @@
 TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
   bool DeclChanged = false;
   SmallVector<Decl *, 4> Decls;
-  for (DeclStmt::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
-       D != DEnd; ++D) {
-    Decl *Transformed = getDerived().TransformDefinition((*D)->getLocation(),
-                                                         *D);
+  for (auto *D : S->decls()) {
+    Decl *Transformed = getDerived().TransformDefinition(D->getLocation(), D);
     if (!Transformed)
       return StmtError();
 
-    if (Transformed != *D)
+    if (Transformed != D)
       DeclChanged = true;
 
     Decls.push_back(Transformed);
@@ -6276,9 +6284,8 @@
 
 template<typename Derived>
 StmtResult
-TreeTransform<Derived>::TransformOMPParallelDirective(OMPParallelDirective *D) {
-  DeclarationNameInfo DirName;
-  getSema().StartOpenMPDSABlock(OMPD_parallel, DirName, 0);
+TreeTransform<Derived>::TransformOMPExecutableDirective(
+                                 OMPExecutableDirective *D) {
 
   // Transform the clauses
   llvm::SmallVector<OMPClause *, 16> TClauses;
@@ -6289,7 +6296,6 @@
     if (*I) {
       OMPClause *Clause = getDerived().TransformOMPClause(*I);
       if (!Clause) {
-        getSema().EndOpenMPDSABlock(0);
         return StmtError();
       }
       TClauses.push_back(Clause);
@@ -6299,25 +6305,74 @@
     }
   }
   if (!D->getAssociatedStmt()) {
-    getSema().EndOpenMPDSABlock(0);
     return StmtError();
   }
   StmtResult AssociatedStmt =
     getDerived().TransformStmt(D->getAssociatedStmt());
   if (AssociatedStmt.isInvalid()) {
-    getSema().EndOpenMPDSABlock(0);
     return StmtError();
   }
 
-  StmtResult Res = getDerived().RebuildOMPParallelDirective(TClauses,
-                                                            AssociatedStmt.take(),
-                                                            D->getLocStart(),
-                                                            D->getLocEnd());
-  getSema().EndOpenMPDSABlock(Res.get());
+  return getDerived().RebuildOMPExecutableDirective(D->getDirectiveKind(),
+                                                    TClauses,
+                                                    AssociatedStmt.take(),
+                                                    D->getLocStart(),
+                                                    D->getLocEnd());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPParallelDirective(OMPParallelDirective *D) {
+  DeclarationNameInfo DirName;
+  getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel, DirName, 0);
+  StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+  getDerived().getSema().EndOpenMPDSABlock(Res.get());
   return Res;
 }
 
 template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPSimdDirective(OMPSimdDirective *D) {
+  DeclarationNameInfo DirName;
+  getDerived().getSema().StartOpenMPDSABlock(OMPD_simd, DirName, 0);
+  StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+  getDerived().getSema().EndOpenMPDSABlock(Res.get());
+  return Res;
+}
+
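Further directive kinds would follow the same start-DSA-block / delegate / end-DSA-block pattern; a sketch only, since OMPForDirective and its transform are hypothetical at this revision:

    template<typename Derived>
    StmtResult
    TreeTransform<Derived>::TransformOMPForDirective(OMPForDirective *D) {
      DeclarationNameInfo DirName;
      getDerived().getSema().StartOpenMPDSABlock(OMPD_for, DirName, 0);
      StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
      getDerived().getSema().EndOpenMPDSABlock(Res.get());
      return Res;
    }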
+template<typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPIfClause(OMPIfClause *C) {
+  ExprResult Cond = getDerived().TransformExpr(C->getCondition());
+  if (Cond.isInvalid())
+    return 0;
+  return getDerived().RebuildOMPIfClause(Cond.take(), C->getLocStart(),
+                                         C->getLParenLoc(), C->getLocEnd());
+}
+
+template<typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNumThreadsClause(OMPNumThreadsClause *C) {
+  ExprResult NumThreads = getDerived().TransformExpr(C->getNumThreads());
+  if (NumThreads.isInvalid())
+    return 0;
+  return getDerived().RebuildOMPNumThreadsClause(NumThreads.take(),
+                                                 C->getLocStart(),
+                                                 C->getLParenLoc(),
+                                                 C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPSafelenClause(OMPSafelenClause *C) {
+  ExprResult E = getDerived().TransformExpr(C->getSafelen());
+  if (E.isInvalid())
+    return 0;
+  return getDerived().RebuildOMPSafelenClause(
+      E.take(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template<typename Derived>
 OMPClause *
 TreeTransform<Derived>::TransformOMPDefaultClause(OMPDefaultClause *C) {
   return getDerived().RebuildOMPDefaultClause(C->getDefaultKind(),
@@ -6332,10 +6387,8 @@
 TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) {
   llvm::SmallVector<Expr *, 16> Vars;
   Vars.reserve(C->varlist_size());
-  for (OMPPrivateClause::varlist_iterator I = C->varlist_begin(),
-                                          E = C->varlist_end();
-       I != E; ++I) {
-    ExprResult EVar = getDerived().TransformExpr(cast<Expr>(*I));
+  for (auto *I : C->varlists()) {
+    ExprResult EVar = getDerived().TransformExpr(cast<Expr>(I));
     if (EVar.isInvalid())
       return 0;
     Vars.push_back(EVar.take());
@@ -6352,10 +6405,8 @@
                                                  OMPFirstprivateClause *C) {
   llvm::SmallVector<Expr *, 16> Vars;
   Vars.reserve(C->varlist_size());
-  for (OMPFirstprivateClause::varlist_iterator I = C->varlist_begin(),
-                                               E = C->varlist_end();
-       I != E; ++I) {
-    ExprResult EVar = getDerived().TransformExpr(cast<Expr>(*I));
+  for (auto *I : C->varlists()) {
+    ExprResult EVar = getDerived().TransformExpr(cast<Expr>(I));
     if (EVar.isInvalid())
       return 0;
     Vars.push_back(EVar.take());
@@ -6371,10 +6422,8 @@
 TreeTransform<Derived>::TransformOMPSharedClause(OMPSharedClause *C) {
   llvm::SmallVector<Expr *, 16> Vars;
   Vars.reserve(C->varlist_size());
-  for (OMPSharedClause::varlist_iterator I = C->varlist_begin(),
-                                         E = C->varlist_end();
-       I != E; ++I) {
-    ExprResult EVar = getDerived().TransformExpr(cast<Expr>(*I));
+  for (auto *I : C->varlists()) {
+    ExprResult EVar = getDerived().TransformExpr(cast<Expr>(I));
     if (EVar.isInvalid())
       return 0;
     Vars.push_back(EVar.take());
@@ -6385,6 +6434,23 @@
                                              C->getLocEnd());
 }
 
+template<typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPCopyinClause(OMPCopyinClause *C) {
+  llvm::SmallVector<Expr *, 16> Vars;
+  Vars.reserve(C->varlist_size());
+  for (auto *I : C->varlists()) {
+    ExprResult EVar = getDerived().TransformExpr(cast<Expr>(I));
+    if (EVar.isInvalid())
+      return 0;
+    Vars.push_back(EVar.take());
+  }
+  return getDerived().RebuildOMPCopyinClause(Vars,
+                                             C->getLocStart(),
+                                             C->getLParenLoc(),
+                                             C->getLocEnd());
+}
+
 //===----------------------------------------------------------------------===//
 // Expression transformation
 //===----------------------------------------------------------------------===//
@@ -7829,9 +7895,8 @@
     // Expand using declarations.
     if (isa<UsingDecl>(InstD)) {
       UsingDecl *UD = cast<UsingDecl>(InstD);
-      for (UsingDecl::shadow_iterator I = UD->shadow_begin(),
-             E = UD->shadow_end(); I != E; ++I)
-        R.addDecl(*I);
+      for (auto *I : UD->shadows())
+        R.addDecl(I);
       continue;
     }
 
@@ -7890,44 +7955,6 @@
 
 template<typename Derived>
 ExprResult
-TreeTransform<Derived>::TransformUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
-  TypeSourceInfo *T = getDerived().TransformType(E->getQueriedTypeSourceInfo());
-  if (!T)
-    return ExprError();
-
-  if (!getDerived().AlwaysRebuild() &&
-      T == E->getQueriedTypeSourceInfo())
-    return SemaRef.Owned(E);
-
-  return getDerived().RebuildUnaryTypeTrait(E->getTrait(),
-                                            E->getLocStart(),
-                                            T,
-                                            E->getLocEnd());
-}
-
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::TransformBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
-  TypeSourceInfo *LhsT = getDerived().TransformType(E->getLhsTypeSourceInfo());
-  if (!LhsT)
-    return ExprError();
-
-  TypeSourceInfo *RhsT = getDerived().TransformType(E->getRhsTypeSourceInfo());
-  if (!RhsT)
-    return ExprError();
-
-  if (!getDerived().AlwaysRebuild() &&
-      LhsT == E->getLhsTypeSourceInfo() && RhsT == E->getRhsTypeSourceInfo())
-    return SemaRef.Owned(E);
-
-  return getDerived().RebuildBinaryTypeTrait(E->getTrait(),
-                                            E->getLocStart(),
-                                            LhsT, RhsT,
-                                            E->getLocEnd());
-}
-
-template<typename Derived>
-ExprResult
 TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
   bool ArgChanged = false;
   SmallVector<TypeSourceInfo *, 4> Args;
@@ -8353,7 +8380,7 @@
     FunctionProtoTypeLoc NewCallOpFPTL = 
         NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>();
     ParmVarDecl **NewParamDeclArray = NewCallOpFPTL.getParmArray();
-    const unsigned NewNumArgs = NewCallOpFPTL.getNumArgs();
+    const unsigned NewNumArgs = NewCallOpFPTL.getNumParams();
 
     for (unsigned I = 0; I < NewNumArgs; ++I) {
       // If this call operator's type does not require transformation, 
@@ -8416,7 +8443,8 @@
   bool Invalid = false;
 
   // Introduce the context of the call operator.
-  Sema::ContextRAII SavedContext(getSema(), CallOperator);
+  Sema::ContextRAII SavedContext(getSema(), CallOperator,
+                                 /*NewThisContext*/false);
 
   LambdaScopeInfo *const LSI = getSema().getCurLambda();
   // Enter the scope of the lambda.
@@ -8734,9 +8762,8 @@
     // Expand using declarations.
     if (isa<UsingDecl>(InstD)) {
       UsingDecl *UD = cast<UsingDecl>(InstD);
-      for (UsingDecl::shadow_iterator I = UD->shadow_begin(),
-             E = UD->shadow_end(); I != E; ++I)
-        R.addDecl(*I);
+      for (auto *I : UD->shadows())
+        R.addDecl(I);
       continue;
     }
 
@@ -9337,7 +9364,7 @@
 
   const FunctionProtoType *exprFunctionType = E->getFunctionType();
   QualType exprResultType =
-      getDerived().TransformType(exprFunctionType->getResultType());
+      getDerived().TransformType(exprFunctionType->getReturnType());
 
   QualType functionType =
     getDerived().RebuildFunctionProtoType(exprResultType, paramTypes,
@@ -9364,9 +9391,8 @@
   // In builds with assertions, make sure that we captured everything we
   // captured before.
   if (!SemaRef.getDiagnostics().hasErrorOccurred()) {
-    for (BlockDecl::capture_iterator i = oldBlock->capture_begin(),
-           e = oldBlock->capture_end(); i != e; ++i) {
-      VarDecl *oldCapture = i->getVariable();
+    for (const auto &I : oldBlock->captures()) {
+      VarDecl *oldCapture = I.getVariable();
 
       // Ignore parameter packs.
       if (isa<ParmVarDecl>(oldCapture) &&
@@ -9443,8 +9469,8 @@
 TreeTransform<Derived>::RebuildMemberPointerType(QualType PointeeType,
                                                  QualType ClassType,
                                                  SourceLocation Sigil) {
-  return SemaRef.BuildMemberPointerType(PointeeType, ClassType,
-                                        Sigil, getDerived().getBaseEntity());
+  return SemaRef.BuildMemberPointerType(PointeeType, ClassType, Sigil,
+                                        getDerived().getBaseEntity());
 }
 
 template<typename Derived>
diff --git a/lib/Serialization/ASTCommon.h b/lib/Serialization/ASTCommon.h
index ef81e69..524a9c2 100644
--- a/lib/Serialization/ASTCommon.h
+++ b/lib/Serialization/ASTCommon.h
@@ -26,8 +26,12 @@
   UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
   UPD_CXX_ADDED_ANONYMOUS_NAMESPACE,
   UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER,
+  UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION,
+  UPD_CXX_RESOLVED_EXCEPTION_SPEC,
   UPD_CXX_DEDUCED_RETURN_TYPE,
-  UPD_DECL_MARKED_USED
+  UPD_DECL_MARKED_USED,
+  UPD_MANGLING_NUMBER,
+  UPD_STATIC_LOCAL_NUMBER
 };
 
 TypeIdx TypeIdxFromBuiltin(const BuiltinType *BT);
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 4d1b4b9..ff0b1dd 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -49,6 +49,7 @@
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/system_error.h"
 #include <algorithm>
 #include <cstdio>
@@ -59,6 +60,75 @@
 using namespace clang::serialization::reader;
 using llvm::BitstreamCursor;
 
+
+//===----------------------------------------------------------------------===//
+// ChainedASTReaderListener implementation
+//===----------------------------------------------------------------------===//
+
+bool
+ChainedASTReaderListener::ReadFullVersionInformation(StringRef FullVersion) {
+  return First->ReadFullVersionInformation(FullVersion) ||
+         Second->ReadFullVersionInformation(FullVersion);
+}
+bool ChainedASTReaderListener::ReadLanguageOptions(const LangOptions &LangOpts,
+                                                   bool Complain) {
+  return First->ReadLanguageOptions(LangOpts, Complain) ||
+         Second->ReadLanguageOptions(LangOpts, Complain);
+}
+bool
+ChainedASTReaderListener::ReadTargetOptions(const TargetOptions &TargetOpts,
+                                            bool Complain) {
+  return First->ReadTargetOptions(TargetOpts, Complain) ||
+         Second->ReadTargetOptions(TargetOpts, Complain);
+}
+bool ChainedASTReaderListener::ReadDiagnosticOptions(
+    const DiagnosticOptions &DiagOpts, bool Complain) {
+  return First->ReadDiagnosticOptions(DiagOpts, Complain) ||
+         Second->ReadDiagnosticOptions(DiagOpts, Complain);
+}
+bool
+ChainedASTReaderListener::ReadFileSystemOptions(const FileSystemOptions &FSOpts,
+                                                bool Complain) {
+  return First->ReadFileSystemOptions(FSOpts, Complain) ||
+         Second->ReadFileSystemOptions(FSOpts, Complain);
+}
+
+bool ChainedASTReaderListener::ReadHeaderSearchOptions(
+    const HeaderSearchOptions &HSOpts, bool Complain) {
+  return First->ReadHeaderSearchOptions(HSOpts, Complain) ||
+         Second->ReadHeaderSearchOptions(HSOpts, Complain);
+}
+bool ChainedASTReaderListener::ReadPreprocessorOptions(
+    const PreprocessorOptions &PPOpts, bool Complain,
+    std::string &SuggestedPredefines) {
+  return First->ReadPreprocessorOptions(PPOpts, Complain,
+                                        SuggestedPredefines) ||
+         Second->ReadPreprocessorOptions(PPOpts, Complain, SuggestedPredefines);
+}
+void ChainedASTReaderListener::ReadCounter(const serialization::ModuleFile &M,
+                                           unsigned Value) {
+  First->ReadCounter(M, Value);
+  Second->ReadCounter(M, Value);
+}
+bool ChainedASTReaderListener::needsInputFileVisitation() {
+  return First->needsInputFileVisitation() ||
+         Second->needsInputFileVisitation();
+}
+bool ChainedASTReaderListener::needsSystemInputFileVisitation() {
+  return First->needsSystemInputFileVisitation() ||
+         Second->needsSystemInputFileVisitation();
+}
+void ChainedASTReaderListener::visitModuleFile(StringRef Filename) {
+  First->visitModuleFile(Filename);
+  Second->visitModuleFile(Filename);
+}
+bool ChainedASTReaderListener::visitInputFile(StringRef Filename,
+                                              bool isSystem,
+                                              bool isOverridden) {
+  return First->visitInputFile(Filename, isSystem, isOverridden) ||
+         Second->visitInputFile(Filename, isSystem, isOverridden);
+}
+
 //===----------------------------------------------------------------------===//
 // PCH validator implementation
 //===----------------------------------------------------------------------===//
@@ -140,7 +210,6 @@
   CHECK_TARGET_OPT(Triple, "target");
   CHECK_TARGET_OPT(CPU, "target CPU");
   CHECK_TARGET_OPT(ABI, "target ABI");
-  CHECK_TARGET_OPT(CXXABI, "target C++ ABI");
   CHECK_TARGET_OPT(LinkerVersion, "target linker version");
 #undef CHECK_TARGET_OPT
 
@@ -401,19 +470,19 @@
 
 std::pair<unsigned, unsigned>
 ASTSelectorLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
-  using namespace clang::io;
-  unsigned KeyLen = ReadUnalignedLE16(d);
-  unsigned DataLen = ReadUnalignedLE16(d);
+  using namespace llvm::support;
+  unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
+  unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
   return std::make_pair(KeyLen, DataLen);
 }
 
 ASTSelectorLookupTrait::internal_key_type 
 ASTSelectorLookupTrait::ReadKey(const unsigned char* d, unsigned) {
-  using namespace clang::io;
+  using namespace llvm::support;
   SelectorTable &SelTable = Reader.getContext().Selectors;
-  unsigned N = ReadUnalignedLE16(d);
-  IdentifierInfo *FirstII
-    = Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
+  unsigned N = endian::readNext<uint16_t, little, unaligned>(d);
+  IdentifierInfo *FirstII = Reader.getLocalIdentifier(
+      F, endian::readNext<uint32_t, little, unaligned>(d));
   if (N == 0)
     return SelTable.getNullarySelector(FirstII);
   else if (N == 1)
@@ -422,7 +491,8 @@
   SmallVector<IdentifierInfo *, 16> Args;
   Args.push_back(FirstII);
   for (unsigned I = 1; I != N; ++I)
-    Args.push_back(Reader.getLocalIdentifier(F, ReadUnalignedLE32(d)));
+    Args.push_back(Reader.getLocalIdentifier(
+        F, endian::readNext<uint32_t, little, unaligned>(d)));
 
   return SelTable.getSelector(N, Args.data());
 }
@@ -430,13 +500,16 @@
 ASTSelectorLookupTrait::data_type 
 ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d, 
                                  unsigned DataLen) {
-  using namespace clang::io;
+  using namespace llvm::support;
 
   data_type Result;
 
-  Result.ID = Reader.getGlobalSelectorID(F, ReadUnalignedLE32(d));
-  unsigned NumInstanceMethodsAndBits = ReadUnalignedLE16(d);
-  unsigned NumFactoryMethodsAndBits = ReadUnalignedLE16(d);
+  Result.ID = Reader.getGlobalSelectorID(
+      F, endian::readNext<uint32_t, little, unaligned>(d));
+  unsigned NumInstanceMethodsAndBits =
+      endian::readNext<uint16_t, little, unaligned>(d);
+  unsigned NumFactoryMethodsAndBits =
+      endian::readNext<uint16_t, little, unaligned>(d);
   Result.InstanceBits = NumInstanceMethodsAndBits & 0x3;
   Result.FactoryBits = NumFactoryMethodsAndBits & 0x3;
   unsigned NumInstanceMethods = NumInstanceMethodsAndBits >> 2;
@@ -444,15 +517,15 @@
 
   // Load instance methods
   for (unsigned I = 0; I != NumInstanceMethods; ++I) {
-    if (ObjCMethodDecl *Method
-          = Reader.GetLocalDeclAs<ObjCMethodDecl>(F, ReadUnalignedLE32(d)))
+    if (ObjCMethodDecl *Method = Reader.GetLocalDeclAs<ObjCMethodDecl>(
+            F, endian::readNext<uint32_t, little, unaligned>(d)))
       Result.Instance.push_back(Method);
   }
 
   // Load factory methods
   for (unsigned I = 0; I != NumFactoryMethods; ++I) {
-    if (ObjCMethodDecl *Method
-          = Reader.GetLocalDeclAs<ObjCMethodDecl>(F, ReadUnalignedLE32(d)))
+    if (ObjCMethodDecl *Method = Reader.GetLocalDeclAs<ObjCMethodDecl>(
+            F, endian::readNext<uint32_t, little, unaligned>(d)))
       Result.Factory.push_back(Method);
   }
 
@@ -465,9 +538,9 @@
 
 std::pair<unsigned, unsigned>
 ASTIdentifierLookupTraitBase::ReadKeyDataLength(const unsigned char*& d) {
-  using namespace clang::io;
-  unsigned DataLen = ReadUnalignedLE16(d);
-  unsigned KeyLen = ReadUnalignedLE16(d);
+  using namespace llvm::support;
+  unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
+  unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
   return std::make_pair(KeyLen, DataLen);
 }
 
@@ -490,8 +563,8 @@
 IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
                                                    const unsigned char* d,
                                                    unsigned DataLen) {
-  using namespace clang::io;
-  unsigned RawID = ReadUnalignedLE32(d);
+  using namespace llvm::support;
+  unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
   bool IsInteresting = RawID & 0x01;
 
   // Wipe out the "is interesting" bit.
@@ -517,8 +590,8 @@
     return II;
   }
 
-  unsigned ObjCOrBuiltinID = ReadUnalignedLE16(d);
-  unsigned Bits = ReadUnalignedLE16(d);
+  unsigned ObjCOrBuiltinID = endian::readNext<uint16_t, little, unaligned>(d);
+  unsigned Bits = endian::readNext<uint16_t, little, unaligned>(d);
   bool CPlusPlusOperatorKeyword = Bits & 0x01;
   Bits >>= 1;
   bool HasRevertedTokenIDToIdentifier = Bits & 0x01;
@@ -567,11 +640,13 @@
   // If this identifier is a macro, deserialize the macro
   // definition.
   if (hadMacroDefinition) {
-    uint32_t MacroDirectivesOffset = ReadUnalignedLE32(d);
+    uint32_t MacroDirectivesOffset =
+        endian::readNext<uint32_t, little, unaligned>(d);
     DataLen -= 4;
     SmallVector<uint32_t, 8> LocalMacroIDs;
     if (hasSubmoduleMacros) {
-      while (uint32_t LocalMacroID = ReadUnalignedLE32(d)) {
+      while (uint32_t LocalMacroID =
+                 endian::readNext<uint32_t, little, unaligned>(d)) {
         DataLen -= 4;
         LocalMacroIDs.push_back(LocalMacroID);
       }
@@ -579,11 +654,34 @@
     }
 
     if (F.Kind == MK_Module) {
+      // Macro definitions are stored from newest to oldest, so reverse them
+      // before registering them.
+      llvm::SmallVector<unsigned, 8> MacroSizes;
       for (SmallVectorImpl<uint32_t>::iterator
-             I = LocalMacroIDs.begin(), E = LocalMacroIDs.end(); I != E; ++I) {
-        MacroID MacID = Reader.getGlobalMacroID(F, *I);
-        Reader.addPendingMacroFromModule(II, &F, MacID, F.DirectImportLoc);
+             I = LocalMacroIDs.begin(), E = LocalMacroIDs.end(); I != E; /**/) {
+        unsigned Size = 1;
+
+        static const uint32_t HasOverridesFlag = 0x80000000U;
+        if (I + 1 != E && (I[1] & HasOverridesFlag))
+          Size += 1 + (I[1] & ~HasOverridesFlag);
+
+        MacroSizes.push_back(Size);
+        I += Size;
       }
+
+      SmallVectorImpl<uint32_t>::iterator I = LocalMacroIDs.end();
+      for (SmallVectorImpl<unsigned>::reverse_iterator SI = MacroSizes.rbegin(),
+                                                       SE = MacroSizes.rend();
+           SI != SE; ++SI) {
+        I -= *SI;
+
+        uint32_t LocalMacroID = *I;
+        llvm::ArrayRef<uint32_t> Overrides;
+        if (*SI != 1)
+          Overrides = llvm::makeArrayRef(&I[2], *SI - 2);
+        Reader.addPendingMacroFromModule(II, &F, LocalMacroID, Overrides);
+      }
+      assert(I == LocalMacroIDs.begin());
     } else {
       Reader.addPendingMacroFromPCH(II, &F, MacroDirectivesOffset);
     }
@@ -596,7 +694,8 @@
   if (DataLen > 0) {
     SmallVector<uint32_t, 4> DeclIDs;
     for (; DataLen > 0; DataLen -= 4)
-      DeclIDs.push_back(Reader.getGlobalDeclID(F, ReadUnalignedLE32(d)));
+      DeclIDs.push_back(Reader.getGlobalDeclID(
+          F, endian::readNext<uint32_t, little, unaligned>(d)));
     Reader.SetGloballyVisibleDecls(II, DeclIDs);
   }
 
@@ -664,34 +763,37 @@
 
 std::pair<unsigned, unsigned>
 ASTDeclContextNameLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
-  using namespace clang::io;
-  unsigned KeyLen = ReadUnalignedLE16(d);
-  unsigned DataLen = ReadUnalignedLE16(d);
+  using namespace llvm::support;
+  unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
+  unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
   return std::make_pair(KeyLen, DataLen);
 }
 
 ASTDeclContextNameLookupTrait::internal_key_type 
 ASTDeclContextNameLookupTrait::ReadKey(const unsigned char* d, unsigned) {
-  using namespace clang::io;
+  using namespace llvm::support;
 
   DeclNameKey Key;
   Key.Kind = (DeclarationName::NameKind)*d++;
   switch (Key.Kind) {
   case DeclarationName::Identifier:
-    Key.Data = (uint64_t)Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
+    Key.Data = (uint64_t)Reader.getLocalIdentifier(
+        F, endian::readNext<uint32_t, little, unaligned>(d));
     break;
   case DeclarationName::ObjCZeroArgSelector:
   case DeclarationName::ObjCOneArgSelector:
   case DeclarationName::ObjCMultiArgSelector:
     Key.Data =
-       (uint64_t)Reader.getLocalSelector(F, ReadUnalignedLE32(d))
-                   .getAsOpaquePtr();
+        (uint64_t)Reader.getLocalSelector(
+                             F, endian::readNext<uint32_t, little, unaligned>(
+                                    d)).getAsOpaquePtr();
     break;
   case DeclarationName::CXXOperatorName:
     Key.Data = *d++; // OverloadedOperatorKind
     break;
   case DeclarationName::CXXLiteralOperatorName:
-    Key.Data = (uint64_t)Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
+    Key.Data = (uint64_t)Reader.getLocalIdentifier(
+        F, endian::readNext<uint32_t, little, unaligned>(d));
     break;
   case DeclarationName::CXXConstructorName:
   case DeclarationName::CXXDestructorName:
@@ -708,8 +810,8 @@
 ASTDeclContextNameLookupTrait::ReadData(internal_key_type, 
                                         const unsigned char* d,
                                         unsigned DataLen) {
-  using namespace clang::io;
-  unsigned NumDecls = ReadUnalignedLE16(d);
+  using namespace llvm::support;
+  unsigned NumDecls = endian::readNext<uint16_t, little, unaligned>(d);
   LE32DeclID *Start = reinterpret_cast<LE32DeclID *>(
                         const_cast<unsigned char *>(d));
   return std::make_pair(Start, Start + NumDecls);
@@ -1262,16 +1364,18 @@
     
 std::pair<unsigned, unsigned>
 HeaderFileInfoTrait::ReadKeyDataLength(const unsigned char*& d) {
-  unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d);
+  using namespace llvm::support;
+  unsigned KeyLen = (unsigned) endian::readNext<uint16_t, little, unaligned>(d);
   unsigned DataLen = (unsigned) *d++;
   return std::make_pair(KeyLen, DataLen);
 }
 
 HeaderFileInfoTrait::internal_key_type
 HeaderFileInfoTrait::ReadKey(const unsigned char *d, unsigned) {
+  using namespace llvm::support;
   internal_key_type ikey;
-  ikey.Size = off_t(clang::io::ReadUnalignedLE64(d));
-  ikey.ModTime = time_t(clang::io::ReadUnalignedLE64(d));
+  ikey.Size = off_t(endian::readNext<uint64_t, little, unaligned>(d));
+  ikey.ModTime = time_t(endian::readNext<uint64_t, little, unaligned>(d));
   ikey.Filename = (const char *)d;
   return ikey;
 }
@@ -1280,7 +1384,7 @@
 HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
                               unsigned DataLen) {
   const unsigned char *End = d + DataLen;
-  using namespace clang::io;
+  using namespace llvm::support;
   HeaderFileInfo HFI;
   unsigned Flags = *d++;
   HFI.HeaderRole = static_cast<ModuleMap::ModuleHeaderRole>
@@ -1290,10 +1394,11 @@
   HFI.DirInfo = (Flags >> 2) & 0x03;
   HFI.Resolved = (Flags >> 1) & 0x01;
   HFI.IndexHeaderMapHeader = Flags & 0x01;
-  HFI.NumIncludes = ReadUnalignedLE16(d);
-  HFI.ControllingMacroID = Reader.getGlobalIdentifierID(M, 
-                                                        ReadUnalignedLE32(d));
-  if (unsigned FrameworkOffset = ReadUnalignedLE32(d)) {
+  HFI.NumIncludes = endian::readNext<uint16_t, little, unaligned>(d);
+  HFI.ControllingMacroID = Reader.getGlobalIdentifierID(
+      M, endian::readNext<uint32_t, little, unaligned>(d));
+  if (unsigned FrameworkOffset =
+          endian::readNext<uint32_t, little, unaligned>(d)) {
     // The framework offset is 1 greater than the actual offset, 
     // since 0 is used as an indicator for "no framework name".
     StringRef FrameworkName(FrameworkStrings + FrameworkOffset - 1);
@@ -1301,7 +1406,7 @@
   }
   
   if (d != End) {
-    uint32_t LocalSMID = ReadUnalignedLE32(d);
+    uint32_t LocalSMID = endian::readNext<uint32_t, little, unaligned>(d);
     if (LocalSMID) {
       // This header is part of a module. Associate it with the module to enable
       // implicit module import.
@@ -1323,12 +1428,19 @@
   return HFI;
 }
 
-void ASTReader::addPendingMacroFromModule(IdentifierInfo *II,
-                                          ModuleFile *M,
-                                          GlobalMacroID GMacID,
-                                          SourceLocation ImportLoc) {
+void
+ASTReader::addPendingMacroFromModule(IdentifierInfo *II, ModuleFile *M,
+                                     GlobalMacroID GMacID,
+                                     llvm::ArrayRef<SubmoduleID> Overrides) {
   assert(NumCurrentElementsDeserializing > 0 &&"Missing deserialization guard");
-  PendingMacroIDs[II].push_back(PendingMacroInfo(M, GMacID, ImportLoc));
+  SubmoduleID *OverrideData = 0;
+  if (!Overrides.empty()) {
+    OverrideData = new (Context) SubmoduleID[Overrides.size() + 1];
+    OverrideData[0] = Overrides.size();
+    for (unsigned I = 0; I != Overrides.size(); ++I)
+      OverrideData[I + 1] = getGlobalSubmoduleID(*M, Overrides[I]);
+  }
+  PendingMacroIDs[II].push_back(PendingMacroInfo(M, GMacID, OverrideData));
 }
 
 void ASTReader::addPendingMacroFromPCH(IdentifierInfo *II,
@@ -1477,6 +1589,59 @@
     IdentifierGeneration[II] = CurrentGeneration;
 }
 
+struct ASTReader::ModuleMacroInfo {
+  SubmoduleID SubModID;
+  MacroInfo *MI;
+  SubmoduleID *Overrides;
+  // FIXME: Remove this.
+  ModuleFile *F;
+
+  bool isDefine() const { return MI; }
+
+  SubmoduleID getSubmoduleID() const { return SubModID; }
+
+  llvm::ArrayRef<SubmoduleID> getOverriddenSubmodules() const {
+    if (!Overrides)
+      return llvm::ArrayRef<SubmoduleID>();
+    return llvm::makeArrayRef(Overrides + 1, *Overrides);
+  }
+
+  DefMacroDirective *import(Preprocessor &PP, SourceLocation ImportLoc) const {
+    if (!MI)
+      return 0;
+    return PP.AllocateDefMacroDirective(MI, ImportLoc, /*isImported=*/true);
+  }
+};
+
+ASTReader::ModuleMacroInfo *
+ASTReader::getModuleMacro(const PendingMacroInfo &PMInfo) {
+  ModuleMacroInfo Info;
+
+  uint32_t ID = PMInfo.ModuleMacroData.MacID;
+  if (ID & 1) {
+    // Macro undefinition.
+    Info.SubModID = getGlobalSubmoduleID(*PMInfo.M, ID >> 1);
+    Info.MI = 0;
+  } else {
+    // Macro definition.
+    GlobalMacroID GMacID = getGlobalMacroID(*PMInfo.M, ID >> 1);
+    assert(GMacID);
+
+    // If this macro has already been loaded, don't do so again.
+    // FIXME: This is highly dubious. Multiple macro definitions can have the
+    // same MacroInfo (and hence the same GMacID) due to #pragma push_macro etc.
+    if (MacrosLoaded[GMacID - NUM_PREDEF_MACRO_IDS])
+      return 0;
+
+    Info.MI = getMacro(GMacID);
+    Info.SubModID = Info.MI->getOwningModuleID();
+  }
+  Info.Overrides = PMInfo.ModuleMacroData.Overrides;
+  Info.F = PMInfo.M;
+
+  return new (Context) ModuleMacroInfo(Info);
+}
+
 void ASTReader::resolvePendingMacro(IdentifierInfo *II,
                                     const PendingMacroInfo &PMInfo) {
   assert(II);
@@ -1486,42 +1651,21 @@
                               PMInfo.PCHMacroData.MacroDirectivesOffset);
     return;
   }
-  
+
   // Module Macro.
 
-  GlobalMacroID GMacID = PMInfo.ModuleMacroData.GMacID;
-  SourceLocation ImportLoc =
-      SourceLocation::getFromRawEncoding(PMInfo.ModuleMacroData.ImportLoc);
-
-  assert(GMacID);
-  // If this macro has already been loaded, don't do so again.
-  if (MacrosLoaded[GMacID - NUM_PREDEF_MACRO_IDS])
+  ModuleMacroInfo *MMI = getModuleMacro(PMInfo);
+  if (!MMI)
     return;
 
-  MacroInfo *MI = getMacro(GMacID);
-  SubmoduleID SubModID = MI->getOwningModuleID();
-  MacroDirective *MD = PP.AllocateDefMacroDirective(MI, ImportLoc,
-                                                    /*isImported=*/true);
-
-  // Determine whether this macro definition is visible.
-  bool Hidden = false;
-  Module *Owner = 0;
-  if (SubModID) {
-    if ((Owner = getSubmodule(SubModID))) {
-      if (Owner->NameVisibility == Module::Hidden) {
-        // The owning module is not visible, and this macro definition
-        // should not be, either.
-        Hidden = true;
-
-        // Note that this macro definition was hidden because its owning
-        // module is not yet visible.
-        HiddenNamesMap[Owner].push_back(HiddenName(II, MD));
-      }
-    }
+  Module *Owner = getSubmodule(MMI->getSubmoduleID());
+  if (Owner && Owner->NameVisibility == Module::Hidden) {
+    // Macros in the owning module are hidden. Just remember this macro to
+    // install if we make this module visible.
+    HiddenNamesMap[Owner].HiddenMacros.insert(std::make_pair(II, MMI));
+  } else {
+    installImportedMacro(II, MMI, Owner);
   }
-
-  if (!Hidden)
-    installImportedMacro(II, MD, Owner);
 }
 
 void ASTReader::installPCHMacroDirectives(IdentifierInfo *II,
@@ -1606,34 +1750,181 @@
   return PrevInSystem && NewInSystem;
 }
 
-void ASTReader::installImportedMacro(IdentifierInfo *II, MacroDirective *MD,
-                                     Module *Owner) {
-  assert(II && MD);
+void ASTReader::removeOverriddenMacros(IdentifierInfo *II,
+                                       AmbiguousMacros &Ambig,
+                                       llvm::ArrayRef<SubmoduleID> Overrides) {
+  for (unsigned OI = 0, ON = Overrides.size(); OI != ON; ++OI) {
+    SubmoduleID OwnerID = Overrides[OI];
 
-  DefMacroDirective *DefMD = cast<DefMacroDirective>(MD);
+    // If this macro is not yet visible, remove it from the hidden names list.
+    Module *Owner = getSubmodule(OwnerID);
+    HiddenNames &Hidden = HiddenNamesMap[Owner];
+    HiddenMacrosMap::iterator HI = Hidden.HiddenMacros.find(II);
+    if (HI != Hidden.HiddenMacros.end()) {
+      auto SubOverrides = HI->second->getOverriddenSubmodules();
+      Hidden.HiddenMacros.erase(HI);
+      removeOverriddenMacros(II, Ambig, SubOverrides);
+    }
+
+    // If this macro is already in our list of conflicts, remove it from there.
+    Ambig.erase(
+        std::remove_if(Ambig.begin(), Ambig.end(), [&](DefMacroDirective *MD) {
+          return MD->getInfo()->getOwningModuleID() == OwnerID;
+        }),
+        Ambig.end());
+  }
+}
+
+ASTReader::AmbiguousMacros *
+ASTReader::removeOverriddenMacros(IdentifierInfo *II,
+                                  llvm::ArrayRef<SubmoduleID> Overrides) {
   MacroDirective *Prev = PP.getMacroDirective(II);
-  if (Prev) {
-    MacroDirective::DefInfo PrevDef = Prev->getDefinition();
-    MacroInfo *PrevMI = PrevDef.getMacroInfo();
-    MacroInfo *NewMI = DefMD->getInfo();
-    if (NewMI != PrevMI && !PrevMI->isIdenticalTo(*NewMI, PP,
-                                                  /*Syntactically=*/true)) {
-      // Before marking the macros as ambiguous, check if this is a case where
-      // both macros are in system headers. If so, we trust that the system
-      // did not get it wrong. This also handles cases where Clang's own
-      // headers have a different spelling of certain system macros:
-      //   #define LONG_MAX __LONG_MAX__ (clang's limits.h)
-      //   #define LONG_MAX 0x7fffffffffffffffL (system's limits.h)
-      if (!areDefinedInSystemModules(PrevMI, NewMI, Owner, *this)) {
-        PrevDef.getDirective()->setAmbiguous(true);
-        DefMD->setAmbiguous(true);
-      }
+  if (!Prev && Overrides.empty())
+    return 0;
+
+  DefMacroDirective *PrevDef = Prev ? Prev->getDefinition().getDirective() : 0;
+  if (PrevDef && PrevDef->isAmbiguous()) {
+    // We had a prior ambiguity. Check whether we resolve it (or make it worse).
+    AmbiguousMacros &Ambig = AmbiguousMacroDefs[II];
+    Ambig.push_back(PrevDef);
+
+    removeOverriddenMacros(II, Ambig, Overrides);
+
+    if (!Ambig.empty())
+      return &Ambig;
+
+    AmbiguousMacroDefs.erase(II);
+  } else {
+    // There's no ambiguity yet. Maybe we're introducing one.
+    llvm::SmallVector<DefMacroDirective*, 1> Ambig;
+    if (PrevDef)
+      Ambig.push_back(PrevDef);
+
+    removeOverriddenMacros(II, Ambig, Overrides);
+
+    if (!Ambig.empty()) {
+      AmbiguousMacros &Result = AmbiguousMacroDefs[II];
+      Result.swap(Ambig);
+      return &Result;
     }
   }
-  
+
+  // We ended up with no ambiguity.
+  return 0;
+}
+
+void ASTReader::installImportedMacro(IdentifierInfo *II, ModuleMacroInfo *MMI,
+                                     Module *Owner) {
+  assert(II && Owner);
+
+  SourceLocation ImportLoc = Owner->MacroVisibilityLoc;
+  if (ImportLoc.isInvalid()) {
+    // FIXME: If we made macros from this module visible but didn't provide a
+    // source location for the import, we don't have a location for the macro.
+    // Use the location at which the containing module file was first imported
+    // for now.
+    ImportLoc = MMI->F->DirectImportLoc;
+    assert(ImportLoc.isValid() && "no import location for a visible macro?");
+  }
+
+  llvm::SmallVectorImpl<DefMacroDirective*> *Prev =
+      removeOverriddenMacros(II, MMI->getOverriddenSubmodules());
+
+
+  // Create a synthetic macro definition corresponding to the import (or null
+  // if this was an undefinition of the macro).
+  DefMacroDirective *MD = MMI->import(PP, ImportLoc);
+
+  // If there's no ambiguity, just install the macro.
+  if (!Prev) {
+    if (MD)
+      PP.appendMacroDirective(II, MD);
+    else
+      PP.appendMacroDirective(II, PP.AllocateUndefMacroDirective(ImportLoc));
+    return;
+  }
+  assert(!Prev->empty());
+
+  if (!MD) {
+    // We imported a #undef that didn't remove all prior definitions. The most
+    // recent prior definition remains, and we install it in the place of the
+    // imported directive.
+    MacroInfo *NewMI = Prev->back()->getInfo();
+    Prev->pop_back();
+    MD = PP.AllocateDefMacroDirective(NewMI, ImportLoc, /*Imported*/true);
+  }
+
+  // We're introducing a macro definition that creates or adds to an ambiguity.
+  // We can resolve that ambiguity if this macro is token-for-token identical to
+  // all of the existing definitions.
+  MacroInfo *NewMI = MD->getInfo();
+  assert(NewMI && "macro definition with no MacroInfo?");
+  while (!Prev->empty()) {
+    MacroInfo *PrevMI = Prev->back()->getInfo();
+    assert(PrevMI && "macro definition with no MacroInfo?");
+
+    // Before marking the macros as ambiguous, check if this is a case where
+    // both macros are in system headers. If so, we trust that the system
+    // did not get it wrong. This also handles cases where Clang's own
+    // headers have a different spelling of certain system macros:
+    //   #define LONG_MAX __LONG_MAX__ (clang's limits.h)
+    //   #define LONG_MAX 0x7fffffffffffffffL (system's limits.h)
+    //
+    // FIXME: Remove the defined-in-system-headers check. clang's limits.h
+    // overrides the system limits.h's macros, so there's no conflict here.
+    if (NewMI != PrevMI &&
+        !PrevMI->isIdenticalTo(*NewMI, PP, /*Syntactically=*/true) &&
+        !areDefinedInSystemModules(PrevMI, NewMI, Owner, *this))
+      break;
+
+    // The previous definition is the same as this one (or both are defined in
+    // system modules so we can assume they're equivalent); we don't need to
+    // track it any more.
+    Prev->pop_back();
+  }
+
+  if (!Prev->empty())
+    MD->setAmbiguous(true);
+
   PP.appendMacroDirective(II, MD);
 }
 
+ASTReader::InputFileInfo
+ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
+  // Go find this input file.
+  BitstreamCursor &Cursor = F.InputFilesCursor;
+  SavedStreamPosition SavedPosition(Cursor);
+  Cursor.JumpToBit(F.InputFileOffsets[ID-1]);
+
+  unsigned Code = Cursor.ReadCode();
+  RecordData Record;
+  StringRef Blob;
+
+  unsigned Result = Cursor.readRecord(Code, Record, &Blob);
+  assert(static_cast<InputFileRecordTypes>(Result) == INPUT_FILE &&
+         "invalid record type for input file");
+  (void)Result;
+
+  std::string Filename;
+  off_t StoredSize;
+  time_t StoredTime;
+  bool Overridden;
+  
+  assert(Record[0] == ID && "Bogus stored ID or offset");
+  StoredSize = static_cast<off_t>(Record[1]);
+  StoredTime = static_cast<time_t>(Record[2]);
+  Overridden = static_cast<bool>(Record[3]);
+  Filename = Blob;
+  MaybeAddSystemRootToFilename(F, Filename);
+  
+  InputFileInfo R = { std::move(Filename), StoredSize, StoredTime, Overridden };
+  return R;
+}
+
+std::string ASTReader::getInputFileName(ModuleFile &F, unsigned int ID) {
+  return readInputFileInfo(F, ID).Filename;
+}
+
 InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
   // If this ID is bogus, just return an empty input file.
   if (ID == 0 || ID > F.InputFilesLoaded.size())
@@ -1643,107 +1934,113 @@
   if (F.InputFilesLoaded[ID-1].getFile())
     return F.InputFilesLoaded[ID-1];
 
+  if (F.InputFilesLoaded[ID-1].isNotFound())
+    return InputFile();
+
   // Go find this input file.
   BitstreamCursor &Cursor = F.InputFilesCursor;
   SavedStreamPosition SavedPosition(Cursor);
   Cursor.JumpToBit(F.InputFileOffsets[ID-1]);
   
-  unsigned Code = Cursor.ReadCode();
-  RecordData Record;
-  StringRef Blob;
-  switch ((InputFileRecordTypes)Cursor.readRecord(Code, Record, &Blob)) {
-  case INPUT_FILE: {
-    unsigned StoredID = Record[0];
-    assert(ID == StoredID && "Bogus stored ID or offset");
-    (void)StoredID;
-    off_t StoredSize = (off_t)Record[1];
-    time_t StoredTime = (time_t)Record[2];
-    bool Overridden = (bool)Record[3];
-    
-    // Get the file entry for this input file.
-    StringRef OrigFilename = Blob;
-    std::string Filename = OrigFilename;
-    MaybeAddSystemRootToFilename(F, Filename);
-    const FileEntry *File 
-      = Overridden? FileMgr.getVirtualFile(Filename, StoredSize, StoredTime)
-                  : FileMgr.getFile(Filename, /*OpenFile=*/false);
-    
-    // If we didn't find the file, resolve it relative to the
-    // original directory from which this AST file was created.
-    if (File == 0 && !F.OriginalDir.empty() && !CurrentDir.empty() &&
-        F.OriginalDir != CurrentDir) {
-      std::string Resolved = resolveFileRelativeToOriginalDir(Filename,
-                                                              F.OriginalDir,
-                                                              CurrentDir);
-      if (!Resolved.empty())
-        File = FileMgr.getFile(Resolved);
-    }
-    
-    // For an overridden file, create a virtual file with the stored
-    // size/timestamp.
-    if (Overridden && File == 0) {
-      File = FileMgr.getVirtualFile(Filename, StoredSize, StoredTime);
-    }
-    
-    if (File == 0) {
-      if (Complain) {
-        std::string ErrorStr = "could not find file '";
-        ErrorStr += Filename;
-        ErrorStr += "' referenced by AST file";
-        Error(ErrorStr.c_str());
-      }
-      return InputFile();
-    }
+  InputFileInfo FI = readInputFileInfo(F, ID);
+  off_t StoredSize = FI.StoredSize;
+  time_t StoredTime = FI.StoredTime;
+  bool Overridden = FI.Overridden;
+  StringRef Filename = FI.Filename;
 
-    // Check if there was a request to override the contents of the file
-    // that was part of the precompiled header. Overridding such a file
-    // can lead to problems when lexing using the source locations from the
-    // PCH.
-    SourceManager &SM = getSourceManager();
-    if (!Overridden && SM.isFileOverridden(File)) {
-      if (Complain)
-        Error(diag::err_fe_pch_file_overridden, Filename);
-      // After emitting the diagnostic, recover by disabling the override so
-      // that the original file will be used.
-      SM.disableFileContentsOverride(File);
-      // The FileEntry is a virtual file entry with the size of the contents
-      // that would override the original contents. Set it to the original's
-      // size/time.
-      FileMgr.modifyFileEntry(const_cast<FileEntry*>(File),
-                              StoredSize, StoredTime);
+  const FileEntry *File
+    = Overridden? FileMgr.getVirtualFile(Filename, StoredSize, StoredTime)
+                : FileMgr.getFile(Filename, /*OpenFile=*/false);
+
+  // If we didn't find the file, resolve it relative to the
+  // original directory from which this AST file was created.
+  if (File == 0 && !F.OriginalDir.empty() && !CurrentDir.empty() &&
+      F.OriginalDir != CurrentDir) {
+    std::string Resolved = resolveFileRelativeToOriginalDir(Filename,
+                                                            F.OriginalDir,
+                                                            CurrentDir);
+    if (!Resolved.empty())
+      File = FileMgr.getFile(Resolved);
+  }
+
+  // For an overridden file, create a virtual file with the stored
+  // size/timestamp.
+  if (Overridden && File == 0) {
+    File = FileMgr.getVirtualFile(Filename, StoredSize, StoredTime);
+  }
+
+  if (File == 0) {
+    if (Complain) {
+      std::string ErrorStr = "could not find file '";
+      ErrorStr += Filename;
+      ErrorStr += "' referenced by AST file";
+      Error(ErrorStr.c_str());
     }
+    // Record that we didn't find the file.
+    F.InputFilesLoaded[ID-1] = InputFile::getNotFound();
+    return InputFile();
+  }
 
-    bool IsOutOfDate = false;
+  // Check if there was a request to override the contents of the file
+  // that was part of the precompiled header. Overriding such a file
+  // can lead to problems when lexing using the source locations from the
+  // PCH.
+  SourceManager &SM = getSourceManager();
+  if (!Overridden && SM.isFileOverridden(File)) {
+    if (Complain)
+      Error(diag::err_fe_pch_file_overridden, Filename);
+    // After emitting the diagnostic, recover by disabling the override so
+    // that the original file will be used.
+    SM.disableFileContentsOverride(File);
+    // The FileEntry is a virtual file entry with the size of the contents
+    // that would override the original contents. Set it to the original's
+    // size/time.
+    FileMgr.modifyFileEntry(const_cast<FileEntry*>(File),
+                            StoredSize, StoredTime);
+  }
 
-    // For an overridden file, there is nothing to validate.
-    if (!Overridden && (StoredSize != File->getSize()
+  bool IsOutOfDate = false;
+
+  // For an overridden file, there is nothing to validate.
+  if (!Overridden && (StoredSize != File->getSize()
 #if !defined(LLVM_ON_WIN32)
-         // In our regression testing, the Windows file system seems to
-         // have inconsistent modification times that sometimes
-         // erroneously trigger this error-handling path.
-         || StoredTime != File->getModificationTime()
+       // In our regression testing, the Windows file system seems to
+       // have inconsistent modification times that sometimes
+       // erroneously trigger this error-handling path.
+       || StoredTime != File->getModificationTime()
 #endif
-         )) {
-      if (Complain) {
-        Error(diag::err_fe_pch_file_modified, Filename, F.FileName);
-        if (Context.getLangOpts().Modules && !Diags.isDiagnosticInFlight()) {
-          Diag(diag::note_module_cache_path)
-            << PP.getHeaderSearchInfo().getModuleCachePath();
-        }
+       )) {
+    if (Complain) {
+      // Build a list of the PCH imports that got us here (in reverse).
+      SmallVector<ModuleFile *, 4> ImportStack(1, &F);
+      while (ImportStack.back()->ImportedBy.size() > 0)
+        ImportStack.push_back(ImportStack.back()->ImportedBy[0]);
+
+      // The top-level PCH is stale.
+      StringRef TopLevelPCHName(ImportStack.back()->FileName);
+      Error(diag::err_fe_pch_file_modified, Filename, TopLevelPCHName);
+
+      // Print the import stack.
+      if (ImportStack.size() > 1 && !Diags.isDiagnosticInFlight()) {
+        Diag(diag::note_pch_required_by)
+          << Filename << ImportStack[0]->FileName;
+        for (unsigned I = 1; I < ImportStack.size(); ++I)
+          Diag(diag::note_pch_required_by)
+            << ImportStack[I-1]->FileName << ImportStack[I]->FileName;
       }
 
-      IsOutOfDate = true;
+      if (!Diags.isDiagnosticInFlight())
+        Diag(diag::note_pch_rebuild_required) << TopLevelPCHName;
     }
 
-    InputFile IF = InputFile(File, Overridden, IsOutOfDate);
-
-    // Note that we've loaded this input file.
-    F.InputFilesLoaded[ID-1] = IF;
-    return IF;
-  }
+    IsOutOfDate = true;
   }
 
-  return InputFile();
+  InputFile IF = InputFile(File, Overridden, IsOutOfDate);
+
+  // Note that we've loaded this input file.
+  F.InputFilesLoaded[ID-1] = IF;
+  return IF;
 }
 
 const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) {
@@ -1808,20 +2105,54 @@
     case llvm::BitstreamEntry::Error:
       Error("malformed block record in AST file");
       return Failure;
-    case llvm::BitstreamEntry::EndBlock:
-      // Validate all of the non-system input files.
-      if (!DisableValidation) {
+    case llvm::BitstreamEntry::EndBlock: {
+      // Validate input files.
+      const HeaderSearchOptions &HSOpts =
+          PP.getHeaderSearchInfo().getHeaderSearchOpts();
+
+      // All user input files reside at the index range [0, Record[1]), and
+      // system input files reside at [Record[1], Record[0]).
+      // Record is the one from INPUT_FILE_OFFSETS.
+      unsigned NumInputs = Record[0];
+      unsigned NumUserInputs = Record[1];
+
+      if (!DisableValidation &&
+          (!HSOpts.ModulesValidateOncePerBuildSession ||
+           F.InputFilesValidationTimestamp <= HSOpts.BuildSessionTimestamp)) {
         bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
-        // All user input files reside at the index range [0, Record[1]).
-        // Record is the one from INPUT_FILE_OFFSETS.
-        for (unsigned I = 0, N = Record[1]; I < N; ++I) {
+
+        // If we are reading a module, we will create a verification timestamp,
+        // so we verify all input files.  Otherwise, verify only user input
+        // files.
+
+        unsigned N = NumUserInputs;
+        if (ValidateSystemInputs ||
+            (HSOpts.ModulesValidateOncePerBuildSession && F.Kind == MK_Module))
+          N = NumInputs;
+
+        for (unsigned I = 0; I < N; ++I) {
           InputFile IF = getInputFile(F, I+1, Complain);
           if (!IF.getFile() || IF.isOutOfDate())
             return OutOfDate;
         }
       }
+
+      if (Listener)
+        Listener->visitModuleFile(F.FileName);
+
+      if (Listener && Listener->needsInputFileVisitation()) {
+        unsigned N = Listener->needsSystemInputFileVisitation() ? NumInputs
+                                                                : NumUserInputs;
+        for (unsigned I = 0; I < N; ++I) {
+          bool IsSystem = I >= NumUserInputs;
+          InputFileInfo FI = readInputFileInfo(F, I+1);
+          Listener->visitInputFile(FI.Filename, IsSystem, FI.Overridden);
+        }
+      }
+
       return Success;
-      
+    }
+
     case llvm::BitstreamEntry::SubBlock:
       switch (Entry.ID) {
       case INPUT_FILES_BLOCK_ID:
@@ -1854,8 +2185,8 @@
     case METADATA: {
       if (Record[0] != VERSION_MAJOR && !DisableValidation) {
         if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
-          Diag(Record[0] < VERSION_MAJOR? diag::warn_pch_version_too_old
-                                        : diag::warn_pch_version_too_new);
+          Diag(Record[0] < VERSION_MAJOR? diag::err_pch_version_too_old
+                                        : diag::err_pch_version_too_new);
         return VersionMismatch;
       }
 
@@ -1871,7 +2202,7 @@
       StringRef ASTBranch = Blob;
       if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
         if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
-          Diag(diag::warn_pch_different_branch) << ASTBranch << CurBranch;
+          Diag(diag::err_pch_different_branch) << ASTBranch << CurBranch;
         return VersionMismatch;
       }
       break;
@@ -1916,7 +2247,7 @@
       bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
       if (Listener && &F == *ModuleMgr.begin() &&
           ParseLanguageOptions(Record, Complain, *Listener) &&
-          !DisableValidation)
+          !DisableValidation && !AllowConfigurationMismatch)
         return ConfigurationMismatch;
       break;
     }
@@ -1925,7 +2256,7 @@
       bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
       if (Listener && &F == *ModuleMgr.begin() &&
           ParseTargetOptions(Record, Complain, *Listener) &&
-          !DisableValidation)
+          !DisableValidation && !AllowConfigurationMismatch)
         return ConfigurationMismatch;
       break;
     }
@@ -1934,7 +2265,7 @@
       bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
       if (Listener && &F == *ModuleMgr.begin() &&
           ParseDiagnosticOptions(Record, Complain, *Listener) &&
-          !DisableValidation)
+          !DisableValidation && !AllowConfigurationMismatch)
         return ConfigurationMismatch;
       break;
     }
@@ -1943,7 +2274,7 @@
       bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
       if (Listener && &F == *ModuleMgr.begin() &&
           ParseFileSystemOptions(Record, Complain, *Listener) &&
-          !DisableValidation)
+          !DisableValidation && !AllowConfigurationMismatch)
         return ConfigurationMismatch;
       break;
     }
@@ -1952,7 +2283,7 @@
       bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
       if (Listener && &F == *ModuleMgr.begin() &&
           ParseHeaderSearchOptions(Record, Complain, *Listener) &&
-          !DisableValidation)
+          !DisableValidation && !AllowConfigurationMismatch)
         return ConfigurationMismatch;
       break;
     }
@@ -1962,7 +2293,7 @@
       if (Listener && &F == *ModuleMgr.begin() &&
           ParsePreprocessorOptions(Record, Complain, *Listener,
                                    SuggestedPredefines) &&
-          !DisableValidation)
+          !DisableValidation && !AllowConfigurationMismatch)
         return ConfigurationMismatch;
       break;
     }
@@ -2035,14 +2366,7 @@
           return true;
         }
         break;
-        
-      case DECL_UPDATES_BLOCK_ID:
-        if (Stream.SkipBlock()) {
-          Error("malformed block record in AST file");
-          return true;
-        }
-        break;
-        
+
       case PREPROCESSOR_BLOCK_ID:
         F.MacroCursor = Stream;
         if (!PP.getExternalSource())
@@ -2191,6 +2515,12 @@
         DeclContext *TU = Context.getTranslationUnitDecl();
         F.DeclContextInfos[TU].NameLookupTableData = Table;
         TU->setHasExternalVisibleStorage(true);
+      } else if (Decl *D = DeclsLoaded[ID - NUM_PREDEF_DECL_IDS]) {
+        auto *DC = cast<DeclContext>(D);
+        DC->getPrimaryContext()->setHasExternalVisibleStorage(true);
+        auto *&LookupTable = F.DeclContextInfos[DC].NameLookupTableData;
+        delete LookupTable;
+        LookupTable = Table;
       } else
         PendingVisibleUpdates[ID].push_back(std::make_pair(Table, &F));
       break;
@@ -2237,9 +2567,9 @@
       break;
     }
 
-    case EXTERNAL_DEFINITIONS:
+    case EAGERLY_DESERIALIZED_DECLS:
       for (unsigned I = 0, N = Record.size(); I != N; ++I)
-        ExternalDefinitions.push_back(getGlobalDeclID(F, Record[I]));
+        EagerlyDeserializedDecls.push_back(getGlobalDeclID(F, Record[I]));
       break;
 
     case SPECIAL_TYPES:
@@ -2365,7 +2695,7 @@
       F.SLocEntryOffsets = (const uint32_t *)Blob.data();
       F.LocalNumSLocEntries = Record[0];
       unsigned SLocSpaceSize = Record[1];
-      llvm::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
+      std::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
           SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries,
                                               SLocSpaceSize);
       // Make our entry in the range map. BaseID is negative and growing, so
@@ -2384,9 +2714,9 @@
 
       // Initialize the remapping table.
       // Invalid stays invalid.
-      F.SLocRemap.insert(std::make_pair(0U, 0));
+      F.SLocRemap.insertOrReplace(std::make_pair(0U, 0));
       // This module. Base was 2 when being compiled.
-      F.SLocRemap.insert(std::make_pair(2U,
+      F.SLocRemap.insertOrReplace(std::make_pair(2U,
                                   static_cast<int>(F.SLocEntryBaseOffset - 2)));
       
       TotalNumSLocEntries += F.LocalNumSLocEntries;
@@ -2397,7 +2727,13 @@
       // Additional remapping information.
       const unsigned char *Data = (const unsigned char*)Blob.data();
       const unsigned char *DataEnd = Data + Blob.size();
-      
+
+      // If we see this entry before SOURCE_LOCATION_OFFSETS, add placeholders.
+      if (F.SLocRemap.find(0) == F.SLocRemap.end()) {
+        F.SLocRemap.insert(std::make_pair(0U, 0));
+        F.SLocRemap.insert(std::make_pair(2U, 1));
+      }
+
       // Continuous range maps we may be updating in our module.
       ContinuousRangeMap<uint32_t, int, 2>::Builder SLocRemap(F.SLocRemap);
       ContinuousRangeMap<uint32_t, int, 2>::Builder 
@@ -2414,7 +2750,8 @@
       ContinuousRangeMap<uint32_t, int, 2>::Builder TypeRemap(F.TypeRemap);
 
       while(Data < DataEnd) {
-        uint16_t Len = io::ReadUnalignedLE16(Data);
+        using namespace llvm::support;
+        uint16_t Len = endian::readNext<uint16_t, little, unaligned>(Data);
         StringRef Name = StringRef((const char*)Data, Len);
         Data += Len;
         ModuleFile *OM = ModuleMgr.lookup(Name);
@@ -2423,15 +2760,23 @@
           return true;
         }
 
-        uint32_t SLocOffset = io::ReadUnalignedLE32(Data);
-        uint32_t IdentifierIDOffset = io::ReadUnalignedLE32(Data);
-        uint32_t MacroIDOffset = io::ReadUnalignedLE32(Data);
-        uint32_t PreprocessedEntityIDOffset = io::ReadUnalignedLE32(Data);
-        uint32_t SubmoduleIDOffset = io::ReadUnalignedLE32(Data);
-        uint32_t SelectorIDOffset = io::ReadUnalignedLE32(Data);
-        uint32_t DeclIDOffset = io::ReadUnalignedLE32(Data);
-        uint32_t TypeIndexOffset = io::ReadUnalignedLE32(Data);
-        
+        uint32_t SLocOffset =
+            endian::readNext<uint32_t, little, unaligned>(Data);
+        uint32_t IdentifierIDOffset =
+            endian::readNext<uint32_t, little, unaligned>(Data);
+        uint32_t MacroIDOffset =
+            endian::readNext<uint32_t, little, unaligned>(Data);
+        uint32_t PreprocessedEntityIDOffset =
+            endian::readNext<uint32_t, little, unaligned>(Data);
+        uint32_t SubmoduleIDOffset =
+            endian::readNext<uint32_t, little, unaligned>(Data);
+        uint32_t SelectorIDOffset =
+            endian::readNext<uint32_t, little, unaligned>(Data);
+        uint32_t DeclIDOffset =
+            endian::readNext<uint32_t, little, unaligned>(Data);
+        uint32_t TypeIndexOffset =
+            endian::readNext<uint32_t, little, unaligned>(Data);
+
         // Source location offset is mapped to OM->SLocEntryBaseOffset.
         SLocRemap.insert(std::make_pair(SLocOffset,
           static_cast<int>(OM->SLocEntryBaseOffset - SLocOffset)));
@@ -2569,6 +2914,7 @@
         Error("invalid DECL_UPDATE_OFFSETS block in AST file");
         return true;
       }
+      // FIXME: If we've already loaded the decl, perform the updates now.
       for (unsigned I = 0, N = Record.size(); I != N; I += 2)
         DeclUpdateOffsets[getGlobalDeclID(F, Record[I])]
           .push_back(std::make_pair(&F, Record[I+1]));
@@ -2690,9 +3036,11 @@
         // If we aren't loading a module (which has its own exports), make
         // all of the imported modules visible.
         // FIXME: Deal with macros-only imports.
-        for (unsigned I = 0, N = Record.size(); I != N; ++I) {
-          if (unsigned GlobalID = getGlobalSubmoduleID(F, Record[I]))
-            ImportedModules.push_back(GlobalID);
+        for (unsigned I = 0, N = Record.size(); I != N; /**/) {
+          unsigned GlobalID = getGlobalSubmoduleID(F, Record[I++]);
+          SourceLocation Loc = ReadSourceLocation(F, Record, I);
+          if (GlobalID)
+            ImportedModules.push_back(ImportedSubmodule(GlobalID, Loc));
         }
       }
       break;
@@ -2791,30 +3139,25 @@
 }
 
 void ASTReader::makeNamesVisible(const HiddenNames &Names, Module *Owner) {
-  for (unsigned I = 0, N = Names.size(); I != N; ++I) {
-    switch (Names[I].getKind()) {
-    case HiddenName::Declaration: {
-      Decl *D = Names[I].getDecl();
-      bool wasHidden = D->Hidden;
-      D->Hidden = false;
+  for (unsigned I = 0, N = Names.HiddenDecls.size(); I != N; ++I) {
+    Decl *D = Names.HiddenDecls[I];
+    bool wasHidden = D->Hidden;
+    D->Hidden = false;
 
-      if (wasHidden && SemaObj) {
-        if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D)) {
-          moveMethodToBackOfGlobalList(*SemaObj, Method);
-        }
+    if (wasHidden && SemaObj) {
+      if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D)) {
+        moveMethodToBackOfGlobalList(*SemaObj, Method);
       }
-      break;
-    }
-    case HiddenName::MacroVisibility: {
-      std::pair<IdentifierInfo *, MacroDirective *> Macro = Names[I].getMacro();
-      installImportedMacro(Macro.first, Macro.second, Owner);
-      break;
-    }
     }
   }
+
+  for (HiddenMacrosMap::const_iterator I = Names.HiddenMacros.begin(),
+                                       E = Names.HiddenMacros.end();
+       I != E; ++I)
+    installImportedMacro(I->first, I->second, Owner);
 }
 
-void ASTReader::makeModuleVisible(Module *Mod, 
+void ASTReader::makeModuleVisible(Module *Mod,
                                   Module::NameVisibilityKind NameVisibility,
                                   SourceLocation ImportLoc,
                                   bool Complain) {
@@ -2829,15 +3172,18 @@
       // there is nothing more to do.
       continue;
     }
-    
+
     if (!Mod->isAvailable()) {
       // Modules that aren't available cannot be made visible.
       continue;
     }
 
     // Update the module's name visibility.
+    if (NameVisibility >= Module::MacrosVisible &&
+        Mod->NameVisibility < Module::MacrosVisible)
+      Mod->MacroVisibilityLoc = ImportLoc;
     Mod->NameVisibility = NameVisibility;
-    
+
     // If we've already deserialized any names from this module,
     // mark them as visible.
     HiddenNamesMapType::iterator Hidden = HiddenNamesMap.find(Mod);
@@ -2899,6 +3245,17 @@
          !hasGlobalIndex() && TriedLoadingGlobalIndex;
 }
 
+static void updateModuleTimestamp(ModuleFile &MF) {
+  // Overwrite the timestamp file contents so that the file's mtime changes.
+  std::string TimestampFilename = MF.getTimestampFilename();
+  std::string ErrorInfo;
+  llvm::raw_fd_ostream OS(TimestampFilename.c_str(), ErrorInfo,
+                          llvm::sys::fs::F_Text);
+  if (!ErrorInfo.empty())
+    return;
+  OS << "Timestamp file\n";
+}
+
 ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
                                             ModuleKind Type,
                                             SourceLocation ImportLoc,
@@ -3057,6 +3414,21 @@
                        PreviousGeneration);
   }
 
+  if (PP.getHeaderSearchInfo()
+          .getHeaderSearchOpts()
+          .ModulesValidateOncePerBuildSession) {
+    // Now we are certain that the module and all modules it depends on are
+    // up to date.  Create or update timestamp files for modules that are
+    // located in the module cache (not for PCH files that could be anywhere
+    // in the filesystem).
+    for (unsigned I = 0, N = Loaded.size(); I != N; ++I) {
+      ImportedModule &M = Loaded[I];
+      if (M.Mod->Kind == MK_Module) {
+        updateModuleTimestamp(*M.Mod);
+      }
+    }
+  }
+
   return Success;
 }
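The timestamp files written above back the -fmodules-validate-once-per-build-session feature: once a module in the cache has been validated during the current build session, its inputs need not be re-stat()ed again. A minimal sketch of the consumer side of such a timestamp, using plain POSIX stat() and an assumed BuildSessionTimestamp value (illustrative only, not the patch's actual validation code):

// Sketch only: true when TimestampFile was touched during the current build
// session, i.e. this module was already validated once this session.
#include <sys/stat.h>
#include <ctime>

static bool alreadyValidatedThisSession(const char *TimestampFile,
                                        time_t BuildSessionTimestamp) {
  struct stat Status;
  if (::stat(TimestampFile, &Status) != 0)
    return false; // No timestamp yet: validate, then write one (as above).
  return Status.st_mtime >= BuildSessionTimestamp;
}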
 
@@ -3177,7 +3549,7 @@
     case AST_BLOCK_ID:
       if (!HaveReadControlBlock) {
         if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
-          Diag(diag::warn_pch_version_too_old);
+          Diag(diag::err_pch_version_too_old);
         return VersionMismatch;
       }
 
@@ -3323,12 +3695,14 @@
     Context.setcudaConfigureCallDecl(
                            cast<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[0])));
   }
-  
+
   // Re-export any modules that were imported by a non-module AST file.
-  for (unsigned I = 0, N = ImportedModules.size(); I != N; ++I) {
-    if (Module *Imported = getSubmodule(ImportedModules[I]))
+  // FIXME: This does not make macro-only imports visible again. It also doesn't
+  // make #includes mapped to module imports visible.
+  for (auto &Import : ImportedModules) {
+    if (Module *Imported = getSubmodule(Import.ID))
       makeModuleVisible(Imported, Module::AllVisible,
-                        /*ImportLoc=*/SourceLocation(),
+                        /*ImportLoc=*/Import.ImportLoc,
                         /*Complain=*/false);
   }
   ImportedModules.clear();
@@ -3381,7 +3755,7 @@
                                              DiagnosticsEngine &Diags) {
   // Open the AST file.
   std::string ErrStr;
-  OwningPtr<llvm::MemoryBuffer> Buffer;
+  std::unique_ptr<llvm::MemoryBuffer> Buffer;
   Buffer.reset(FileMgr.getBufferForFile(ASTFileName, &ErrStr));
   if (!Buffer) {
     Diags.Report(diag::err_fe_unable_to_read_pch_file) << ASTFileName << ErrStr;
@@ -3448,17 +3822,17 @@
     {
     }
 
-    virtual bool ReadLanguageOptions(const LangOptions &LangOpts,
-                                     bool Complain) {
+    bool ReadLanguageOptions(const LangOptions &LangOpts,
+                             bool Complain) override {
       return checkLanguageOptions(ExistingLangOpts, LangOpts, 0);
     }
-    virtual bool ReadTargetOptions(const TargetOptions &TargetOpts,
-                                   bool Complain) {
+    bool ReadTargetOptions(const TargetOptions &TargetOpts,
+                           bool Complain) override {
       return checkTargetOptions(ExistingTargetOpts, TargetOpts, 0);
     }
-    virtual bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
-                                         bool Complain,
-                                         std::string &SuggestedPredefines) {
+    bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+                                 bool Complain,
+                                 std::string &SuggestedPredefines) override {
       return checkPreprocessorOptions(ExistingPPOpts, PPOpts, 0, FileMgr,
                                       SuggestedPredefines, ExistingLangOpts);
     }
@@ -3470,7 +3844,7 @@
                                         ASTReaderListener &Listener) {
   // Open the AST file.
   std::string ErrStr;
-  OwningPtr<llvm::MemoryBuffer> Buffer;
+  std::unique_ptr<llvm::MemoryBuffer> Buffer;
   Buffer.reset(FileMgr.getBufferForFile(Filename, &ErrStr));
   if (!Buffer) {
     return true;
@@ -3496,6 +3870,7 @@
     return true;
 
   bool NeedsInputFiles = Listener.needsInputFileVisitation();
+  bool NeedsSystemInputFiles = Listener.needsSystemInputFileVisitation();
   BitstreamCursor InputFilesCursor;
   if (NeedsInputFiles) {
     InputFilesCursor = Stream;
@@ -3582,6 +3957,10 @@
       for (unsigned I = 0; I != NumInputFiles; ++I) {
         // Go find this input file.
         bool isSystemFile = I >= NumUserFiles;
+
+        if (isSystemFile && !NeedsSystemInputFiles)
+          break; // the rest are system input files
+
         BitstreamCursor &Cursor = InputFilesCursor;
         SavedStreamPosition SavedPosition(Cursor);
         Cursor.JumpToBit(InputFileOffs[I]);
@@ -3592,7 +3971,8 @@
         bool shouldContinue = false;
         switch ((InputFileRecordTypes)Cursor.readRecord(Code, Record, &Blob)) {
         case INPUT_FILE:
-          shouldContinue = Listener.visitInputFile(Blob, isSystemFile);
+          bool Overridden = static_cast<bool>(Record[3]);
+          shouldContinue = Listener.visitInputFile(Blob, isSystemFile, Overridden);
           break;
         }
         if (!shouldContinue)
@@ -3663,15 +4043,17 @@
       }
       
       StringRef Name = Blob;
-      SubmoduleID GlobalID = getGlobalSubmoduleID(F, Record[0]);
-      SubmoduleID Parent = getGlobalSubmoduleID(F, Record[1]);
-      bool IsFramework = Record[2];
-      bool IsExplicit = Record[3];
-      bool IsSystem = Record[4];
-      bool InferSubmodules = Record[5];
-      bool InferExplicitSubmodules = Record[6];
-      bool InferExportWildcard = Record[7];
-      bool ConfigMacrosExhaustive = Record[8];
+      unsigned Idx = 0;
+      SubmoduleID GlobalID = getGlobalSubmoduleID(F, Record[Idx++]);
+      SubmoduleID Parent = getGlobalSubmoduleID(F, Record[Idx++]);
+      bool IsFramework = Record[Idx++];
+      bool IsExplicit = Record[Idx++];
+      bool IsSystem = Record[Idx++];
+      bool IsExternC = Record[Idx++];
+      bool InferSubmodules = Record[Idx++];
+      bool InferExplicitSubmodules = Record[Idx++];
+      bool InferExportWildcard = Record[Idx++];
+      bool ConfigMacrosExhaustive = Record[Idx++];
 
       Module *ParentModule = 0;
       if (Parent)
@@ -3707,6 +4089,7 @@
       
       CurrentModule->IsFromModuleFile = true;
       CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem;
+      CurrentModule->IsExternC = IsExternC;
       CurrentModule->InferSubmodules = InferSubmodules;
       CurrentModule->InferExplicitSubmodules = InferExplicitSubmodules;
       CurrentModule->InferExportWildcard = InferExportWildcard;
@@ -4004,7 +4387,6 @@
   TargetOpts.Triple = ReadString(Record, Idx);
   TargetOpts.CPU = ReadString(Record, Idx);
   TargetOpts.ABI = ReadString(Record, Idx);
-  TargetOpts.CXXABI = ReadString(Record, Idx);
   TargetOpts.LinkerVersion = ReadString(Record, Idx);
   for (unsigned N = Record[Idx++]; N; --N) {
     TargetOpts.FeaturesAsWritten.push_back(ReadString(Record, Idx));
@@ -4068,6 +4450,7 @@
 
   HSOpts.ResourceDir = ReadString(Record, Idx);
   HSOpts.ModuleCachePath = ReadString(Record, Idx);
+  HSOpts.ModuleUserBuildPath = ReadString(Record, Idx);
   HSOpts.DisableModuleHash = Record[Idx++];
   HSOpts.UseBuiltinIncludes = Record[Idx++];
   HSOpts.UseStandardSystemIncludes = Record[Idx++];
@@ -4548,6 +4931,16 @@
     return DT;
   }
 
+  case TYPE_ADJUSTED: {
+    if (Record.size() != 2) {
+      Error("Incorrect encoding of adjusted type");
+      return QualType();
+    }
+    QualType OriginalTy = readType(*Loc.F, Record, Idx);
+    QualType AdjustedTy = readType(*Loc.F, Record, Idx);
+    return Context.getAdjustedType(OriginalTy, AdjustedTy);
+  }
+
   case TYPE_BLOCK_POINTER: {
     if (Record.size() != 1) {
       Error("Incorrect encoding of block pointer type");
@@ -4671,23 +5064,8 @@
     EPI.HasTrailingReturn = Record[Idx++];
     EPI.TypeQuals = Record[Idx++];
     EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
-    ExceptionSpecificationType EST =
-        static_cast<ExceptionSpecificationType>(Record[Idx++]);
-    EPI.ExceptionSpecType = EST;
-    SmallVector<QualType, 2> Exceptions;
-    if (EST == EST_Dynamic) {
-      EPI.NumExceptions = Record[Idx++];
-      for (unsigned I = 0; I != EPI.NumExceptions; ++I)
-        Exceptions.push_back(readType(*Loc.F, Record, Idx));
-      EPI.Exceptions = Exceptions.data();
-    } else if (EST == EST_ComputedNoexcept) {
-      EPI.NoexceptExpr = ReadExpr(*Loc.F);
-    } else if (EST == EST_Uninstantiated) {
-      EPI.ExceptionSpecDecl = ReadDeclAs<FunctionDecl>(*Loc.F, Record, Idx);
-      EPI.ExceptionSpecTemplate = ReadDeclAs<FunctionDecl>(*Loc.F, Record, Idx);
-    } else if (EST == EST_Unevaluated) {
-      EPI.ExceptionSpecDecl = ReadDeclAs<FunctionDecl>(*Loc.F, Record, Idx);
-    }
+    SmallVector<QualType, 8> ExceptionStorage;
+    readExceptionSpec(*Loc.F, ExceptionStorage, EPI, Record, Idx);
     return Context.getFunctionType(ResultType, ParamTypes, EPI);
   }
 
@@ -4837,9 +5215,9 @@
     unsigned Idx = 0;
     QualType Parm = readType(*Loc.F, Record, Idx);
     QualType Replacement = readType(*Loc.F, Record, Idx);
-    return
-      Context.getSubstTemplateTypeParmType(cast<TemplateTypeParmType>(Parm),
-                                            Replacement);
+    return Context.getSubstTemplateTypeParmType(
+        cast<TemplateTypeParmType>(Parm),
+        Context.getCanonicalType(Replacement));
   }
 
   case TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK: {
@@ -4942,6 +5320,29 @@
   llvm_unreachable("Invalid TypeCode!");
 }
 
+void ASTReader::readExceptionSpec(ModuleFile &ModuleFile,
+                                  SmallVectorImpl<QualType> &Exceptions,
+                                  FunctionProtoType::ExtProtoInfo &EPI,
+                                  const RecordData &Record, unsigned &Idx) {
+  ExceptionSpecificationType EST =
+      static_cast<ExceptionSpecificationType>(Record[Idx++]);
+  EPI.ExceptionSpecType = EST;
+  if (EST == EST_Dynamic) {
+    EPI.NumExceptions = Record[Idx++];
+    for (unsigned I = 0; I != EPI.NumExceptions; ++I)
+      Exceptions.push_back(readType(ModuleFile, Record, Idx));
+    EPI.Exceptions = Exceptions.data();
+  } else if (EST == EST_ComputedNoexcept) {
+    EPI.NoexceptExpr = ReadExpr(ModuleFile);
+  } else if (EST == EST_Uninstantiated) {
+    EPI.ExceptionSpecDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+    EPI.ExceptionSpecTemplate =
+        ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+  } else if (EST == EST_Unevaluated) {
+    EPI.ExceptionSpecDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+  }
+}
+
 class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
   ASTReader &Reader;
   ModuleFile &F;
@@ -4997,6 +5398,9 @@
 void TypeLocReader::VisitDecayedTypeLoc(DecayedTypeLoc TL) {
   // nothing to do
 }
+void TypeLocReader::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
+  // nothing to do
+}
 void TypeLocReader::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
   TL.setCaretLoc(ReadSourceLocation(Record, Idx));
 }
@@ -5046,8 +5450,8 @@
   TL.setLParenLoc(ReadSourceLocation(Record, Idx));
   TL.setRParenLoc(ReadSourceLocation(Record, Idx));
   TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx));
-  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) {
-    TL.setArg(i, ReadDeclAs<ParmVarDecl>(Record, Idx));
+  for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i) {
+    TL.setParam(i, ReadDeclAs<ParmVarDecl>(Record, Idx));
   }
 }
 void TypeLocReader::VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc TL) {
@@ -5924,15 +6328,23 @@
                                        ASTConsumer *Consumer) {
   assert(ImplD && Consumer);
 
-  for (ObjCImplDecl::method_iterator
-         I = ImplD->meth_begin(), E = ImplD->meth_end(); I != E; ++I)
-    Consumer->HandleInterestingDecl(DeclGroupRef(*I));
+  for (auto *I : ImplD->methods())
+    Consumer->HandleInterestingDecl(DeclGroupRef(I));
 
   Consumer->HandleInterestingDecl(DeclGroupRef(ImplD));
 }
 
 void ASTReader::PassInterestingDeclsToConsumer() {
   assert(Consumer);
+
+  if (PassingDeclsToConsumer)
+    return;
+
+  // Guard variable to avoid recursively redoing the process of passing
+  // decls to the consumer.
+  SaveAndRestore<bool> GuardPassingDeclsToConsumer(PassingDeclsToConsumer,
+                                                   true);
+
   while (!InterestingDecls.empty()) {
     Decl *D = InterestingDecls.front();
     InterestingDecls.pop_front();
@@ -5954,12 +6366,12 @@
   if (!Consumer)
     return;
 
-  for (unsigned I = 0, N = ExternalDefinitions.size(); I != N; ++I) {
+  for (unsigned I = 0, N = EagerlyDeserializedDecls.size(); I != N; ++I) {
     // Force deserialization of this decl, which will cause it to be queued for
     // passing to the consumer.
-    GetDecl(ExternalDefinitions[I]);
+    GetDecl(EagerlyDeserializedDecls[I]);
   }
-  ExternalDefinitions.clear();
+  EagerlyDeserializedDecls.clear();
 
   PassInterestingDeclsToConsumer();
 }
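The re-entrancy guard added to PassInterestingDeclsToConsumer() above (and dropped from FinishedDeserializing() later in this file) is the standard pattern built on llvm::SaveAndRestore. A standalone sketch of that pattern; the Worker type and its queue are illustrative, not Clang code:

#include "llvm/Support/SaveAndRestore.h"
#include <deque>

struct Worker {
  std::deque<int> Queue;
  bool Draining;

  Worker() : Draining(false) {}

  void drain() {
    if (Draining)
      return; // A caller higher up the stack is already draining the queue.
    llvm::SaveAndRestore<bool> Guard(Draining, true); // Reset on every exit.
    while (!Queue.empty()) {
      int Item = Queue.front();
      Queue.pop_front();
      process(Item); // May enqueue more work or call drain() recursively.
    }
  }

  void process(int /*Item*/) {}
};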
@@ -6211,7 +6623,7 @@
   public:
     explicit ASTIdentifierIterator(const ASTReader &Reader);
 
-    virtual StringRef Next();
+    StringRef Next() override;
   };
 }
 
@@ -7298,6 +7710,7 @@
        I = CommentsCursors.begin(),
        E = CommentsCursors.end();
        I != E; ++I) {
+    Comments.clear();
     BitstreamCursor &Cursor = I->first;
     serialization::ModuleFile &F = *I->second;
     SavedStreamPosition SavedPosition(Cursor);
@@ -7306,7 +7719,7 @@
     while (true) {
       llvm::BitstreamEntry Entry =
         Cursor.advanceSkippingSubblocks(BitstreamCursor::AF_DontPopBlockAtEnd);
-      
+
       switch (Entry.Kind) {
       case llvm::BitstreamEntry::SubBlock: // Handled for us already.
       case llvm::BitstreamEntry::Error:
@@ -7336,9 +7749,9 @@
       }
       }
     }
-  NextCursor:;
+  NextCursor:
+    Context.Comments.addDeserializedComments(Comments);
   }
-  Context.Comments.addCommentsToFront(Comments);
 }
 
 void ASTReader::finishPendingActions() {
@@ -7352,14 +7765,14 @@
     TopLevelDeclsMap TopLevelDecls;
 
     while (!PendingIdentifierInfos.empty()) {
-      // FIXME: std::move
       IdentifierInfo *II = PendingIdentifierInfos.back().first;
-      SmallVector<uint32_t, 4> DeclIDs = PendingIdentifierInfos.back().second;
+      SmallVector<uint32_t, 4> DeclIDs =
+          std::move(PendingIdentifierInfos.back().second);
       PendingIdentifierInfos.pop_back();
 
       SetGloballyVisibleDecls(II, DeclIDs, &TopLevelDecls[II]);
     }
-  
+
     // Load pending declaration chains.
     for (unsigned I = 0; I != PendingDeclChains.size(); ++I) {
       loadPendingDeclChain(PendingDeclChains[I]);
@@ -7382,14 +7795,14 @@
       SmallVector<PendingMacroInfo, 2> GlobalIDs;
       GlobalIDs.swap(PendingMacroIDs.begin()[I].second);
       // Initialize the macro history from chained-PCHs ahead of module imports.
-      for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx !=  NumIDs;
+      for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
            ++IDIdx) {
         const PendingMacroInfo &Info = GlobalIDs[IDIdx];
         if (Info.M->Kind != MK_Module)
           resolvePendingMacro(II, Info);
       }
       // Handle module imports.
-      for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx !=  NumIDs;
+      for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
            ++IDIdx) {
         const PendingMacroInfo &Info = GlobalIDs[IDIdx];
         if (Info.M->Kind == MK_Module)
@@ -7430,16 +7843,14 @@
       llvm::SmallVector<const NamedDecl*, 4> Candidates;
       for (DeclContext::lookup_iterator I = R.begin(), E = R.end();
            !Found && I != E; ++I) {
-        for (Decl::redecl_iterator RI = (*I)->redecls_begin(),
-                                   RE = (*I)->redecls_end();
-             RI != RE; ++RI) {
-          if ((*RI)->getLexicalDeclContext() == CanonDef) {
+        for (auto RI : (*I)->redecls()) {
+          if (RI->getLexicalDeclContext() == CanonDef) {
             // This declaration is present in the canonical definition. If it's
             // in the same redecl chain, it's the one we're looking for.
-            if ((*RI)->getCanonicalDecl() == DCanon)
+            if (RI->getCanonicalDecl() == DCanon)
               Found = true;
             else
-              Candidates.push_back(cast<NamedDecl>(*RI));
+              Candidates.push_back(cast<NamedDecl>(RI));
             break;
           }
         }
@@ -7480,44 +7891,35 @@
         const_cast<TagType*>(TagT)->decl = TD;
       }
       
-      if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(*D)) {
-        for (CXXRecordDecl::redecl_iterator R = RD->redecls_begin(),
-                                         REnd = RD->redecls_end();
-             R != REnd; ++R)
-          cast<CXXRecordDecl>(*R)->DefinitionData = RD->DefinitionData;
+      if (auto RD = dyn_cast<CXXRecordDecl>(*D)) {
+        for (auto R : RD->redecls())
+          cast<CXXRecordDecl>(R)->DefinitionData = RD->DefinitionData;
         
       }
 
       continue;
     }
     
-    if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(*D)) {
+    if (auto ID = dyn_cast<ObjCInterfaceDecl>(*D)) {
       // Make sure that the ObjCInterfaceType points at the definition.
       const_cast<ObjCInterfaceType *>(cast<ObjCInterfaceType>(ID->TypeForDecl))
         ->Decl = ID;
       
-      for (ObjCInterfaceDecl::redecl_iterator R = ID->redecls_begin(),
-                                           REnd = ID->redecls_end();
-           R != REnd; ++R)
+      for (auto R : ID->redecls())
         R->Data = ID->Data;
       
       continue;
     }
     
-    if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(*D)) {
-      for (ObjCProtocolDecl::redecl_iterator R = PD->redecls_begin(),
-                                          REnd = PD->redecls_end();
-           R != REnd; ++R)
+    if (auto PD = dyn_cast<ObjCProtocolDecl>(*D)) {
+      for (auto R : PD->redecls())
         R->Data = PD->Data;
       
       continue;
     }
     
-    RedeclarableTemplateDecl *RTD
-      = cast<RedeclarableTemplateDecl>(*D)->getCanonicalDecl();
-    for (RedeclarableTemplateDecl::redecl_iterator R = RTD->redecls_begin(),
-                                                REnd = RTD->redecls_end();
-         R != REnd; ++R)
+    auto RTD = cast<RedeclarableTemplateDecl>(*D)->getCanonicalDecl();
+    for (auto R : RTD->redecls())
       R->Common = RTD->Common;
   }
   PendingDefinitions.clear();
@@ -7553,20 +7955,10 @@
   }
   --NumCurrentElementsDeserializing;
 
-  if (NumCurrentElementsDeserializing == 0 &&
-      Consumer && !PassingDeclsToConsumer) {
-    // Guard variable to avoid recursively redoing the process of passing
-    // decls to consumer.
-    SaveAndRestore<bool> GuardPassingDeclsToConsumer(PassingDeclsToConsumer,
-                                                     true);
-
-    while (!InterestingDecls.empty()) {
-      // We are not in recursive loading, so it's safe to pass the "interesting"
-      // decls to the consumer.
-      Decl *D = InterestingDecls.front();
-      InterestingDecls.pop_front();
-      PassInterestingDeclToConsumer(D);
-    }
+  if (NumCurrentElementsDeserializing == 0 && Consumer) {
+    // We are not in recursive loading, so it's safe to pass the "interesting"
+    // decls to the consumer.
+    PassInterestingDeclsToConsumer();
   }
 }
 
@@ -7587,13 +7979,18 @@
 
 ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
                      StringRef isysroot, bool DisableValidation,
-                     bool AllowASTWithCompilerErrors, bool UseGlobalIndex)
+                     bool AllowASTWithCompilerErrors,
+                     bool AllowConfigurationMismatch,
+                     bool ValidateSystemInputs,
+                     bool UseGlobalIndex)
   : Listener(new PCHValidator(PP, *this)), DeserializationListener(0),
     SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
     Diags(PP.getDiagnostics()), SemaObj(0), PP(PP), Context(Context),
     Consumer(0), ModuleMgr(PP.getFileManager()),
     isysroot(isysroot), DisableValidation(DisableValidation),
     AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
+    AllowConfigurationMismatch(AllowConfigurationMismatch),
+    ValidateSystemInputs(ValidateSystemInputs),
     UseGlobalIndex(UseGlobalIndex), TriedLoadingGlobalIndex(false),
     CurrentGeneration(0), CurrSwitchCaseStmts(&SwitchCaseStmts),
     NumSLocEntriesRead(0), TotalNumSLocEntries(0), 
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index b8102d8..7d95072 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -356,11 +356,14 @@
 }
 
 void ASTDeclReader::VisitDecl(Decl *D) {
-  if (D->isTemplateParameter()) {
+  if (D->isTemplateParameter() || D->isTemplateParameterPack() ||
+      isa<ParmVarDecl>(D)) {
     // We don't want to deserialize the DeclContext of a template
-    // parameter immediately, because the template parameter might be
-    // used in the formulation of its DeclContext. Use the translation
-    // unit DeclContext as a placeholder.
+    // parameter or of a parameter of a function template immediately. These
+    // entities might be used in the formulation of their DeclContext (for
+    // example, a function parameter can be used in decltype() in the trailing
+    // return type of the function). Use the translation unit DeclContext as a
+    // placeholder.
     GlobalDeclID SemaDCIDForTemplateParmDecl = ReadDeclID(Record, Idx);
     GlobalDeclID LexicalDCIDForTemplateParmDecl = ReadDeclID(Record, Idx);
     Reader.addPendingDeclContextInfo(D,
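A concrete case of the situation the comment above describes, where a function parameter participates in forming its own DeclContext via the trailing return type (illustration only):

// The parameter 'x' is referenced in decltype() inside the trailing return
// type, i.e. while the type of f (and hence its DeclContext) is still being
// formed.
template <typename T>
auto f(T x) -> decltype(x + 1) {
  return x + 1;
}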
@@ -409,7 +412,7 @@
           
           // Note that this declaration was hidden because its owning module is 
           // not yet visible.
-          Reader.HiddenNamesMap[Owner].push_back(D);
+          Reader.HiddenNamesMap[Owner].HiddenDecls.push_back(D);
         }
       }
     }
@@ -709,8 +712,8 @@
   MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record[Idx++]);
   MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
   MD->SetRelatedResultType(Record[Idx++]);
-  MD->setResultType(Reader.readType(F, Record, Idx));
-  MD->setResultTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+  MD->setReturnType(Reader.readType(F, Record, Idx));
+  MD->setReturnTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
   MD->DeclEndLoc = ReadSourceLocation(Record, Idx);
   unsigned NumParams = Record[Idx++];
   SmallVector<ParmVarDecl *, 16> Params;
@@ -755,6 +758,7 @@
     Data.SuperClassLoc = ReadSourceLocation(Record, Idx);
 
     Data.EndLoc = ReadSourceLocation(Record, Idx);
+    Data.HasDesignatedInitializers = Record[Idx++];
     
     // Read the directly referenced protocols and their SourceLocations.
     unsigned NumProtocols = Record[Idx++];
@@ -798,8 +802,6 @@
   IVD->setNextIvar(0);
   bool synth = Record[Idx++];
   IVD->setSynthesize(synth);
-  bool backingIvarReferencedInAccessor = Record[Idx++];
-  IVD->setBackingIvarReferencedInAccessor(backingIvarReferencedInAccessor);
 }
 
 void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
@@ -907,8 +909,8 @@
   D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
   D->setHasNonZeroConstructors(Record[Idx++]);
   D->setHasDestructors(Record[Idx++]);
-  llvm::tie(D->IvarInitializers, D->NumIvarInitializers)
-      = Reader.ReadCXXCtorInitializers(F, Record, Idx);
+  std::tie(D->IvarInitializers, D->NumIvarInitializers) =
+      Reader.ReadCXXCtorInitializers(F, Record, Idx);
 }
 
 
@@ -971,7 +973,7 @@
   VD->setCachedLinkage(VarLinkage);
 
   // Reconstruct the one piece of the IdentifierNamespace that we need.
-  if (VarLinkage != NoLinkage &&
+  if (VD->getStorageClass() == SC_Extern && VarLinkage != NoLinkage &&
       VD->getLexicalDeclContext()->isFunctionOrMethod())
     VD->setLocalExternDecl();
 
@@ -1186,6 +1188,7 @@
   Data.HasProtectedFields = Record[Idx++];
   Data.HasPublicFields = Record[Idx++];
   Data.HasMutableFields = Record[Idx++];
+  Data.HasVariantMembers = Record[Idx++];
   Data.HasOnlyCMembers = Record[Idx++];
   Data.HasInClassInitializer = Record[Idx++];
   Data.HasUninitializedReferenceMember = Record[Idx++];
@@ -1345,10 +1348,12 @@
 
 void ASTDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
   VisitCXXMethodDecl(D);
-  
+
+  if (auto *CD = ReadDeclAs<CXXConstructorDecl>(Record, Idx))
+    D->setInheritedConstructor(CD);
   D->IsExplicitSpecified = Record[Idx++];
-  llvm::tie(D->CtorInitializers, D->NumCtorInitializers)
-      = Reader.ReadCXXCtorInitializers(F, Record, Idx);
+  std::tie(D->CtorInitializers, D->NumCtorInitializers) =
+      Reader.ReadCXXCtorInitializers(F, Record, Idx);
 }
 
 void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
@@ -1981,7 +1986,8 @@
 
   if (isa<FileScopeAsmDecl>(D) || 
       isa<ObjCProtocolDecl>(D) || 
-      isa<ObjCImplDecl>(D))
+      isa<ObjCImplDecl>(D) ||
+      isa<ImportDecl>(D))
     return true;
   if (VarDecl *Var = dyn_cast<VarDecl>(D))
     return Var->isFileVarDecl() &&
@@ -2605,12 +2611,10 @@
       // There are updates. This means the context has external visible
       // storage, even if the original stored version didn't.
       LookupDC->setHasExternalVisibleStorage(true);
-      DeclContextVisibleUpdates &U = I->second;
-      for (DeclContextVisibleUpdates::iterator UI = U.begin(), UE = U.end();
-           UI != UE; ++UI) {
-        DeclContextInfo &Info = UI->second->DeclContextInfos[DC];
+      for (const auto &Update : I->second) {
+        DeclContextInfo &Info = Update.second->DeclContextInfos[DC];
         delete Info.NameLookupTableData;
-        Info.NameLookupTableData = UI->first;
+        Info.NameLookupTableData = Update.first;
       }
       PendingVisibleUpdates.erase(I);
     }
@@ -2642,6 +2646,7 @@
   DeclUpdateOffsetsMap::iterator UpdI = DeclUpdateOffsets.find(ID);
   if (UpdI != DeclUpdateOffsets.end()) {
     FileOffsetsTy &UpdateOffsets = UpdI->second;
+    bool WasInteresting = isConsumerInterestedIn(D, false);
     for (FileOffsetsTy::iterator
          I = UpdateOffsets.begin(), E = UpdateOffsets.end(); I != E; ++I) {
       ModuleFile *F = I->first;
@@ -2654,33 +2659,23 @@
       unsigned RecCode = Cursor.readRecord(Code, Record);
       (void)RecCode;
       assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");
-      
+
       unsigned Idx = 0;
       ASTDeclReader Reader(*this, *F, ID, 0, Record, Idx);
       Reader.UpdateDecl(D, *F, Record);
+
+      // We might have made this declaration interesting. If so, remember that
+      // we need to hand it off to the consumer.
+      if (!WasInteresting &&
+          isConsumerInterestedIn(D, Reader.hasPendingBody())) {
+        InterestingDecls.push_back(D);
+        WasInteresting = true;
+      }
     }
   }
 }
 
 namespace {
-  struct CompareLocalRedeclarationsInfoToID {
-    bool operator()(const LocalRedeclarationsInfo &X, DeclID Y) {
-      return X.FirstID < Y;
-    }
-
-    bool operator()(DeclID X, const LocalRedeclarationsInfo &Y) {
-      return X < Y.FirstID;
-    }
-
-    bool operator()(const LocalRedeclarationsInfo &X, 
-                    const LocalRedeclarationsInfo &Y) {
-      return X.FirstID < Y.FirstID;
-    }
-    bool operator()(DeclID X, DeclID Y) {
-      return X < Y;
-    }
-  };
-  
   /// \brief Module visitor class that finds all of the redeclarations of a 
   /// 
   class RedeclChainVisitor {
@@ -2724,10 +2719,11 @@
       
       // Perform a binary search to find the local redeclarations for this
       // declaration (if any).
+      const LocalRedeclarationsInfo Compare = { ID, 0 };
       const LocalRedeclarationsInfo *Result
         = std::lower_bound(M.RedeclarationsMap,
                            M.RedeclarationsMap + M.LocalNumRedeclarationsInMap, 
-                           ID, CompareLocalRedeclarationsInfoToID());
+                           Compare);
       if (Result == M.RedeclarationsMap + M.LocalNumRedeclarationsInMap ||
           Result->FirstID != ID) {
         // If we have a previously-canonical singleton declaration that was 
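Both this hunk and the matching ObjCCategoriesMap lookup below replace a hand-written heterogeneous comparator with a sentinel key and the element type's own operator< (presumably defined elsewhere in this change). A minimal standalone sketch of that pattern, with an illustrative Info type:

#include <algorithm>

struct Info {
  unsigned FirstID;
  unsigned Offset;
};

inline bool operator<(const Info &L, const Info &R) {
  return L.FirstID < R.FirstID;
}

const Info *findByID(const Info *Begin, const Info *End, unsigned ID) {
  const Info Key = { ID, 0 };             // Sentinel; payload is a don't-care.
  const Info *It = std::lower_bound(Begin, End, Key);
  return (It != End && It->FirstID == ID) ? It : 0;
}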
@@ -2802,24 +2798,6 @@
 }
 
 namespace {
-  struct CompareObjCCategoriesInfo {
-    bool operator()(const ObjCCategoriesInfo &X, DeclID Y) {
-      return X.DefinitionID < Y;
-    }
-    
-    bool operator()(DeclID X, const ObjCCategoriesInfo &Y) {
-      return X < Y.DefinitionID;
-    }
-    
-    bool operator()(const ObjCCategoriesInfo &X, 
-                    const ObjCCategoriesInfo &Y) {
-      return X.DefinitionID < Y.DefinitionID;
-    }
-    bool operator()(DeclID X, DeclID Y) {
-      return X < Y;
-    }
-  };
-
   /// \brief Given an ObjC interface, goes through the modules and links to the
   /// interface all the categories for it.
   class ObjCCategoriesVisitor {
@@ -2881,15 +2859,12 @@
         Tail(0) 
     {
       // Populate the name -> category map with the set of known categories.
-      for (ObjCInterfaceDecl::known_categories_iterator
-             Cat = Interface->known_categories_begin(),
-             CatEnd = Interface->known_categories_end();
-           Cat != CatEnd; ++Cat) {
+      for (auto *Cat : Interface->known_categories()) {
         if (Cat->getDeclName())
-          NameCategoryMap[Cat->getDeclName()] = *Cat;
+          NameCategoryMap[Cat->getDeclName()] = Cat;
         
         // Keep track of the tail of the category list.
-        Tail = *Cat;
+        Tail = Cat;
       }
     }
 
@@ -2912,10 +2887,11 @@
 
       // Perform a binary search to find the local redeclarations for this
       // declaration (if any).
+      const ObjCCategoriesInfo Compare = { LocalID, 0 };
       const ObjCCategoriesInfo *Result
         = std::lower_bound(M.ObjCCategoriesMap,
                            M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap, 
-                           LocalID, CompareObjCCategoriesInfo());
+                           Compare);
       if (Result == M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap ||
           Result->DefinitionID != LocalID) {
         // We didn't find anything. If the class definition is in this module
@@ -2979,6 +2955,36 @@
           Reader.ReadSourceLocation(ModuleFile, Record, Idx));
       break;
 
+    case UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION: {
+      FunctionDecl *FD = cast<FunctionDecl>(D);
+      if (Reader.PendingBodies[FD])
+        // FIXME: Maybe check for ODR violations.
+        break;
+
+      if (Record[Idx++])
+        FD->setImplicitlyInline();
+      FD->setInnerLocStart(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+      if (auto *CD = dyn_cast<CXXConstructorDecl>(FD))
+        std::tie(CD->CtorInitializers, CD->NumCtorInitializers) =
+            Reader.ReadCXXCtorInitializers(ModuleFile, Record, Idx);
+      // Store the offset of the body so we can lazily load it later.
+      Reader.PendingBodies[FD] = GetCurrentCursorOffset();
+      HasPendingBody = true;
+      assert(Idx == Record.size() && "lazy body must be last");
+      break;
+    }
+
+    case UPD_CXX_RESOLVED_EXCEPTION_SPEC: {
+      auto *FD = cast<FunctionDecl>(D);
+      auto *FPT = FD->getType()->castAs<FunctionProtoType>();
+      auto EPI = FPT->getExtProtoInfo();
+      SmallVector<QualType, 8> ExceptionStorage;
+      Reader.readExceptionSpec(ModuleFile, ExceptionStorage, EPI, Record, Idx);
+      FD->setType(Reader.Context.getFunctionType(FPT->getReturnType(),
+                                                 FPT->getParamTypes(), EPI));
+      break;
+    }
+
     case UPD_CXX_DEDUCED_RETURN_TYPE: {
       FunctionDecl *FD = cast<FunctionDecl>(D);
       Reader.Context.adjustDeducedFunctionResultType(
@@ -2992,6 +2998,14 @@
       D->Used = true;
       break;
     }
+
+    case UPD_MANGLING_NUMBER:
+      Reader.Context.setManglingNumber(cast<NamedDecl>(D), Record[Idx++]);
+      break;
+
+    case UPD_STATIC_LOCAL_NUMBER:
+      Reader.Context.setStaticLocalNumber(cast<VarDecl>(D), Record[Idx++]);
+      break;
     }
   }
 }
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index 1115e8f..9a201d0 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -399,13 +399,11 @@
   S->getCapturedDecl()->setBody(S->getCapturedStmt());
 
   // Captures
-  for (CapturedStmt::capture_iterator I = S->capture_begin(),
-                                      E = S->capture_end();
-       I != E; ++I) {
-    I->VarAndKind.setPointer(ReadDeclAs<VarDecl>(Record, Idx));
-    I->VarAndKind
+  for (auto &I : S->captures()) {
+    I.VarAndKind.setPointer(ReadDeclAs<VarDecl>(Record, Idx));
+    I.VarAndKind
         .setInt(static_cast<CapturedStmt::VariableCaptureKind>(Record[Idx++]));
-    I->Loc = ReadSourceLocation(Record, Idx);
+    I.Loc = ReadSourceLocation(Record, Idx);
   }
 }
 
@@ -1484,33 +1482,15 @@
   E->NamingClass = ReadDeclAs<CXXRecordDecl>(Record, Idx);
 }
 
-void ASTStmtReader::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
-  VisitExpr(E);
-  E->UTT = (UnaryTypeTrait)Record[Idx++];
-  E->Value = (bool)Record[Idx++];
-  SourceRange Range = ReadSourceRange(Record, Idx);
-  E->Loc = Range.getBegin();
-  E->RParen = Range.getEnd();
-  E->QueriedType = GetTypeSourceInfo(Record, Idx);
-}
-
-void ASTStmtReader::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
-  VisitExpr(E);
-  E->BTT = (BinaryTypeTrait)Record[Idx++];
-  E->Value = (bool)Record[Idx++];
-  SourceRange Range = ReadSourceRange(Record, Idx);
-  E->Loc = Range.getBegin();
-  E->RParen = Range.getEnd();
-  E->LhsType = GetTypeSourceInfo(Record, Idx);
-  E->RhsType = GetTypeSourceInfo(Record, Idx);
-}
-
 void ASTStmtReader::VisitTypeTraitExpr(TypeTraitExpr *E) {
   VisitExpr(E);
   E->TypeTraitExprBits.NumArgs = Record[Idx++];
   E->TypeTraitExprBits.Kind = Record[Idx++];
   E->TypeTraitExprBits.Value = Record[Idx++];
-  
+  SourceRange Range = ReadSourceRange(Record, Idx);
+  E->Loc = Range.getBegin();
+  E->RParenLoc = Range.getEnd();
+
   TypeSourceInfo **Args = E->getTypeSourceInfos();
   for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
     Args[I] = GetTypeSourceInfo(Record, Idx);
@@ -1691,6 +1671,15 @@
 OMPClause *OMPClauseReader::readClause() {
   OMPClause *C;
   switch (Record[Idx++]) {
+  case OMPC_if:
+    C = new (Context) OMPIfClause();
+    break;
+  case OMPC_num_threads:
+    C = new (Context) OMPNumThreadsClause();
+    break;
+  case OMPC_safelen:
+    C = new (Context) OMPSafelenClause();
+    break;
   case OMPC_default:
     C = new (Context) OMPDefaultClause();
     break;
@@ -1703,6 +1692,9 @@
   case OMPC_shared:
     C = OMPSharedClause::CreateEmpty(Context, Record[Idx++]);
     break;
+  case OMPC_copyin:
+    C = OMPCopyinClause::CreateEmpty(Context, Record[Idx++]);
+    break;
   }
   Visit(C);
   C->setLocStart(Reader->ReadSourceLocation(Record, Idx));
@@ -1711,6 +1703,21 @@
   return C;
 }
 
+void OMPClauseReader::VisitOMPIfClause(OMPIfClause *C) {
+  C->setCondition(Reader->Reader.ReadSubExpr());
+  C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
+  C->setNumThreads(Reader->Reader.ReadSubExpr());
+  C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPSafelenClause(OMPSafelenClause *C) {
+  C->setSafelen(Reader->Reader.ReadSubExpr());
+  C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
 void OMPClauseReader::VisitOMPDefaultClause(OMPDefaultClause *C) {
   C->setDefaultKind(
        static_cast<OpenMPDefaultClauseKind>(Record[Idx++]));
@@ -1748,12 +1755,20 @@
   C->setVarRefs(Vars);
 }
 
+void OMPClauseReader::VisitOMPCopyinClause(OMPCopyinClause *C) {
+  C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+  unsigned NumVars = C->varlist_size();
+  SmallVector<Expr *, 16> Vars;
+  Vars.reserve(NumVars);
+  for (unsigned i = 0; i != NumVars; ++i)
+    Vars.push_back(Reader->Reader.ReadSubExpr());
+  C->setVarRefs(Vars);
+}
+
 //===----------------------------------------------------------------------===//
 // OpenMP Directives.
 //===----------------------------------------------------------------------===//
 void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
-  VisitStmt(E);
-  ++Idx;
   E->setLocStart(ReadSourceLocation(Record, Idx));
   E->setLocEnd(ReadSourceLocation(Record, Idx));
   OMPClauseReader ClauseReader(this, Reader.getContext(), Record, Idx);
@@ -1765,6 +1780,16 @@
 }
 
 void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
+  VisitStmt(D);
+  // The NumClauses field was read in ReadStmtFromStream.
+  ++Idx;
+  VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
+  VisitStmt(D);
+  // Two fields (NumClauses and CollapsedNum) were read in ReadStmtFromStream.
+  Idx += 2;
   VisitOMPExecutableDirective(D);
 }
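The new clause readers above correspond to OpenMP source of the following shape; a snippet like this (illustrative only) is what produces OMPIfClause, OMPNumThreadsClause, OMPCopyinClause, and OMPSafelenClause nodes, plus an OMPSimdDirective, for the reader to deserialize:

int g;
#pragma omp threadprivate(g)   // copyin requires a threadprivate variable

void work(int n) {
#pragma omp parallel if(n > 4) num_threads(4) copyin(g)
  { /* parallel region */ }

#pragma omp simd safelen(8)
  for (int i = 0; i < n; ++i) { /* vectorizable loop body */ }
}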
 
@@ -2240,13 +2265,22 @@
                                               DeclarationNameInfo(),
                                               0);
       break;
+
     case STMT_OMP_PARALLEL_DIRECTIVE:
       S =
         OMPParallelDirective::CreateEmpty(Context,
                                           Record[ASTStmtReader::NumStmtFields],
                                           Empty);
       break;
-        
+
+    case STMT_OMP_SIMD_DIRECTIVE: {
+      unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+      unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+      S = OMPSimdDirective::CreateEmpty(Context, NumClauses,
+                                        CollapsedNum, Empty);
+      break;
+    }
+
     case EXPR_CXX_OPERATOR_CALL:
       S = new (Context) CXXOperatorCallExpr(Context, Empty);
       break;
@@ -2254,11 +2288,11 @@
     case EXPR_CXX_MEMBER_CALL:
       S = new (Context) CXXMemberCallExpr(Context, Empty);
       break;
-        
+
     case EXPR_CXX_CONSTRUCT:
       S = new (Context) CXXConstructExpr(Empty);
       break;
-      
+
     case EXPR_CXX_TEMPORARY_OBJECT:
       S = new (Context) CXXTemporaryObjectExpr(Empty);
       break;
@@ -2393,14 +2427,6 @@
                                    ? Record[ASTStmtReader::NumExprFields + 1] 
                                    : 0);
       break;
-      
-    case EXPR_CXX_UNARY_TYPE_TRAIT:
-      S = new (Context) UnaryTypeTraitExpr(Empty);
-      break;
-
-    case EXPR_BINARY_TYPE_TRAIT:
-      S = new (Context) BinaryTypeTraitExpr(Empty);
-      break;
 
     case EXPR_TYPE_TRAIT:
       S = TypeTraitExpr::CreateDeserialized(Context, 
@@ -2491,7 +2517,7 @@
     StmtStack.push_back(S);
   }
 Done:
-  assert(StmtStack.size() > PrevNumStmts && "Read too many sub stmts!");
+  assert(StmtStack.size() > PrevNumStmts && "Read too many sub-stmts!");
   assert(StmtStack.size() == PrevNumStmts + 1 && "Extra expressions on stack!");
   return StmtStack.pop_back_val();
 }
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 405488c..e4b5cdb 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -17,6 +17,7 @@
 #include "clang/AST/Decl.h"
 #include "clang/AST/DeclContextInternals.h"
 #include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclLookups.h"
 #include "clang/AST/DeclTemplate.h"
 #include "clang/AST/Expr.h"
 #include "clang/AST/ExprCXX.h"
@@ -45,6 +46,7 @@
 #include "llvm/ADT/Hashing.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/EndianStream.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/Path.h"
@@ -113,6 +115,12 @@
   Code = TYPE_DECAYED;
 }
 
+void ASTTypeWriter::VisitAdjustedType(const AdjustedType *T) {
+  Writer.AddTypeRef(T->getOriginalType(), Record);
+  Writer.AddTypeRef(T->getAdjustedType(), Record);
+  Code = TYPE_ADJUSTED;
+}
+
 void ASTTypeWriter::VisitBlockPointerType(const BlockPointerType *T) {
   Writer.AddTypeRef(T->getPointeeType(), Record);
   Code = TYPE_BLOCK_POINTER;
@@ -173,7 +181,7 @@
 }
 
 void ASTTypeWriter::VisitFunctionType(const FunctionType *T) {
-  Writer.AddTypeRef(T->getResultType(), Record);
+  Writer.AddTypeRef(T->getReturnType(), Record);
   FunctionType::ExtInfo C = T->getExtInfo();
   Record.push_back(C.getNoReturn());
   Record.push_back(C.getHasRegParm());
@@ -188,15 +196,8 @@
   Code = TYPE_FUNCTION_NO_PROTO;
 }
 
-void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
-  VisitFunctionType(T);
-  Record.push_back(T->getNumArgs());
-  for (unsigned I = 0, N = T->getNumArgs(); I != N; ++I)
-    Writer.AddTypeRef(T->getArgType(I), Record);
-  Record.push_back(T->isVariadic());
-  Record.push_back(T->hasTrailingReturn());
-  Record.push_back(T->getTypeQuals());
-  Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
+static void addExceptionSpec(ASTWriter &Writer, const FunctionProtoType *T,
+                             ASTWriter::RecordDataImpl &Record) {
   Record.push_back(T->getExceptionSpecType());
   if (T->getExceptionSpecType() == EST_Dynamic) {
     Record.push_back(T->getNumExceptions());
@@ -210,6 +211,18 @@
   } else if (T->getExceptionSpecType() == EST_Unevaluated) {
     Writer.AddDeclRef(T->getExceptionSpecDecl(), Record);
   }
+}
+
+void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
+  VisitFunctionType(T);
+  Record.push_back(T->getNumParams());
+  for (unsigned I = 0, N = T->getNumParams(); I != N; ++I)
+    Writer.AddTypeRef(T->getParamType(I), Record);
+  Record.push_back(T->isVariadic());
+  Record.push_back(T->hasTrailingReturn());
+  Record.push_back(T->getTypeQuals());
+  Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
+  addExceptionSpec(Writer, T, Record);
   Code = TYPE_FUNCTION_PROTO;
 }
 
@@ -395,9 +408,8 @@
 void ASTTypeWriter::VisitObjCObjectType(const ObjCObjectType *T) {
   Writer.AddTypeRef(T->getBaseType(), Record);
   Record.push_back(T->getNumProtocols());
-  for (ObjCObjectType::qual_iterator I = T->qual_begin(),
-       E = T->qual_end(); I != E; ++I)
-    Writer.AddDeclRef(*I, Record);
+  for (const auto *I : T->quals())
+    Writer.AddDeclRef(I, Record);
   Code = TYPE_OBJC_OBJECT;
 }
 
@@ -455,6 +467,9 @@
 void TypeLocWriter::VisitDecayedTypeLoc(DecayedTypeLoc TL) {
   // nothing to do
 }
+void TypeLocWriter::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
+  // nothing to do
+}
 void TypeLocWriter::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
   Writer.AddSourceLocation(TL.getCaretLoc(), Record);
 }
@@ -503,8 +518,8 @@
   Writer.AddSourceLocation(TL.getLParenLoc(), Record);
   Writer.AddSourceLocation(TL.getRParenLoc(), Record);
   Writer.AddSourceLocation(TL.getLocalRangeEnd(), Record);
-  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
-    Writer.AddDeclRef(TL.getArg(i), Record);
+  for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i)
+    Writer.AddDeclRef(TL.getParam(i), Record);
 }
 void TypeLocWriter::VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc TL) {
   VisitFunctionTypeLoc(TL);
@@ -764,10 +779,8 @@
   RECORD(EXPR_CXX_UNRESOLVED_CONSTRUCT);
   RECORD(EXPR_CXX_UNRESOLVED_MEMBER);
   RECORD(EXPR_CXX_UNRESOLVED_LOOKUP);
-  RECORD(EXPR_CXX_UNARY_TYPE_TRAIT);
   RECORD(EXPR_CXX_NOEXCEPT);
   RECORD(EXPR_OPAQUE_VALUE);
-  RECORD(EXPR_BINARY_TYPE_TRAIT);
   RECORD(EXPR_PACK_EXPANSION);
   RECORD(EXPR_SIZEOF_PACK);
   RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK);
@@ -806,7 +819,7 @@
   RECORD(DECL_OFFSET);
   RECORD(IDENTIFIER_OFFSET);
   RECORD(IDENTIFIER_TABLE);
-  RECORD(EXTERNAL_DEFINITIONS);
+  RECORD(EAGERLY_DESERIALIZED_DECLS);
   RECORD(SPECIAL_TYPES);
   RECORD(STATISTICS);
   RECORD(TENTATIVE_DEFINITIONS);
@@ -1040,7 +1053,6 @@
   // Imports
   if (Chain) {
     serialization::ModuleManager &Mgr = Chain->getModuleManager();
-    SmallVector<char, 128> ModulePaths;
     Record.clear();
 
     for (ModuleManager::ModuleIterator M = Mgr.begin(), MEnd = Mgr.end();
@@ -1097,7 +1109,6 @@
   AddString(TargetOpts.Triple, Record);
   AddString(TargetOpts.CPU, Record);
   AddString(TargetOpts.ABI, Record);
-  AddString(TargetOpts.CXXABI, Record);
   AddString(TargetOpts.LinkerVersion, Record);
   Record.push_back(TargetOpts.FeaturesAsWritten.size());
   for (unsigned I = 0, N = TargetOpts.FeaturesAsWritten.size(); I != N; ++I) {
@@ -1156,6 +1167,7 @@
 
   AddString(HSOpts.ResourceDir, Record);
   AddString(HSOpts.ModuleCachePath, Record);
+  AddString(HSOpts.ModuleUserBuildPath, Record);
   Record.push_back(HSOpts.DisableModuleHash);
   Record.push_back(HSOpts.UseBuiltinIncludes);
   Record.push_back(HSOpts.UseStandardSystemIncludes);
@@ -1294,33 +1306,6 @@
       SortedFiles.push_front(Entry);
   }
 
-  // If we have an isysroot for a Darwin SDK, include its SDKSettings.plist in
-  // the set of (non-system) input files. This is simple heuristic for
-  // detecting whether the system headers may have changed, because it is too
-  // expensive to stat() all of the system headers.
-  FileManager &FileMgr = SourceMgr.getFileManager();
-  if (!HSOpts.Sysroot.empty() && !Chain) {
-    llvm::SmallString<128> SDKSettingsFileName(HSOpts.Sysroot);
-    llvm::sys::path::append(SDKSettingsFileName, "SDKSettings.plist");
-    if (const FileEntry *SDKSettingsFile = FileMgr.getFile(SDKSettingsFileName)) {
-      InputFileEntry Entry = { SDKSettingsFile, false, false };
-      SortedFiles.push_front(Entry);
-    }
-  }
-
-  // Add the compiler's own module.map in the set of (non-system) input files.
-  // This is a simple heuristic for detecting whether the compiler's headers
-  // have changed, because we don't want to stat() all of them.
-  if (Modules && !Chain) {
-    SmallString<128> P = StringRef(HSOpts.ResourceDir);
-    llvm::sys::path::append(P, "include");
-    llvm::sys::path::append(P, "module.map");
-    if (const FileEntry *ModuleMapFile = FileMgr.getFile(P)) {
-      InputFileEntry Entry = { ModuleMapFile, false, false };
-      SortedFiles.push_front(Entry);
-    }
-  }
-
   unsigned UserFilesNum = 0;
   // Write out all of the input files.
   std::vector<uint32_t> InputFileOffsets;
@@ -1357,7 +1342,7 @@
     
     // Ask the file manager to fixup the relative path for us. This will 
     // honor the working directory.
-    FileMgr.FixupRelativePath(FilePath);
+    SourceMgr.getFileManager().FixupRelativePath(FilePath);
     
     // FIXME: This call to make_absolute shouldn't be necessary, the
     // call to FixupRelativePath should always return an absolute path.
@@ -1481,26 +1466,31 @@
     
     std::pair<unsigned,unsigned>
     EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
+      using namespace llvm::support;
+      endian::Writer<little> Writer(Out);
       unsigned KeyLen = strlen(key.Filename) + 1 + 8 + 8;
-      clang::io::Emit16(Out, KeyLen);
+      Writer.write<uint16_t>(KeyLen);
       unsigned DataLen = 1 + 2 + 4 + 4;
       if (Data.isModuleHeader)
         DataLen += 4;
-      clang::io::Emit8(Out, DataLen);
+      Writer.write<uint8_t>(DataLen);
       return std::make_pair(KeyLen, DataLen);
     }
     
     void EmitKey(raw_ostream& Out, key_type_ref key, unsigned KeyLen) {
-      clang::io::Emit64(Out, key.FE->getSize());
+      using namespace llvm::support;
+      endian::Writer<little> LE(Out);
+      LE.write<uint64_t>(key.FE->getSize());
       KeyLen -= 8;
-      clang::io::Emit64(Out, key.FE->getModificationTime());
+      LE.write<uint64_t>(key.FE->getModificationTime());
       KeyLen -= 8;
       Out.write(key.Filename, KeyLen);
     }
     
     void EmitData(raw_ostream &Out, key_type_ref key,
                   data_type_ref Data, unsigned DataLen) {
-      using namespace clang::io;
+      using namespace llvm::support;
+      endian::Writer<little> LE(Out);
       uint64_t Start = Out.tell(); (void)Start;
       
       unsigned char Flags = (Data.HeaderRole << 6)
@@ -1509,13 +1499,13 @@
                           | (Data.DirInfo << 2)
                           | (Data.Resolved << 1)
                           | Data.IndexHeaderMapHeader;
-      Emit8(Out, (uint8_t)Flags);
-      Emit16(Out, (uint16_t) Data.NumIncludes);
+      LE.write<uint8_t>(Flags);
+      LE.write<uint16_t>(Data.NumIncludes);
       
       if (!Data.ControllingMacro)
-        Emit32(Out, (uint32_t)Data.ControllingMacroID);
+        LE.write<uint32_t>(Data.ControllingMacroID);
       else
-        Emit32(Out, (uint32_t)Writer.getIdentifierRef(Data.ControllingMacro));
+        LE.write<uint32_t>(Writer.getIdentifierRef(Data.ControllingMacro));
       
       unsigned Offset = 0;
       if (!Data.Framework.empty()) {
@@ -1532,11 +1522,11 @@
         } else
           Offset = Pos->second;
       }
-      Emit32(Out, Offset);
+      LE.write<uint32_t>(Offset);
 
       if (Data.isModuleHeader) {
         Module *Mod = HS.findModuleForHeader(key.FE).getModule();
-        Emit32(Out, Writer.getExistingSubmoduleID(Mod));
+        LE.write<uint32_t>(Writer.getExistingSubmoduleID(Mod));
       }
 
       assert(Out.tell() - Start == DataLen && "Wrong data length");
@@ -1568,10 +1558,10 @@
 
     // Use HeaderSearch's getFileInfo to make sure we get the HeaderFileInfo
     // from the external source if it was not provided already.
-    const HeaderFileInfo &HFI = HS.getFileInfo(File);
-    if (HFI.External && Chain)
-      continue;
-    if (HFI.isModuleHeader && !HFI.isCompilingModuleHeader)
+    HeaderFileInfo HFI;
+    if (!HS.tryGetFileInfo(File, HFI) ||
+        (HFI.External && Chain) ||
+        (HFI.isModuleHeader && !HFI.isCompilingModuleHeader))
       continue;
 
     // Turn the file name into an absolute path, if it isn't already.
@@ -1594,9 +1584,10 @@
   SmallString<4096> TableData;
   uint32_t BucketOffset;
   {
+    using namespace llvm::support;
     llvm::raw_svector_ostream Out(TableData);
     // Make sure that no bucket is at offset 0
-    clang::io::Emit32(Out, 0);
+    endian::Writer<little>(Out).write<uint32_t>(0);
     BucketOffset = Generator.Emit(Out, GeneratorTrait);
   }
 
@@ -1851,12 +1842,14 @@
   }
 
   static void EmitKey(raw_ostream& Out, key_type_ref Key, unsigned KeyLen) {
-    clang::io::Emit32(Out, Key);
+    using namespace llvm::support;
+    endian::Writer<little>(Out).write<uint32_t>(Key);
   }
 
   static void EmitData(raw_ostream& Out, key_type_ref Key, data_type_ref Data,
                        unsigned) {
-    clang::io::Emit32(Out, Data.MacroDirectivesOffset);
+    using namespace llvm::support;
+    endian::Writer<little>(Out).write<uint32_t>(Data.MacroDirectivesOffset);
   }
 };
 } // end anonymous namespace
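The clang::io::Emit8/16/32/64 helpers are replaced throughout this file by llvm::support::endian::Writer, with endian::readNext as the matching reader-side primitive (used earlier in ASTReader.cpp). A small round-trip sketch of the two, standalone and illustrative only:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"

static void roundTrip() {
  using namespace llvm::support;

  llvm::SmallString<16> Buffer;
  {
    llvm::raw_svector_ostream OS(Buffer);
    endian::Writer<little> LE(OS);
    LE.write<uint32_t>(42);  // Previously: clang::io::Emit32(OS, 42);
    LE.write<uint16_t>(7);   // Previously: clang::io::Emit16(OS, 7);
  } // raw_svector_ostream flushes into Buffer on destruction.

  const unsigned char *Data =
      reinterpret_cast<const unsigned char *>(Buffer.data());
  uint32_t A = endian::readNext<uint32_t, little, unaligned>(Data);
  uint16_t B = endian::readNext<uint16_t, little, unaligned>(Data);
  (void)A;
  (void)B;
}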
@@ -1946,8 +1939,6 @@
 
     // Emit the macro directives in reverse source order.
     for (; MD; MD = MD->getPrevious()) {
-      if (MD->isHidden())
-        continue;
       if (shouldIgnoreMacro(MD, IsModule, PP))
         continue;
 
@@ -2054,9 +2045,10 @@
   SmallString<4096> MacroTable;
   uint32_t BucketOffset;
   {
+    using namespace llvm::support;
     llvm::raw_svector_ostream Out(MacroTable);
     // Make sure that no bucket is at offset 0
-    clang::io::Emit32(Out, 0);
+    endian::Writer<little>(Out).write<uint32_t>(0);
     BucketOffset = Generator.Emit(Out);
   }
 
@@ -2227,9 +2219,7 @@
   // other consumers of this information.
   SourceManager &SrcMgr = PP->getSourceManager();
   ModuleMap &ModMap = PP->getHeaderSearchInfo().getModuleMap();
-  for (ASTContext::import_iterator I = Context->local_import_begin(),
-                                IEnd = Context->local_import_end();
-       I != IEnd; ++I) {
+  for (const auto *I : Context->local_imports()) {
     if (Module *ImportedFrom
           = ModMap.inferModuleFromLocation(FullSourceLoc(I->getLocation(), 
                                                          SrcMgr))) {
@@ -2248,7 +2238,8 @@
   Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Parent
   Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
   Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExplicit
-  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsSystem  
+  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsSystem
+  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExternC
   Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferSubmodules...
   Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExplicit...
   Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExportWild...
@@ -2336,6 +2327,7 @@
     Record.push_back(Mod->IsFramework);
     Record.push_back(Mod->IsExplicit);
     Record.push_back(Mod->IsSystem);
+    Record.push_back(Mod->IsExternC);
     Record.push_back(Mod->InferSubmodules);
     Record.push_back(Mod->InferExplicitSubmodules);
     Record.push_back(Mod->InferExportWildcard);
@@ -2618,9 +2610,8 @@
   RecordData Record;
   Record.push_back(DECL_CONTEXT_LEXICAL);
   SmallVector<KindDeclIDPair, 64> Decls;
-  for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
-         D != DEnd; ++D)
-    Decls.push_back(std::make_pair((*D)->getKind(), GetDeclRef(*D)));
+  for (const auto *D : DC->decls())
+    Decls.push_back(std::make_pair(D->getKind(), GetDeclRef(D)));
 
   ++NumLexicalDeclContexts;
   Stream.EmitRecordWithBlob(DeclContextLexicalAbbrev, Record, data(Decls));
@@ -2728,8 +2719,10 @@
   std::pair<unsigned,unsigned>
     EmitKeyDataLength(raw_ostream& Out, Selector Sel,
                       data_type_ref Methods) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
     unsigned KeyLen = 2 + (Sel.getNumArgs()? Sel.getNumArgs() * 4 : 4);
-    clang::io::Emit16(Out, KeyLen);
+    LE.write<uint16_t>(KeyLen);
     unsigned DataLen = 4 + 2 + 2; // 2 bytes for each of the method counts
     for (const ObjCMethodList *Method = &Methods.Instance; Method;
          Method = Method->getNext())
@@ -2739,27 +2732,31 @@
          Method = Method->getNext())
       if (Method->Method)
         DataLen += 4;
-    clang::io::Emit16(Out, DataLen);
+    LE.write<uint16_t>(DataLen);
     return std::make_pair(KeyLen, DataLen);
   }
 
   void EmitKey(raw_ostream& Out, Selector Sel, unsigned) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
     uint64_t Start = Out.tell();
     assert((Start >> 32) == 0 && "Selector key offset too large");
     Writer.SetSelectorOffset(Sel, Start);
     unsigned N = Sel.getNumArgs();
-    clang::io::Emit16(Out, N);
+    LE.write<uint16_t>(N);
     if (N == 0)
       N = 1;
     for (unsigned I = 0; I != N; ++I)
-      clang::io::Emit32(Out,
-                    Writer.getIdentifierRef(Sel.getIdentifierInfoForSlot(I)));
+      LE.write<uint32_t>(
+          Writer.getIdentifierRef(Sel.getIdentifierInfoForSlot(I)));
   }
 
   void EmitData(raw_ostream& Out, key_type_ref,
                 data_type_ref Methods, unsigned DataLen) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
     uint64_t Start = Out.tell(); (void)Start;
-    clang::io::Emit32(Out, Methods.ID);
+    LE.write<uint32_t>(Methods.ID);
     unsigned NumInstanceMethods = 0;
     for (const ObjCMethodList *Method = &Methods.Instance; Method;
          Method = Method->getNext())
@@ -2779,16 +2776,16 @@
     unsigned FactoryBits = Methods.Factory.getBits();
     assert(FactoryBits < 4);
     unsigned NumFactoryMethodsAndBits = (NumFactoryMethods << 2) | FactoryBits;
-    clang::io::Emit16(Out, NumInstanceMethodsAndBits);
-    clang::io::Emit16(Out, NumFactoryMethodsAndBits);
+    LE.write<uint16_t>(NumInstanceMethodsAndBits);
+    LE.write<uint16_t>(NumFactoryMethodsAndBits);
     for (const ObjCMethodList *Method = &Methods.Instance; Method;
          Method = Method->getNext())
       if (Method->Method)
-        clang::io::Emit32(Out, Writer.getDeclID(Method->Method));
+        LE.write<uint32_t>(Writer.getDeclID(Method->Method));
     for (const ObjCMethodList *Method = &Methods.Factory; Method;
          Method = Method->getNext())
       if (Method->Method)
-        clang::io::Emit32(Out, Writer.getDeclID(Method->Method));
+        LE.write<uint32_t>(Writer.getDeclID(Method->Method));
 
     assert(Out.tell() - Start == DataLen && "Data length is wrong");
   }
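For readers following the clang::io -> llvm::support::endian migration that recurs throughout this patch, here is a minimal, self-contained sketch of the replacement. It is illustrative only, assumes the llvm/Support/EndianStream.h API of this LLVM revision, and is not part of the patch itself:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

// Write a 16-bit and a 32-bit value in little-endian byte order, exactly as
// clang::io::Emit16/Emit32 used to do, but via the LLVM Support facility.
static void writeLengths(llvm::raw_ostream &Out, uint16_t KeyLen,
                         uint32_t DataLen) {
  using namespace llvm::support;
  endian::Writer<little> LE(Out);
  LE.write<uint16_t>(KeyLen);   // was: clang::io::Emit16(Out, KeyLen);
  LE.write<uint32_t>(DataLen);  // was: clang::io::Emit32(Out, DataLen);
}

int main() {
  llvm::SmallString<8> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  writeLengths(Out, 0x0102, 0x03040506);
  Out.flush();
  // Buffer now holds 02 01 06 05 04 03 regardless of the host's byte order.
  llvm::outs() << "wrote " << Buffer.size() << " bytes\n";
  return 0;
}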
@@ -2857,10 +2854,11 @@
     SmallString<4096> MethodPool;
     uint32_t BucketOffset;
     {
+      using namespace llvm::support;
       ASTMethodPoolTrait Trait(*this);
       llvm::raw_svector_ostream Out(MethodPool);
       // Make sure that no bucket is at offset 0
-      clang::io::Emit32(Out, 0);
+      endian::Writer<little>(Out).write<uint32_t>(0);
       BucketOffset = Generator.Emit(Out, Trait);
     }
 
@@ -2959,83 +2957,101 @@
     return false;
   }
 
-  DefMacroDirective *getFirstPublicSubmoduleMacro(MacroDirective *MD,
-                                                  SubmoduleID &ModID) {
+  typedef llvm::SmallVectorImpl<SubmoduleID> OverriddenList;
+
+  MacroDirective *
+  getFirstPublicSubmoduleMacro(MacroDirective *MD, SubmoduleID &ModID) {
     ModID = 0;
-    if (DefMacroDirective *DefMD = getPublicSubmoduleMacro(MD, ModID))
-      if (!shouldIgnoreMacro(DefMD, IsModule, PP))
-        return DefMD;
+    llvm::SmallVector<SubmoduleID, 1> Overridden;
+    if (MacroDirective *NextMD = getPublicSubmoduleMacro(MD, ModID, Overridden))
+      if (!shouldIgnoreMacro(NextMD, IsModule, PP))
+        return NextMD;
     return 0;
   }
 
-  DefMacroDirective *getNextPublicSubmoduleMacro(DefMacroDirective *MD,
-                                                 SubmoduleID &ModID) {
-    if (DefMacroDirective *
-          DefMD = getPublicSubmoduleMacro(MD->getPrevious(), ModID))
-      if (!shouldIgnoreMacro(DefMD, IsModule, PP))
-        return DefMD;
+  MacroDirective *
+  getNextPublicSubmoduleMacro(MacroDirective *MD, SubmoduleID &ModID,
+                              OverriddenList &Overridden) {
+    if (MacroDirective *NextMD =
+            getPublicSubmoduleMacro(MD->getPrevious(), ModID, Overridden))
+      if (!shouldIgnoreMacro(NextMD, IsModule, PP))
+        return NextMD;
     return 0;
   }
 
   /// \brief Traverses the macro directives history and returns the latest
-  /// macro that is public and not undefined in the same submodule.
-  /// A macro that is defined in submodule A and undefined in submodule B,
+  /// public macro definition or undefinition that is not in ModID.
+  /// A macro that is defined in submodule A and undefined in submodule B
   /// will still be considered as defined/exported from submodule A.
-  DefMacroDirective *getPublicSubmoduleMacro(MacroDirective *MD,
-                                             SubmoduleID &ModID) {
+  /// ModID is updated to the module containing the returned directive.
+  ///
+  /// FIXME: This process breaks down if a module defines a macro, imports
+  ///        another submodule that changes the macro, then changes the
+  ///        macro again itself.
+  MacroDirective *getPublicSubmoduleMacro(MacroDirective *MD,
+                                          SubmoduleID &ModID,
+                                          OverriddenList &Overridden) {
     if (!MD)
       return 0;
 
+    Overridden.clear();
     SubmoduleID OrigModID = ModID;
-    bool isUndefined = false;
-    Optional<bool> isPublic;
+    Optional<bool> IsPublic;
     for (; MD; MD = MD->getPrevious()) {
-      if (MD->isHidden())
-        continue;
-
       SubmoduleID ThisModID = getSubmoduleID(MD);
       if (ThisModID == 0) {
-        isUndefined = false;
-        isPublic = Optional<bool>();
+        IsPublic = Optional<bool>();
         continue;
       }
-      if (ThisModID != ModID){
+      if (ThisModID != ModID) {
         ModID = ThisModID;
-        isUndefined = false;
-        isPublic = Optional<bool>();
+        IsPublic = Optional<bool>();
       }
+
+      // If this is a definition from a submodule import, that submodule's
+      // definition is overridden by the definition or undefinition that we
+      // started with.
+      // FIXME: This should only apply to macros defined in OrigModID.
+      // We can't do that currently, because a #include of a different submodule
+      // of the same module just leaks through macros instead of providing new
+      // DefMacroDirectives for them.
+      if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) {
+        // Figure out which submodule the macro was originally defined within.
+        SubmoduleID SourceID = DefMD->getInfo()->getOwningModuleID();
+        if (!SourceID) {
+          SourceLocation DefLoc = DefMD->getInfo()->getDefinitionLoc();
+          if (DefLoc == MD->getLocation())
+            SourceID = ThisModID;
+          else
+            SourceID = Writer.inferSubmoduleIDFromLocation(DefLoc);
+        }
+        if (SourceID != OrigModID)
+          Overridden.push_back(SourceID);
+      }
+
       // We are looking for a definition in a different submodule than the one
       // that we started with. If a submodule has re-definitions of the same
       // macro, only the last definition will be used as the "exported" one.
       if (ModID == OrigModID)
         continue;
 
-      if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) {
-        if (!isUndefined && (!isPublic.hasValue() || isPublic.getValue()))
-          return DefMD;
-        continue;
+      // The latest visibility directive for a name in a submodule affects all
+      // the directives that come before it.
+      if (VisibilityMacroDirective *VisMD =
+              dyn_cast<VisibilityMacroDirective>(MD)) {
+        if (!IsPublic.hasValue())
+          IsPublic = VisMD->isPublic();
+      } else if (!IsPublic.hasValue() || IsPublic.getValue()) {
+        // FIXME: If we find an imported macro, we should include its list of
+        // overrides in our export.
+        return MD;
       }
-
-      if (isa<UndefMacroDirective>(MD)) {
-        isUndefined = true;
-        continue;
-      }
-
-      VisibilityMacroDirective *VisMD = cast<VisibilityMacroDirective>(MD);
-      if (!isPublic.hasValue())
-        isPublic = VisMD->isPublic();
     }
 
     return 0;
   }
 
   SubmoduleID getSubmoduleID(MacroDirective *MD) {
-    if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) {
-      MacroInfo *MI = DefMD->getInfo();
-      if (unsigned ID = MI->getOwningModuleID())
-        return ID;
-      return Writer.inferSubmoduleIDFromLocation(MI->getDefinitionLoc());
-    }
     return Writer.inferSubmoduleIDFromLocation(MD->getLocation());
   }
 
@@ -3066,11 +3082,18 @@
         DataLen += 4; // MacroDirectives offset.
         if (IsModule) {
           SubmoduleID ModID;
-          for (DefMacroDirective *
-                 DefMD = getFirstPublicSubmoduleMacro(Macro, ModID);
-                 DefMD; DefMD = getNextPublicSubmoduleMacro(DefMD, ModID)) {
-            DataLen += 4; // MacroInfo ID.
+          llvm::SmallVector<SubmoduleID, 4> Overridden;
+          for (MacroDirective *
+                 MD = getFirstPublicSubmoduleMacro(Macro, ModID);
+                 MD; MD = getNextPublicSubmoduleMacro(MD, ModID, Overridden)) {
+            // Previous macro's overrides.
+            if (!Overridden.empty())
+              DataLen += 4 * (1 + Overridden.size());
+            DataLen += 4; // MacroInfo ID or ModuleID.
           }
+          // Previous macro's overrides.
+          if (!Overridden.empty())
+            DataLen += 4 * (1 + Overridden.size());
           DataLen += 4;
         }
       }
@@ -3080,11 +3103,14 @@
            D != DEnd; ++D)
         DataLen += sizeof(DeclID);
     }
-    clang::io::Emit16(Out, DataLen);
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
+
+    LE.write<uint16_t>(DataLen);
     // We emit the key length after the data length so that every
     // string is preceded by a 16-bit length. This matches the PTH
     // format for storing identifiers.
-    clang::io::Emit16(Out, KeyLen);
+    LE.write<uint16_t>(KeyLen);
     return std::make_pair(KeyLen, DataLen);
   }
 
@@ -3096,18 +3122,31 @@
     Out.write(II->getNameStart(), KeyLen);
   }
 
+  static void emitMacroOverrides(raw_ostream &Out,
+                                 llvm::ArrayRef<SubmoduleID> Overridden) {
+    if (!Overridden.empty()) {
+      using namespace llvm::support;
+      endian::Writer<little> LE(Out);
+      LE.write<uint32_t>(Overridden.size() | 0x80000000U);
+      for (unsigned I = 0, N = Overridden.size(); I != N; ++I)
+        LE.write<uint32_t>(Overridden[I]);
+    }
+  }
+
   void EmitData(raw_ostream& Out, IdentifierInfo* II,
                 IdentID ID, unsigned) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
     MacroDirective *Macro = 0;
     if (!isInterestingIdentifier(II, Macro)) {
-      clang::io::Emit32(Out, ID << 1);
+      LE.write<uint32_t>(ID << 1);
       return;
     }
 
-    clang::io::Emit32(Out, (ID << 1) | 0x01);
+    LE.write<uint32_t>((ID << 1) | 0x01);
     uint32_t Bits = (uint32_t)II->getObjCOrBuiltinID();
     assert((Bits & 0xffff) == Bits && "ObjCOrBuiltinID too big for ASTReader.");
-    clang::io::Emit16(Out, Bits);
+    LE.write<uint16_t>(Bits);
     Bits = 0;
     bool HadMacroDefinition = hadMacroDefinition(II, Macro);
     Bits = (Bits << 1) | unsigned(HadMacroDefinition);
@@ -3116,21 +3155,30 @@
     Bits = (Bits << 1) | unsigned(II->isPoisoned());
     Bits = (Bits << 1) | unsigned(II->hasRevertedTokenIDToIdentifier());
     Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword());
-    clang::io::Emit16(Out, Bits);
+    LE.write<uint16_t>(Bits);
 
     if (HadMacroDefinition) {
-      clang::io::Emit32(Out, Writer.getMacroDirectivesOffset(II));
+      LE.write<uint32_t>(Writer.getMacroDirectivesOffset(II));
       if (IsModule) {
         // Write the IDs of macros coming from different submodules.
         SubmoduleID ModID;
-        for (DefMacroDirective *
-               DefMD = getFirstPublicSubmoduleMacro(Macro, ModID);
-               DefMD; DefMD = getNextPublicSubmoduleMacro(DefMD, ModID)) {
-          MacroID InfoID = Writer.getMacroID(DefMD->getInfo());
-          assert(InfoID);
-          clang::io::Emit32(Out, InfoID);
+        llvm::SmallVector<SubmoduleID, 4> Overridden;
+        for (MacroDirective *
+               MD = getFirstPublicSubmoduleMacro(Macro, ModID);
+               MD; MD = getNextPublicSubmoduleMacro(MD, ModID, Overridden)) {
+          MacroID InfoID = 0;
+          emitMacroOverrides(Out, Overridden);
+          if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) {
+            InfoID = Writer.getMacroID(DefMD->getInfo());
+            assert(InfoID);
+            LE.write<uint32_t>(InfoID << 1);
+          } else {
+            assert(isa<UndefMacroDirective>(MD));
+            LE.write<uint32_t>((ModID << 1) | 1);
+          }
         }
-        clang::io::Emit32(Out, 0);
+        emitMacroOverrides(Out, Overridden);
+        LE.write<uint32_t>(0);
       }
     }
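The per-submodule macro records sized and emitted above form a small tagged stream of little-endian 32-bit words. The following reader-side sketch documents that wire format; it is an illustration with made-up names, not the actual ASTReader implementation:

#include <cstdint>
#include <vector>

// Token stream written per identifier when IsModule is true:
//   * a word with the high bit set: an override list; the low 31 bits give a
//     count N, followed by N overridden-submodule IDs (per the writer's
//     comments, these describe the previously emitted directive),
//   * a word with the high bit clear and low bit 0: a definition, encoded as
//     MacroInfo ID << 1,
//   * a word with the high bit clear and low bit 1: an undefinition, encoded
//     as (submodule ID << 1) | 1,
//   * 0: end of the stream.
struct MacroToken {
  enum Kind { OverrideList, Definition, Undefinition } K;
  uint32_t ID;                          // MacroInfo ID or submodule ID
  std::vector<uint32_t> Overridden;     // only used for OverrideList
};

static std::vector<MacroToken> decodeMacroTokens(const uint32_t *&Cur) {
  std::vector<MacroToken> Tokens;
  while (uint32_t Word = *Cur++) {      // the terminator 0 ends the loop
    MacroToken T;
    T.ID = 0;
    if (Word & 0x80000000U) {
      T.K = MacroToken::OverrideList;
      uint32_t N = Word & 0x7fffffffU;
      T.Overridden.assign(Cur, Cur + N);
      Cur += N;
    } else if (Word & 1) {
      T.K = MacroToken::Undefinition;
      T.ID = Word >> 1;
    } else {
      T.K = MacroToken::Definition;
      T.ID = Word >> 1;
    }
    Tokens.push_back(T);
  }
  return Tokens;
}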
 
@@ -3145,7 +3193,7 @@
     for (SmallVectorImpl<Decl *>::reverse_iterator D = Decls.rbegin(),
                                                 DEnd = Decls.rend();
          D != DEnd; ++D)
-      clang::io::Emit32(Out, Writer.getDeclID(getMostRecentLocalDecl(*D)));
+      LE.write<uint32_t>(Writer.getDeclID(getMostRecentLocalDecl(*D)));
   }
 
   /// \brief Returns the most recent local decl or the given decl if there are
@@ -3214,10 +3262,11 @@
     SmallString<4096> IdentifierTable;
     uint32_t BucketOffset;
     {
+      using namespace llvm::support;
       ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule);
       llvm::raw_svector_ostream Out(IdentifierTable);
       // Make sure that no bucket is at offset 0
-      clang::io::Emit32(Out, 0);
+      endian::Writer<little>(Out).write<uint32_t>(0);
       BucketOffset = Generator.Emit(Out, Trait);
     }
 
@@ -3306,6 +3355,8 @@
   std::pair<unsigned,unsigned>
     EmitKeyDataLength(raw_ostream& Out, DeclarationName Name,
                       data_type_ref Lookup) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
     unsigned KeyLen = 1;
     switch (Name.getNameKind()) {
     case DeclarationName::Identifier:
@@ -3324,35 +3375,35 @@
     case DeclarationName::CXXUsingDirective:
       break;
     }
-    clang::io::Emit16(Out, KeyLen);
+    LE.write<uint16_t>(KeyLen);
 
     // 2 bytes for num of decls and 4 for each DeclID.
     unsigned DataLen = 2 + 4 * Lookup.size();
-    clang::io::Emit16(Out, DataLen);
+    LE.write<uint16_t>(DataLen);
 
     return std::make_pair(KeyLen, DataLen);
   }
 
   void EmitKey(raw_ostream& Out, DeclarationName Name, unsigned) {
-    using namespace clang::io;
-
-    Emit8(Out, Name.getNameKind());
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
+    LE.write<uint8_t>(Name.getNameKind());
     switch (Name.getNameKind()) {
     case DeclarationName::Identifier:
-      Emit32(Out, Writer.getIdentifierRef(Name.getAsIdentifierInfo()));
+      LE.write<uint32_t>(Writer.getIdentifierRef(Name.getAsIdentifierInfo()));
       return;
     case DeclarationName::ObjCZeroArgSelector:
     case DeclarationName::ObjCOneArgSelector:
     case DeclarationName::ObjCMultiArgSelector:
-      Emit32(Out, Writer.getSelectorRef(Name.getObjCSelector()));
+      LE.write<uint32_t>(Writer.getSelectorRef(Name.getObjCSelector()));
       return;
     case DeclarationName::CXXOperatorName:
       assert(Name.getCXXOverloadedOperator() < NUM_OVERLOADED_OPERATORS &&
              "Invalid operator?");
-      Emit8(Out, Name.getCXXOverloadedOperator());
+      LE.write<uint8_t>(Name.getCXXOverloadedOperator());
       return;
     case DeclarationName::CXXLiteralOperatorName:
-      Emit32(Out, Writer.getIdentifierRef(Name.getCXXLiteralIdentifier()));
+      LE.write<uint32_t>(Writer.getIdentifierRef(Name.getCXXLiteralIdentifier()));
       return;
     case DeclarationName::CXXConstructorName:
     case DeclarationName::CXXDestructorName:
@@ -3366,17 +3417,110 @@
 
   void EmitData(raw_ostream& Out, key_type_ref,
                 data_type Lookup, unsigned DataLen) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
     uint64_t Start = Out.tell(); (void)Start;
-    clang::io::Emit16(Out, Lookup.size());
+    LE.write<uint16_t>(Lookup.size());
     for (DeclContext::lookup_iterator I = Lookup.begin(), E = Lookup.end();
          I != E; ++I)
-      clang::io::Emit32(Out, Writer.GetDeclRef(*I));
+      LE.write<uint32_t>(Writer.GetDeclRef(*I));
 
     assert(Out.tell() - Start == DataLen && "Data length is wrong");
   }
 };
 } // end anonymous namespace
 
+uint32_t
+ASTWriter::GenerateNameLookupTable(const DeclContext *DC,
+                                   llvm::SmallVectorImpl<char> &LookupTable) {
+  assert(!DC->LookupPtr.getInt() && "must call buildLookups first");
+  assert(DC == DC->getPrimaryContext() && "only primary DC has lookup table");
+
+  OnDiskChainedHashTableGenerator<ASTDeclContextNameLookupTrait> Generator;
+  ASTDeclContextNameLookupTrait Trait(*this);
+
+  // Create the on-disk hash table representation.
+  DeclarationName ConstructorName;
+  DeclarationName ConversionName;
+  SmallVector<NamedDecl *, 8> ConstructorDecls;
+  SmallVector<NamedDecl *, 4> ConversionDecls;
+
+  auto AddLookupResult = [&](DeclarationName Name,
+                             DeclContext::lookup_result Result) {
+    if (Result.empty())
+      return;
+
+    // Different DeclarationName values of certain kinds are mapped to
+    // identical serialized keys, because we don't want to use type
+    // identifiers in the keys (since type ids are local to the module).
+    switch (Name.getNameKind()) {
+    case DeclarationName::CXXConstructorName:
+      // There may be different CXXConstructorName DeclarationName values
+      // in a DeclContext because a UsingDecl that inherits constructors
+      // has the DeclarationName of the inherited constructors.
+      if (!ConstructorName)
+        ConstructorName = Name;
+      ConstructorDecls.append(Result.begin(), Result.end());
+      return;
+    case DeclarationName::CXXConversionFunctionName:
+      if (!ConversionName)
+        ConversionName = Name;
+      ConversionDecls.append(Result.begin(), Result.end());
+      return;
+    default:
+      break;
+    }
+
+    Generator.insert(Name, Result, Trait);
+  };
+
+  SmallVector<DeclarationName, 16> ExternalNames;
+  for (auto &Lookup : *DC->getLookupPtr()) {
+    if (Lookup.second.hasExternalDecls() ||
+        DC->NeedToReconcileExternalVisibleStorage) {
+      // We don't know for sure what declarations are found by this name,
+      // because the external source might have a different set from the set
+      // that are in the lookup map, and we can't update it now without
+      // risking invalidating our lookup iterator. So add it to a queue to
+      // deal with later.
+      ExternalNames.push_back(Lookup.first);
+      continue;
+    }
+
+    AddLookupResult(Lookup.first, Lookup.second.getLookupResult());
+  }
+
+  // Add the names we needed to defer. Note, this shouldn't add any new decls
+  // to the list we need to serialize: any new declarations we find here should
+  // be imported from an external source.
+  // FIXME: What if the external source isn't an ASTReader?
+  for (const auto &Name : ExternalNames)
+    // FIXME: const_cast since OnDiskHashTable wants a non-const lookup result.
+    AddLookupResult(Name, const_cast<DeclContext*>(DC)->lookup(Name));
+
+  // Add the constructors.
+  if (!ConstructorDecls.empty()) {
+    Generator.insert(ConstructorName,
+                     DeclContext::lookup_result(ConstructorDecls.begin(),
+                                                ConstructorDecls.end()),
+                     Trait);
+  }
+  // Add the conversion functions.
+  if (!ConversionDecls.empty()) {
+    Generator.insert(ConversionName,
+                     DeclContext::lookup_result(ConversionDecls.begin(),
+                                                ConversionDecls.end()),
+                     Trait);
+  }
+
+  // Create the on-disk hash table in a buffer.
+  llvm::raw_svector_ostream Out(LookupTable);
+  // Make sure that no bucket is at offset 0
+  using namespace llvm::support;
+  endian::Writer<little>(Out).write<uint32_t>(0);
+  return Generator.Emit(Out, Trait);
+}
+
 /// \brief Write the block containing all of the declaration IDs
 /// visible from the given DeclContext.
 ///
@@ -3407,50 +3551,9 @@
   if (!Map || Map->empty())
     return 0;
 
-  OnDiskChainedHashTableGenerator<ASTDeclContextNameLookupTrait> Generator;
-  ASTDeclContextNameLookupTrait Trait(*this);
-
-  // Create the on-disk hash table representation.
-  DeclarationName ConversionName;
-  SmallVector<NamedDecl *, 4> ConversionDecls;
-  for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end();
-       D != DEnd; ++D) {
-    DeclarationName Name = D->first;
-    DeclContext::lookup_result Result = D->second.getLookupResult();
-    if (!Result.empty()) {
-      if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
-        // Hash all conversion function names to the same name. The actual
-        // type information in conversion function name is not used in the
-        // key (since such type information is not stable across different
-        // modules), so the intended effect is to coalesce all of the conversion
-        // functions under a single key.
-        if (!ConversionName)
-          ConversionName = Name;
-        ConversionDecls.append(Result.begin(), Result.end());
-        continue;
-      }
-      
-      Generator.insert(Name, Result, Trait);
-    }
-  }
-
-  // Add the conversion functions
-  if (!ConversionDecls.empty()) {
-    Generator.insert(ConversionName, 
-                     DeclContext::lookup_result(ConversionDecls.begin(),
-                                                ConversionDecls.end()),
-                     Trait);
-  }
-  
   // Create the on-disk hash table in a buffer.
   SmallString<4096> LookupTable;
-  uint32_t BucketOffset;
-  {
-    llvm::raw_svector_ostream Out(LookupTable);
-    // Make sure that no bucket is at offset 0
-    clang::io::Emit32(Out, 0);
-    BucketOffset = Generator.Emit(Out, Trait);
-  }
+  uint32_t BucketOffset = GenerateNameLookupTable(DC, LookupTable);
 
   // Write the lookup table
   RecordData Record;
@@ -3471,33 +3574,13 @@
 /// (in C++), for namespaces, and for classes with forward-declared unscoped
 /// enumeration members (in C++11).
 void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
-  StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(DC->getLookupPtr());
+  StoredDeclsMap *Map = DC->getLookupPtr();
   if (!Map || Map->empty())
     return;
 
-  OnDiskChainedHashTableGenerator<ASTDeclContextNameLookupTrait> Generator;
-  ASTDeclContextNameLookupTrait Trait(*this);
-
-  // Create the hash table.
-  for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end();
-       D != DEnd; ++D) {
-    DeclarationName Name = D->first;
-    DeclContext::lookup_result Result = D->second.getLookupResult();
-    // For any name that appears in this table, the results are complete, i.e.
-    // they overwrite results from previous PCHs. Merging is always a mess.
-    if (!Result.empty())
-      Generator.insert(Name, Result, Trait);
-  }
-
   // Create the on-disk hash table in a buffer.
   SmallString<4096> LookupTable;
-  uint32_t BucketOffset;
-  {
-    llvm::raw_svector_ostream Out(LookupTable);
-    // Make sure that no bucket is at offset 0
-    clang::io::Emit32(Out, 0);
-    BucketOffset = Generator.Emit(Out, Trait);
-  }
+  uint32_t BucketOffset = GenerateNameLookupTable(DC, LookupTable);
 
   // Write the lookup table
   RecordData Record;
@@ -3799,9 +3882,7 @@
 }
 
 ASTWriter::~ASTWriter() {
-  for (FileDeclIDsTy::iterator
-         I = FileDeclIDs.begin(), E = FileDeclIDs.end(); I != E; ++I)
-    delete I->second;
+  llvm::DeleteContainerSeconds(FileDeclIDs);
 }
 
 void ASTWriter::WriteAST(Sema &SemaRef,
@@ -4032,11 +4113,9 @@
   // translation unit that do not come from other AST files.
   const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
   SmallVector<KindDeclIDPair, 64> NewGlobalDecls;
-  for (DeclContext::decl_iterator I = TU->noload_decls_begin(),
-                                  E = TU->noload_decls_end();
-       I != E; ++I) {
-    if (!(*I)->isFromASTFile())
-      NewGlobalDecls.push_back(std::make_pair((*I)->getKind(), GetDeclRef(*I)));
+  for (const auto *I : TU->noload_decls()) {
+    if (!I->isFromASTFile())
+      NewGlobalDecls.push_back(std::make_pair(I->getKind(), GetDeclRef(I)));
   }
   
   llvm::BitCodeAbbrev *Abv = new llvm::BitCodeAbbrev();
@@ -4059,14 +4138,25 @@
   
   // If the translation unit has an anonymous namespace, and we don't already
   // have an update block for it, write it as an update block.
+  // FIXME: Why do we not do this if there's already an update block?
   if (NamespaceDecl *NS = TU->getAnonymousNamespace()) {
     ASTWriter::UpdateRecord &Record = DeclUpdates[TU];
-    if (Record.empty()) {
-      Record.push_back(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE);
-      Record.push_back(reinterpret_cast<uint64_t>(NS));
-    }
+    if (Record.empty())
+      Record.push_back(DeclUpdate(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE, NS));
   }
 
+  // Add update records for all mangling numbers and static local numbers.
+  // These aren't really update records, but this is a convenient way of
+  // tagging this rare extra data onto the declarations.
+  for (const auto &Number : Context.MangleNumbers)
+    if (!Number.first->isFromASTFile())
+      DeclUpdates[Number.first].push_back(DeclUpdate(UPD_MANGLING_NUMBER,
+                                                     Number.second));
+  for (const auto &Number : Context.StaticLocalNumbers)
+    if (!Number.first->isFromASTFile())
+      DeclUpdates[Number.first].push_back(DeclUpdate(UPD_STATIC_LOCAL_NUMBER,
+                                                     Number.second));
+
   // Make sure visible decls, added to DeclContexts previously loaded from
   // an AST file, are registered for serialization.
   for (SmallVectorImpl<const Decl *>::iterator
@@ -4090,9 +4180,6 @@
     }
   }
 
-  // Resolve any declaration pointers within the declaration updates block.
-  ResolveDeclUpdatesBlocks();
-  
   // Form the record of special types.
   RecordData SpecialTypes;
   AddTypeRef(Context.getRawCFConstantStringType(), SpecialTypes);
@@ -4104,30 +4191,6 @@
   AddTypeRef(Context.ObjCSelRedefinitionType, SpecialTypes);
   AddTypeRef(Context.getucontext_tType(), SpecialTypes);
 
-  // Keep writing types and declarations until all types and
-  // declarations have been written.
-  Stream.EnterSubblock(DECLTYPES_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
-  WriteDeclsBlockAbbrevs();
-  for (DeclsToRewriteTy::iterator I = DeclsToRewrite.begin(), 
-                                  E = DeclsToRewrite.end(); 
-       I != E; ++I)
-    DeclTypesToEmit.push(const_cast<Decl*>(*I));
-  while (!DeclTypesToEmit.empty()) {
-    DeclOrType DOT = DeclTypesToEmit.front();
-    DeclTypesToEmit.pop();
-    if (DOT.isType())
-      WriteType(DOT.getType());
-    else
-      WriteDecl(Context, DOT.getDecl());
-  }
-  Stream.ExitBlock();
-
-  DoneWritingDeclsAndTypes = true;
-
-  WriteFileDeclIDsMap();
-  WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot);
-  WriteComments();
-  
   if (Chain) {
     // Write the mapping information describing our module dependencies and how
     // each of those modules were mapped into our own offset/ID space, so that
@@ -4154,17 +4217,19 @@
       for (ModuleManager::ModuleConstIterator M = Chain->ModuleMgr.begin(),
                                            MEnd = Chain->ModuleMgr.end();
            M != MEnd; ++M) {
+        using namespace llvm::support;
+        endian::Writer<little> LE(Out);
         StringRef FileName = (*M)->FileName;
-        io::Emit16(Out, FileName.size());
+        LE.write<uint16_t>(FileName.size());
         Out.write(FileName.data(), FileName.size());
-        io::Emit32(Out, (*M)->SLocEntryBaseOffset);
-        io::Emit32(Out, (*M)->BaseIdentifierID);
-        io::Emit32(Out, (*M)->BaseMacroID);
-        io::Emit32(Out, (*M)->BasePreprocessedEntityID);
-        io::Emit32(Out, (*M)->BaseSubmoduleID);
-        io::Emit32(Out, (*M)->BaseSelectorID);
-        io::Emit32(Out, (*M)->BaseDeclID);
-        io::Emit32(Out, (*M)->BaseTypeIndex);
+        LE.write<uint32_t>((*M)->SLocEntryBaseOffset);
+        LE.write<uint32_t>((*M)->BaseIdentifierID);
+        LE.write<uint32_t>((*M)->BaseMacroID);
+        LE.write<uint32_t>((*M)->BasePreprocessedEntityID);
+        LE.write<uint32_t>((*M)->BaseSubmoduleID);
+        LE.write<uint32_t>((*M)->BaseSelectorID);
+        LE.write<uint32_t>((*M)->BaseDeclID);
+        LE.write<uint32_t>((*M)->BaseTypeIndex);
       }
     }
     Record.clear();
@@ -4172,6 +4237,41 @@
     Stream.EmitRecordWithBlob(ModuleOffsetMapAbbrev, Record,
                               Buffer.data(), Buffer.size());
   }
+
+  RecordData DeclUpdatesOffsetsRecord;
+
+  // Keep writing types, declarations, and declaration update records
+  // until we've emitted all of them.
+  Stream.EnterSubblock(DECLTYPES_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
+  WriteDeclsBlockAbbrevs();
+  for (DeclsToRewriteTy::iterator I = DeclsToRewrite.begin(),
+                                  E = DeclsToRewrite.end();
+       I != E; ++I)
+    DeclTypesToEmit.push(const_cast<Decl*>(*I));
+  do {
+    WriteDeclUpdatesBlocks(DeclUpdatesOffsetsRecord);
+    while (!DeclTypesToEmit.empty()) {
+      DeclOrType DOT = DeclTypesToEmit.front();
+      DeclTypesToEmit.pop();
+      if (DOT.isType())
+        WriteType(DOT.getType());
+      else
+        WriteDecl(Context, DOT.getDecl());
+    }
+  } while (!DeclUpdates.empty());
+  Stream.ExitBlock();
+
+  DoneWritingDeclsAndTypes = true;
+
+  // These things can only be done once we've written out decls and types.
+  WriteTypeDeclOffsets();
+  if (!DeclUpdatesOffsetsRecord.empty())
+    Stream.EmitRecord(DECL_UPDATE_OFFSETS, DeclUpdatesOffsetsRecord);
+  WriteCXXBaseSpecifiersOffsets();
+  WriteFileDeclIDsMap();
+  WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot);
+
+  WriteComments();
   WritePreprocessor(PP, isModule);
   WriteHeaderSearch(PP.getHeaderSearchInfo(), isysroot);
   WriteSelectors(SemaRef);
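The reordering above turns declaration/type emission into a fixed-point loop: flushing DeclUpdates can enqueue more decls and types, and writing those can register further updates. A generic, self-contained sketch of that two-queue drain pattern follows (the names are stand-ins, not the ASTWriter API):

#include <deque>
#include <map>
#include <vector>

// Two interacting work queues: emitting an update may enqueue more items to
// emit, and emitting an item may register more updates. Loop until both are
// empty, mirroring the do/while added to the patch above.
struct EmitterSketch {
  std::deque<int> ItemsToEmit;               // stand-in for DeclTypesToEmit
  std::map<int, std::vector<int>> Updates;   // stand-in for DeclUpdates

  void emitItem(int Item) {
    // ... writing an item may add entries to Updates or ItemsToEmit ...
    (void)Item;
  }

  void emitUpdates(std::vector<int> &Offsets) {
    // Swap the map out first, as the patch does, so that records added while
    // emitting are picked up on the next pass instead of mutating this one.
    std::map<int, std::vector<int>> Local;
    Local.swap(Updates);
    for (auto &U : Local) {
      Offsets.push_back(U.first);  // remember which item the update targets
      // ... emit U.second; this may enqueue new items into ItemsToEmit ...
    }
  }

  void drain(std::vector<int> &Offsets) {
    do {
      emitUpdates(Offsets);
      while (!ItemsToEmit.empty()) {
        int Item = ItemsToEmit.front();
        ItemsToEmit.pop_front();
        emitItem(Item);
      }
    } while (!Updates.empty());
  }
};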
@@ -4179,12 +4279,8 @@
   WriteIdentifierTable(PP, SemaRef.IdResolver, isModule);
   WriteFPPragmaOptions(SemaRef.getFPOptions());
   WriteOpenCLExtensions(SemaRef);
-
-  WriteTypeDeclOffsets();
   WritePragmaDiagnosticMappings(Context.getDiagnostics(), isModule);
 
-  WriteCXXBaseSpecifiersOffsets();
-  
   // If we're emitting a module, write out the submodule information.  
   if (WritingModule)
     WriteSubmodules(WritingModule);
@@ -4192,8 +4288,8 @@
   Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
 
   // Write the record containing external, unnamed definitions.
-  if (!ExternalDefinitions.empty())
-    Stream.EmitRecord(EXTERNAL_DEFINITIONS, ExternalDefinitions);
+  if (!EagerlyDeserializedDecls.empty())
+    Stream.EmitRecord(EAGERLY_DESERIALIZED_DECLS, EagerlyDeserializedDecls);
 
   // Write the record containing tentative definitions.
   if (!TentativeDefinitions.empty())
@@ -4258,27 +4354,41 @@
 
   if (!WritingModule) {
     // Write the submodules that were imported, if any.
-    RecordData ImportedModules;
-    for (ASTContext::import_iterator I = Context.local_import_begin(),
-                                  IEnd = Context.local_import_end();
-         I != IEnd; ++I) {
+    struct ModuleInfo {
+      uint64_t ID;
+      Module *M;
+      ModuleInfo(uint64_t ID, Module *M) : ID(ID), M(M) {}
+    };
+    llvm::SmallVector<ModuleInfo, 64> Imports;
+    for (const auto *I : Context.local_imports()) {
       assert(SubmoduleIDs.find(I->getImportedModule()) != SubmoduleIDs.end());
-      ImportedModules.push_back(SubmoduleIDs[I->getImportedModule()]);
+      Imports.push_back(ModuleInfo(SubmoduleIDs[I->getImportedModule()],
+                         I->getImportedModule()));
     }
-    if (!ImportedModules.empty()) {
-      // Sort module IDs.
-      llvm::array_pod_sort(ImportedModules.begin(), ImportedModules.end());
-      
-      // Unique module IDs.
-      ImportedModules.erase(std::unique(ImportedModules.begin(), 
-                                        ImportedModules.end()),
-                            ImportedModules.end());
-      
+
+    if (!Imports.empty()) {
+      auto Cmp = [](const ModuleInfo &A, const ModuleInfo &B) {
+        return A.ID < B.ID;
+      };
+      auto Eq = [](const ModuleInfo &A, const ModuleInfo &B) {
+        return A.ID == B.ID;
+      };
+
+      // Sort and deduplicate module IDs (std::unique takes an equality
+      // predicate, not the strict ordering used for std::sort).
+      std::sort(Imports.begin(), Imports.end(), Cmp);
+      Imports.erase(std::unique(Imports.begin(), Imports.end(), Eq),
+                    Imports.end());
+
+      RecordData ImportedModules;
+      for (const auto &Import : Imports) {
+        ImportedModules.push_back(Import.ID);
+        // FIXME: If the module has macros imported then later has declarations
+        // imported, this location won't be the right one as a location for the
+        // declaration imports.
+        AddSourceLocation(Import.M->MacroVisibilityLoc, ImportedModules);
+      }
+
       Stream.EmitRecord(IMPORTED_MODULES, ImportedModules);
     }
   }
 
-  WriteDeclUpdatesBlocks();
   WriteDeclReplacementsBlock();
   WriteRedeclarations();
   WriteMergedDecls();
@@ -4295,64 +4405,79 @@
   Stream.ExitBlock();
 }
 
-/// \brief Go through the declaration update blocks and resolve declaration
-/// pointers into declaration IDs.
-void ASTWriter::ResolveDeclUpdatesBlocks() {
-  for (DeclUpdateMap::iterator
-       I = DeclUpdates.begin(), E = DeclUpdates.end(); I != E; ++I) {
-    const Decl *D = I->first;
-    UpdateRecord &URec = I->second;
-    
-    if (isRewritten(D))
-      continue; // The decl will be written completely
-
-    unsigned Idx = 0, N = URec.size();
-    while (Idx < N) {
-      switch ((DeclUpdateKind)URec[Idx++]) {
-      case UPD_CXX_ADDED_IMPLICIT_MEMBER:
-      case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
-      case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE:
-        URec[Idx] = GetDeclRef(reinterpret_cast<Decl *>(URec[Idx]));
-        ++Idx;
-        break;
-
-      case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
-      case UPD_DECL_MARKED_USED:
-        ++Idx;
-        break;
-
-      case UPD_CXX_DEDUCED_RETURN_TYPE:
-        URec[Idx] = GetOrCreateTypeID(
-            QualType::getFromOpaquePtr(reinterpret_cast<void *>(URec[Idx])));
-        ++Idx;
-        break;
-      }
-    }
-  }
-}
-
-void ASTWriter::WriteDeclUpdatesBlocks() {
+void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
   if (DeclUpdates.empty())
     return;
 
-  RecordData OffsetsRecord;
-  Stream.EnterSubblock(DECL_UPDATES_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
-  for (DeclUpdateMap::iterator
-         I = DeclUpdates.begin(), E = DeclUpdates.end(); I != E; ++I) {
-    const Decl *D = I->first;
-    UpdateRecord &URec = I->second;
+  DeclUpdateMap LocalUpdates;
+  LocalUpdates.swap(DeclUpdates);
 
+  for (auto &DeclUpdate : LocalUpdates) {
+    const Decl *D = DeclUpdate.first;
     if (isRewritten(D))
       continue; // The decl will be written completely, no need to store updates.
 
-    uint64_t Offset = Stream.GetCurrentBitNo();
-    Stream.EmitRecord(DECL_UPDATES, URec);
-
     OffsetsRecord.push_back(GetDeclRef(D));
-    OffsetsRecord.push_back(Offset);
+    OffsetsRecord.push_back(Stream.GetCurrentBitNo());
+
+    bool HasUpdatedBody = false;
+    RecordData Record;
+    for (auto &Update : DeclUpdate.second) {
+      DeclUpdateKind Kind = (DeclUpdateKind)Update.getKind();
+
+      Record.push_back(Kind);
+      switch (Kind) {
+      case UPD_CXX_ADDED_IMPLICIT_MEMBER:
+      case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
+      case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE:
+        Record.push_back(GetDeclRef(Update.getDecl()));
+        break;
+
+      case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
+        AddSourceLocation(Update.getLoc(), Record);
+        break;
+
+      case UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION:
+        // An updated body is emitted last, so that the reader doesn't need
+        // to skip over the lazy body to reach statements for other records.
+        Record.pop_back();
+        HasUpdatedBody = true;
+        break;
+
+      case UPD_CXX_RESOLVED_EXCEPTION_SPEC:
+        addExceptionSpec(
+            *this,
+            cast<FunctionDecl>(D)->getType()->castAs<FunctionProtoType>(),
+            Record);
+        break;
+
+      case UPD_CXX_DEDUCED_RETURN_TYPE:
+        Record.push_back(GetOrCreateTypeID(Update.getType()));
+        break;
+
+      case UPD_DECL_MARKED_USED:
+        break;
+
+      case UPD_MANGLING_NUMBER:
+      case UPD_STATIC_LOCAL_NUMBER:
+        Record.push_back(Update.getNumber());
+        break;
+      }
+    }
+
+    if (HasUpdatedBody) {
+      const FunctionDecl *Def = cast<FunctionDecl>(D);
+      Record.push_back(UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION);
+      Record.push_back(Def->isInlined());
+      AddSourceLocation(Def->getInnerLocStart(), Record);
+      AddFunctionDefinition(Def, Record);
+    }
+
+    Stream.EmitRecord(DECL_UPDATES, Record);
+
+    // Flush any statements that were written as part of this update record.
+    FlushStmts();
   }
-  Stream.ExitBlock();
-  Stream.EmitRecord(DECL_UPDATE_OFFSETS, OffsetsRecord);
 }
 
 void ASTWriter::WriteDeclReplacementsBlock() {
@@ -4648,7 +4773,7 @@
   assert(SM.isLocalSourceLocation(FileLoc));
   FileID FID;
   unsigned Offset;
-  llvm::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
+  std::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
   if (FID.isInvalid())
     return;
   assert(SM.getSLocEntry(FID).isFile());
@@ -5084,6 +5209,7 @@
   Record.push_back(Data.HasProtectedFields);
   Record.push_back(Data.HasPublicFields);
   Record.push_back(Data.HasMutableFields);
+  Record.push_back(Data.HasVariantMembers);
   Record.push_back(Data.HasOnlyCMembers);
   Record.push_back(Data.HasInClassInitializer);
   Record.push_back(Data.HasUninitializedReferenceMember);
@@ -5266,9 +5392,7 @@
 
   // A decl coming from PCH was modified.
   assert(RD->isCompleteDefinition());
-  UpdateRecord &Record = DeclUpdates[RD];
-  Record.push_back(UPD_CXX_ADDED_IMPLICIT_MEMBER);
-  Record.push_back(reinterpret_cast<uint64_t>(D));
+  DeclUpdates[RD].push_back(DeclUpdate(UPD_CXX_ADDED_IMPLICIT_MEMBER, D));
 }
 
 void ASTWriter::AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
@@ -5279,9 +5403,8 @@
   if (!(!D->isFromASTFile() && TD->isFromASTFile()))
     return; // Not a source specialization added to a template from PCH.
 
-  UpdateRecord &Record = DeclUpdates[TD];
-  Record.push_back(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION);
-  Record.push_back(reinterpret_cast<uint64_t>(D));
+  DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
+                                       D));
 }
 
 void ASTWriter::AddedCXXTemplateSpecialization(
@@ -5292,9 +5415,8 @@
   if (!(!D->isFromASTFile() && TD->isFromASTFile()))
     return; // Not a source specialization added to a template from PCH.
 
-  UpdateRecord &Record = DeclUpdates[TD];
-  Record.push_back(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION);
-  Record.push_back(reinterpret_cast<uint64_t>(D));
+  DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
+                                       D));
 }
 
 void ASTWriter::AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
@@ -5305,9 +5427,17 @@
   if (!(!D->isFromASTFile() && TD->isFromASTFile()))
     return; // Not a source specialization added to a template from PCH.
 
-  UpdateRecord &Record = DeclUpdates[TD];
-  Record.push_back(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION);
-  Record.push_back(reinterpret_cast<uint64_t>(D));
+  DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
+                                       D));
+}
+
+void ASTWriter::ResolvedExceptionSpec(const FunctionDecl *FD) {
+  assert(!WritingAST && "Already writing the AST!");
+  FD = FD->getCanonicalDecl();
+  if (!FD->isFromASTFile())
+    return; // Not a function declared in PCH and defined outside.
+
+  DeclUpdates[FD].push_back(UPD_CXX_RESOLVED_EXCEPTION_SPEC);
 }
 
 void ASTWriter::DeducedReturnType(const FunctionDecl *FD, QualType ReturnType) {
@@ -5316,9 +5446,7 @@
   if (!FD->isFromASTFile())
     return; // Not a function declared in PCH and defined outside.
 
-  UpdateRecord &Record = DeclUpdates[FD];
-  Record.push_back(UPD_CXX_DEDUCED_RETURN_TYPE);
-  Record.push_back(reinterpret_cast<uint64_t>(ReturnType.getAsOpaquePtr()));
+  DeclUpdates[FD].push_back(DeclUpdate(UPD_CXX_DEDUCED_RETURN_TYPE, ReturnType));
 }
 
 void ASTWriter::CompletedImplicitDefinition(const FunctionDecl *D) {
@@ -5331,6 +5459,17 @@
   RewriteDecl(D);
 }
 
+void ASTWriter::FunctionDefinitionInstantiated(const FunctionDecl *D) {
+  assert(!WritingAST && "Already writing the AST!");
+  if (!D->isFromASTFile())
+    return;
+
+  // Since the actual instantiation is delayed, this really means that we need
+  // to update the instantiation location.
+  DeclUpdates[D].push_back(
+      DeclUpdate(UPD_CXX_INSTANTIATED_FUNCTION_DEFINITION));
+}
+
 void ASTWriter::StaticDataMemberInstantiated(const VarDecl *D) {
   assert(!WritingAST && "Already writing the AST!");
   if (!D->isFromASTFile())
@@ -5338,10 +5477,9 @@
 
   // Since the actual instantiation is delayed, this really means that we need
   // to update the instantiation location.
-  UpdateRecord &Record = DeclUpdates[D];
-  Record.push_back(UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER);
-  AddSourceLocation(
-      D->getMemberSpecializationInfo()->getPointOfInstantiation(), Record);
+  DeclUpdates[D].push_back(
+      DeclUpdate(UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER,
+       D->getMemberSpecializationInfo()->getPointOfInstantiation()));
 }
 
 void ASTWriter::AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
@@ -5375,6 +5513,5 @@
   if (!D->isFromASTFile())
     return;
 
-  UpdateRecord &Record = DeclUpdates[D];
-  Record.push_back(UPD_DECL_MARKED_USED);
+  DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_MARKED_USED));
 }
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index 55f830a..14304ab 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -130,6 +130,14 @@
     void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
     void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
     void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+
+    void AddFunctionDefinition(const FunctionDecl *FD) {
+      assert(FD->doesThisDeclarationHaveABody());
+      if (auto *CD = dyn_cast<CXXConstructorDecl>(FD))
+        Writer.AddCXXCtorInitializers(CD->CtorInitializers,
+                                      CD->NumCtorInitializers, Record);
+      Writer.AddStmt(FD->getBody());
+    }
   };
 }
 
@@ -168,6 +176,24 @@
   Record.push_back(D->getAccess());
   Record.push_back(D->isModulePrivate());
   Record.push_back(Writer.inferSubmoduleIDFromLocation(D->getLocation()));
+
+  // If this declaration injected a name into a context different from its
+  // lexical context, and that context is an imported namespace, we need to
+  // update its visible declarations to include this name.
+  //
+  // This happens when we instantiate a class with a friend declaration or a
+  // function with a local extern declaration, for instance.
+  if (D->isOutOfLine()) {
+    auto *DC = D->getDeclContext();
+    while (auto *NS = dyn_cast<NamespaceDecl>(DC->getRedeclContext())) {
+      if (!NS->isFromASTFile())
+        break;
+      Writer.AddUpdatedDeclContext(NS->getPrimaryContext());
+      if (!NS->isInlineNamespace())
+        break;
+      DC = NS->getParent();
+    }
+  }
 }
 
 void ASTDeclWriter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
@@ -414,9 +440,8 @@
   }
 
   Record.push_back(D->param_size());
-  for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
-       P != PEnd; ++P)
-    Writer.AddDeclRef(*P, Record);
+  for (auto P : D->params())
+    Writer.AddDeclRef(P, Record);
   Code = serialization::DECL_FUNCTION;
 }
 
@@ -451,13 +476,12 @@
   // FIXME: stable encoding for in/out/inout/bycopy/byref/oneway
   Record.push_back(D->getObjCDeclQualifier());
   Record.push_back(D->hasRelatedResultType());
-  Writer.AddTypeRef(D->getResultType(), Record);
-  Writer.AddTypeSourceInfo(D->getResultTypeSourceInfo(), Record);
+  Writer.AddTypeRef(D->getReturnType(), Record);
+  Writer.AddTypeSourceInfo(D->getReturnTypeSourceInfo(), Record);
   Writer.AddSourceLocation(D->getLocEnd(), Record);
   Record.push_back(D->param_size());
-  for (ObjCMethodDecl::param_iterator P = D->param_begin(),
-                                   PEnd = D->param_end(); P != PEnd; ++P)
-    Writer.AddDeclRef(*P, Record);
+  for (const auto *P : D->params())
+    Writer.AddDeclRef(P, Record);
 
   Record.push_back(D->SelLocsKind);
   unsigned NumStoredSelLocs = D->getNumStoredSelLocs();
@@ -489,17 +513,14 @@
     Writer.AddDeclRef(D->getSuperClass(), Record);
     Writer.AddSourceLocation(D->getSuperClassLoc(), Record);
     Writer.AddSourceLocation(D->getEndOfDefinitionLoc(), Record);
+    Record.push_back(Data.HasDesignatedInitializers);
 
     // Write out the protocols that are directly referenced by the @interface.
     Record.push_back(Data.ReferencedProtocols.size());
-    for (ObjCInterfaceDecl::protocol_iterator P = D->protocol_begin(),
-                                           PEnd = D->protocol_end();
-         P != PEnd; ++P)
-      Writer.AddDeclRef(*P, Record);
-    for (ObjCInterfaceDecl::protocol_loc_iterator PL = D->protocol_loc_begin(),
-         PLEnd = D->protocol_loc_end();
-         PL != PLEnd; ++PL)
-      Writer.AddSourceLocation(*PL, Record);
+    for (const auto *P : D->protocols())
+      Writer.AddDeclRef(P, Record);
+    for (const auto &PL : D->protocol_locs())
+      Writer.AddSourceLocation(PL, Record);
     
     // Write out the protocols that are transitively referenced.
     Record.push_back(Data.AllReferencedProtocols.size());
@@ -528,7 +549,6 @@
   // FIXME: stable encoding for @public/@private/@protected/@package
   Record.push_back(D->getAccessControl());
   Record.push_back(D->getSynthesize());
-  Record.push_back(D->getBackingIvarReferencedInAccessor());
 
   if (!D->hasAttrs() &&
       !D->isImplicit() &&
@@ -551,13 +571,10 @@
   Record.push_back(D->isThisDeclarationADefinition());
   if (D->isThisDeclarationADefinition()) {
     Record.push_back(D->protocol_size());
-    for (ObjCProtocolDecl::protocol_iterator
-         I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
-      Writer.AddDeclRef(*I, Record);
-    for (ObjCProtocolDecl::protocol_loc_iterator PL = D->protocol_loc_begin(),
-           PLEnd = D->protocol_loc_end();
-         PL != PLEnd; ++PL)
-      Writer.AddSourceLocation(*PL, Record);
+    for (const auto *I : D->protocols())
+      Writer.AddDeclRef(I, Record);
+    for (const auto &PL : D->protocol_locs())
+      Writer.AddSourceLocation(PL, Record);
   }
   
   Code = serialization::DECL_OBJC_PROTOCOL;
@@ -575,13 +592,10 @@
   Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
   Writer.AddDeclRef(D->getClassInterface(), Record);
   Record.push_back(D->protocol_size());
-  for (ObjCCategoryDecl::protocol_iterator
-       I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
-    Writer.AddDeclRef(*I, Record);
-  for (ObjCCategoryDecl::protocol_loc_iterator 
-         PL = D->protocol_loc_begin(), PLEnd = D->protocol_loc_end();
-       PL != PLEnd; ++PL)
-    Writer.AddSourceLocation(*PL, Record);
+  for (const auto *I : D->protocols())
+    Writer.AddDeclRef(I, Record);
+  for (const auto &PL : D->protocol_locs())
+    Writer.AddSourceLocation(PL, Record);
   Code = serialization::DECL_OBJC_CATEGORY;
 }
 
@@ -688,10 +702,8 @@
   VisitValueDecl(D);
   Record.push_back(D->getChainingSize());
 
-  for (IndirectFieldDecl::chain_iterator
-       P = D->chain_begin(),
-       PEnd = D->chain_end(); P != PEnd; ++P)
-    Writer.AddDeclRef(*P, Record);
+  for (const auto *P : D->chain())
+    Writer.AddDeclRef(P, Record);
   Code = serialization::DECL_INDIRECTFIELD;
 }
 
@@ -833,9 +845,7 @@
   Record.push_back(D->isConversionFromLambda());
   Record.push_back(D->capturesCXXThis());
   Record.push_back(D->getNumCaptures());
-  for (BlockDecl::capture_iterator
-         i = D->capture_begin(), e = D->capture_end(); i != e; ++i) {
-    const BlockDecl::Capture &capture = *i;
+  for (const auto &capture : D->captures()) {
     Writer.AddDeclRef(capture.getVariable(), Record);
 
     unsigned flags = 0;
@@ -911,9 +921,8 @@
     Decl *Parent = cast<Decl>(
         D->getParent()->getRedeclContext()->getPrimaryContext());
     if (Parent->isFromASTFile() || isa<TranslationUnitDecl>(Parent)) {
-      ASTWriter::UpdateRecord &Record = Writer.DeclUpdates[Parent];
-      Record.push_back(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE);
-      Writer.AddDeclRef(D, Record);
+      Writer.DeclUpdates[Parent].push_back(
+          ASTWriter::DeclUpdate(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE, D));
     }
   }
 }
@@ -1021,6 +1030,7 @@
 void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
   VisitCXXMethodDecl(D);
 
+  Writer.AddDeclRef(D->getInheritedConstructor(), Record);
   Record.push_back(D->IsExplicitSpecified);
   Writer.AddCXXCtorInitializers(D->CtorInitializers, D->NumCtorInitializers,
                                 Record);
@@ -1429,10 +1439,8 @@
 void ASTDeclWriter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
   Record.push_back(D->varlist_size());
   VisitDecl(D);
-  for (OMPThreadPrivateDecl::varlist_iterator I = D->varlist_begin(),
-                                              E = D->varlist_end();
-       I != E; ++I)
-    Writer.AddStmt(*I);
+  for (auto *I : D->varlists())
+    Writer.AddStmt(I);
   Code = serialization::DECL_OMP_THREADPRIVATE;
 }
 
@@ -1817,8 +1825,10 @@
   // An ObjCMethodDecl is never considered as "required" because its
   // implementation container always is.
 
-  // File scoped assembly or obj-c implementation must be seen.
-  if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D))
+  // File scoped assembly or obj-c implementation must be seen. ImportDecl is
+  // used by codegen to determine the set of imported modules to search for
+  // inputs for automatic linking.
+  if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D) || isa<ImportDecl>(D))
     return true;
 
   return Context.DeclMustBeEmitted(D);
@@ -1906,10 +1916,16 @@
   // Flush C++ base specifiers, if there are any.
   FlushCXXBaseSpecifiers();
   
-  // Note "external" declarations so that we can add them to a record in the
-  // AST file later.
-  //
-  // FIXME: This should be renamed, the predicate is much more complicated.
+  // Note declarations that should be deserialized eagerly so that we can add
+  // them to a record in the AST file later.
   if (isRequiredDecl(D, Context))
-    ExternalDefinitions.push_back(ID);
+    EagerlyDeserializedDecls.push_back(ID);
+}
+
+void ASTWriter::AddFunctionDefinition(const FunctionDecl *FD,
+                                      RecordData &Record) {
+  ClearSwitchCaseIDs();
+
+  ASTDeclWriter W(*this, FD->getASTContext(), Record);
+  W.AddFunctionDefinition(FD);
 }
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index 072fc98..dd44a69 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -71,9 +71,8 @@
 void ASTStmtWriter::VisitCompoundStmt(CompoundStmt *S) {
   VisitStmt(S);
   Record.push_back(S->size());
-  for (CompoundStmt::body_iterator CS = S->body_begin(), CSEnd = S->body_end();
-       CS != CSEnd; ++CS)
-    Writer.AddStmt(*CS);
+  for (auto *CS : S->body())
+    Writer.AddStmt(CS);
   Writer.AddSourceLocation(S->getLBracLoc(), Record);
   Writer.AddSourceLocation(S->getRBracLoc(), Record);
   Code = serialization::STMT_COMPOUND;
@@ -300,24 +299,20 @@
   Writer.AddDeclRef(S->getCapturedRecordDecl(), Record);
 
   // Capture inits
-  for (CapturedStmt::capture_init_iterator I = S->capture_init_begin(),
-                                           E = S->capture_init_end();
-       I != E; ++I)
-    Writer.AddStmt(*I);
+  for (auto *I : S->capture_inits())
+    Writer.AddStmt(I);
 
   // Body
   Writer.AddStmt(S->getCapturedStmt());
 
   // Captures
-  for (CapturedStmt::capture_iterator I = S->capture_begin(),
-                                      E = S->capture_end();
-       I != E; ++I) {
-    if (I->capturesThis())
+  for (const auto &I : S->captures()) {
+    if (I.capturesThis())
       Writer.AddDeclRef(0, Record);
     else
-      Writer.AddDeclRef(I->getCapturedVar(), Record);
-    Record.push_back(I->getCaptureKind());
-    Writer.AddSourceLocation(I->getLocation(), Record);
+      Writer.AddDeclRef(I.getCapturedVar(), Record);
+    Record.push_back(I.getCaptureKind());
+    Writer.AddSourceLocation(I.getLocation(), Record);
   }
 
   Code = serialization::STMT_CAPTURED;
@@ -1486,30 +1481,12 @@
   Code = serialization::EXPR_CXX_UNRESOLVED_LOOKUP;
 }
 
-void ASTStmtWriter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
-  VisitExpr(E);
-  Record.push_back(E->getTrait());
-  Record.push_back(E->getValue());
-  Writer.AddSourceRange(E->getSourceRange(), Record);
-  Writer.AddTypeSourceInfo(E->getQueriedTypeSourceInfo(), Record);
-  Code = serialization::EXPR_CXX_UNARY_TYPE_TRAIT;
-}
-
-void ASTStmtWriter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
-  VisitExpr(E);
-  Record.push_back(E->getTrait());
-  Record.push_back(E->getValue());
-  Writer.AddSourceRange(E->getSourceRange(), Record);
-  Writer.AddTypeSourceInfo(E->getLhsTypeSourceInfo(), Record);
-  Writer.AddTypeSourceInfo(E->getRhsTypeSourceInfo(), Record);
-  Code = serialization::EXPR_BINARY_TYPE_TRAIT;
-}
-
 void ASTStmtWriter::VisitTypeTraitExpr(TypeTraitExpr *E) {
   VisitExpr(E);
   Record.push_back(E->TypeTraitExprBits.NumArgs);
   Record.push_back(E->TypeTraitExprBits.Kind); // FIXME: Stable encoding
   Record.push_back(E->TypeTraitExprBits.Value);
+  Writer.AddSourceRange(E->getSourceRange(), Record);
   for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
     Writer.AddTypeSourceInfo(E->getArg(I), Record);
   Code = serialization::EXPR_TYPE_TRAIT;
@@ -1697,6 +1674,21 @@
   Writer->Writer.AddSourceLocation(C->getLocEnd(), Record);
 }
 
+void OMPClauseWriter::VisitOMPIfClause(OMPIfClause *C) {
+  Writer->Writer.AddStmt(C->getCondition());
+  Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
+  Writer->Writer.AddStmt(C->getNumThreads());
+  Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPSafelenClause(OMPSafelenClause *C) {
+  Writer->Writer.AddStmt(C->getSafelen());
+  Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
 void OMPClauseWriter::VisitOMPDefaultClause(OMPDefaultClause *C) {
   Record.push_back(C->getDefaultKind());
   Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
@@ -1706,36 +1698,35 @@
 void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
   Record.push_back(C->varlist_size());
   Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
-  for (OMPPrivateClause::varlist_iterator I = C->varlist_begin(),
-                                          E = C->varlist_end();
-       I != E; ++I)
-    Writer->Writer.AddStmt(*I);
+  for (auto *I : C->varlists())
+    Writer->Writer.AddStmt(I);
 }
 
 void OMPClauseWriter::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
   Record.push_back(C->varlist_size());
   Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
-  for (OMPFirstprivateClause::varlist_iterator I = C->varlist_begin(),
-                                               E = C->varlist_end();
-       I != E; ++I)
-    Writer->Writer.AddStmt(*I);
+  for (auto *I : C->varlists())
+    Writer->Writer.AddStmt(I);
 }
 
 void OMPClauseWriter::VisitOMPSharedClause(OMPSharedClause *C) {
   Record.push_back(C->varlist_size());
   Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
-  for (OMPSharedClause::varlist_iterator I = C->varlist_begin(),
-                                         E = C->varlist_end();
-       I != E; ++I)
-    Writer->Writer.AddStmt(*I);
+  for (auto *I : C->varlists())
+    Writer->Writer.AddStmt(I);
+}
+
+void OMPClauseWriter::VisitOMPCopyinClause(OMPCopyinClause *C) {
+  Record.push_back(C->varlist_size());
+  Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+  for (auto *I : C->varlists())
+    Writer->Writer.AddStmt(I);
 }
 
 //===----------------------------------------------------------------------===//
 // OpenMP Directives.
 //===----------------------------------------------------------------------===//
 void ASTStmtWriter::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
-  VisitStmt(E);
-  Record.push_back(E->getNumClauses());
   Writer.AddSourceLocation(E->getLocStart(), Record);
   Writer.AddSourceLocation(E->getLocEnd(), Record);
   OMPClauseWriter ClauseWriter(this, Record);
@@ -1746,10 +1737,20 @@
 }
 
 void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
+  VisitStmt(D);
+  Record.push_back(D->getNumClauses());
   VisitOMPExecutableDirective(D);
   Code = serialization::STMT_OMP_PARALLEL_DIRECTIVE;
 }
 
+void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
+  VisitStmt(D);
+  Record.push_back(D->getNumClauses());
+  Record.push_back(D->getCollapsedNumber());
+  VisitOMPExecutableDirective(D);
+  Code = serialization::STMT_OMP_SIMD_DIRECTIVE;
+}
+
 //===----------------------------------------------------------------------===//
 // ASTWriter Implementation
 //===----------------------------------------------------------------------===//
@@ -1812,7 +1813,7 @@
   ParentStmtInserterRAII ParentStmtInserter(S, ParentStmts);
 #endif
 
-  // Redirect ASTWriter::AddStmt to collect sub stmts.
+  // Redirect ASTWriter::AddStmt to collect sub-stmts.
   SmallVector<Stmt *, 16> SubStmts;
   CollectedStmts = &SubStmts;
 
@@ -1825,16 +1826,16 @@
     SourceManager &SrcMgr
       = DeclIDs.begin()->first->getASTContext().getSourceManager();
     S->dump(SrcMgr);
-    llvm_unreachable("Unhandled sub statement writing AST file");
+    llvm_unreachable("Unhandled sub-statement writing AST file");
   }
 #endif
 
   // Revert ASTWriter::AddStmt.
   CollectedStmts = &StmtsToEmit;
 
-  // Write the sub stmts in reverse order, last to first. When reading them back
+  // Write the sub-stmts in reverse order, last to first. When reading them back
   // we will read them in correct order by "pop"ing them from the Stmts stack.
-  // This simplifies reading and allows to store a variable number of sub stmts
+  // This simplifies reading and allows to store a variable number of sub-stmts
   // without knowing it in advance.
   while (!SubStmts.empty())
     WriteSubStmt(SubStmts.pop_back_val(), SubStmtEntries, ParentStmts);
@@ -1851,7 +1852,7 @@
 
   // We expect to be the only consumer of the two temporary statement maps,
   // assert that they are empty.
-  assert(SubStmtEntries.empty() && "unexpected entries in sub stmt map");
+  assert(SubStmtEntries.empty() && "unexpected entries in sub-stmt map");
   assert(ParentStmts.empty() && "unexpected entries in parent stmt map");
 
   for (unsigned I = 0, N = StmtsToEmit.size(); I != N; ++I) {
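
The sub-statement comments in the hunk above describe the serialization ordering trick: children are written last-to-first so the reader, which pushes each deserialized statement onto a stack, can pop operands back in source order without recording the count up front. A standalone C++ sketch of that round trip, using toy containers rather than the real ASTWriter/ASTReader types:

// Illustrative only: why emitting sub-statements in reverse lets the reader
// recover them in source order by popping a stack.
#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> SubStmts = {"cond", "then", "else"};

  // Emit last-to-first, mirroring the pop_back_val() writer loop above.
  std::vector<std::string> Stream;
  for (auto It = SubStmts.rbegin(); It != SubStmts.rend(); ++It)
    Stream.push_back(*It);                 // Stream: else, then, cond

  // The reader pushes each record it sees onto a stack...
  std::vector<std::string> Stack(Stream.begin(), Stream.end());

  // ...and the consumer pops its operands, which come off in source order.
  std::vector<std::string> Read;
  while (!Stack.empty()) {
    Read.push_back(Stack.back());
    Stack.pop_back();
  }
  assert(Read == SubStmts);                // cond, then, else
  return 0;
}
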
diff --git a/lib/Serialization/CMakeLists.txt b/lib/Serialization/CMakeLists.txt
index 3c68b64..d885db2 100644
--- a/lib/Serialization/CMakeLists.txt
+++ b/lib/Serialization/CMakeLists.txt
@@ -1,8 +1,10 @@
-set(LLVM_LINK_COMPONENTS bitreader)
+set(LLVM_LINK_COMPONENTS
+  BitReader
+  Support
+  )
+
 
 add_clang_library(clangSerialization
-  ASTCommon.h
-  ASTReaderInternals.h
   ASTCommon.cpp
   ASTReader.cpp
   ASTReaderDecl.cpp
@@ -14,23 +16,14 @@
   GlobalModuleIndex.cpp
   Module.cpp
   ModuleManager.cpp
-  )
 
-add_dependencies(clangSerialization
-  ClangAttrClasses
-  ClangAttrList
-  ClangAttrParsedAttrList
-  ClangAttrPCHRead
-  ClangAttrPCHWrite
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticLex
-  ClangDiagnosticSema
-  ClangDiagnosticSerialization
-  ClangStmtNodes
-  )
+  ADDITIONAL_HEADERS
+  ASTCommon.h
+  ASTReaderInternals.h
 
-target_link_libraries(clangSerialization
+  LINK_LIBS
+  clangAST
+  clangBasic
+  clangLex
   clangSema
   )
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index fb647b0..14b149f 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -82,9 +82,9 @@
 
   static std::pair<unsigned, unsigned>
   ReadKeyDataLength(const unsigned char*& d) {
-    using namespace clang::io;
-    unsigned KeyLen = ReadUnalignedLE16(d);
-    unsigned DataLen = ReadUnalignedLE16(d);
+    using namespace llvm::support;
+    unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
+    unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
     return std::make_pair(KeyLen, DataLen);
   }
 
@@ -101,11 +101,11 @@
   static data_type ReadData(const internal_key_type& k,
                             const unsigned char* d,
                             unsigned DataLen) {
-    using namespace clang::io;
+    using namespace llvm::support;
 
     data_type Result;
     while (DataLen > 0) {
-      unsigned ID = ReadUnalignedLE32(d);
+      unsigned ID = endian::readNext<uint32_t, little, unaligned>(d);
       Result.push_back(ID);
       DataLen -= 4;
     }
@@ -228,7 +228,7 @@
   IndexPath += Path;
   llvm::sys::path::append(IndexPath, IndexFileName);
 
-  llvm::OwningPtr<llvm::MemoryBuffer> Buffer;
+  std::unique_ptr<llvm::MemoryBuffer> Buffer;
   if (llvm::MemoryBuffer::getFile(IndexPath.c_str(), Buffer) !=
       llvm::errc::success)
     return std::make_pair((GlobalModuleIndex *)0, EC_NotFound);
@@ -247,8 +247,9 @@
       Cursor.Read(8) != 'I') {
     return std::make_pair((GlobalModuleIndex *)0, EC_IOError);
   }
-  
-  return std::make_pair(new GlobalModuleIndex(Buffer.take(), Cursor), EC_None);
+
+  return std::make_pair(new GlobalModuleIndex(Buffer.release(), Cursor),
+                        EC_None);
 }
 
 void
@@ -458,8 +459,8 @@
                        unsigned DataLen) {
       // The first bit indicates whether this identifier is interesting.
       // That's all we care about.
-      using namespace clang::io;
-      unsigned RawID = ReadUnalignedLE32(d);
+      using namespace llvm::support;
+      unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
       bool IsInteresting = RawID & 0x01;
       return std::make_pair(k, IsInteresting);
     }
@@ -468,7 +469,7 @@
 
 bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
   // Open the module file.
-  OwningPtr<llvm::MemoryBuffer> Buffer;
+  std::unique_ptr<llvm::MemoryBuffer> Buffer;
   std::string ErrorStr;
   Buffer.reset(FileMgr.getBufferForFile(File, &ErrorStr, /*isVolatile=*/true));
   if (!Buffer) {
@@ -592,10 +593,10 @@
     if (State == ASTBlock && Code == IDENTIFIER_TABLE && Record[0] > 0) {
       typedef OnDiskChainedHashTable<InterestingASTIdentifierLookupTrait>
         InterestingIdentifierTable;
-      llvm::OwningPtr<InterestingIdentifierTable>
-        Table(InterestingIdentifierTable::Create(
-                (const unsigned char *)Blob.data() + Record[0],
-                (const unsigned char *)Blob.data()));
+      std::unique_ptr<InterestingIdentifierTable> Table(
+          InterestingIdentifierTable::Create(
+              (const unsigned char *)Blob.data() + Record[0],
+              (const unsigned char *)Blob.data()));
       for (InterestingIdentifierTable::data_iterator D = Table->data_begin(),
                                                      DEnd = Table->data_end();
            D != DEnd; ++D) {
@@ -630,10 +631,12 @@
 
   std::pair<unsigned,unsigned>
   EmitKeyDataLength(raw_ostream& Out, key_type_ref Key, data_type_ref Data) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
     unsigned KeyLen = Key.size();
     unsigned DataLen = Data.size() * 4;
-    clang::io::Emit16(Out, KeyLen);
-    clang::io::Emit16(Out, DataLen);
+    LE.write<uint16_t>(KeyLen);
+    LE.write<uint16_t>(DataLen);
     return std::make_pair(KeyLen, DataLen);
   }
   
@@ -643,8 +646,9 @@
 
   void EmitData(raw_ostream& Out, key_type_ref Key, data_type_ref Data,
                 unsigned DataLen) {
+    using namespace llvm::support;
     for (unsigned I = 0, N = Data.size(); I != N; ++I)
-      clang::io::Emit32(Out, Data[I]);
+      endian::Writer<little>(Out).write<uint32_t>(Data[I]);
   }
 };
 
@@ -706,9 +710,10 @@
     SmallString<4096> IdentifierTable;
     uint32_t BucketOffset;
     {
+      using namespace llvm::support;
       llvm::raw_svector_ostream Out(IdentifierTable);
       // Make sure that no bucket is at offset 0
-      clang::io::Emit32(Out, 0);
+      endian::Writer<little>(Out).write<uint32_t>(0);
       BucketOffset = Generator.Emit(Out, Trait);
     }
 
@@ -807,13 +812,12 @@
     return EC_IOError;
 
   // Remove the old index file. It isn't relevant any more.
-  bool OldIndexExisted;
-  llvm::sys::fs::remove(IndexPath.str(), OldIndexExisted);
+  llvm::sys::fs::remove(IndexPath.str());
 
   // Rename the newly-written index file to the proper name.
   if (llvm::sys::fs::rename(IndexTmpPath.str(), IndexPath.str())) {
     // Rename failed; just remove the 
-    llvm::sys::fs::remove(IndexTmpPath.str(), OldIndexExisted);
+    llvm::sys::fs::remove(IndexTmpPath.str());
     return EC_IOError;
   }
 
@@ -835,7 +839,7 @@
       End = Idx.key_end();
     }
 
-    virtual StringRef Next() {
+    StringRef Next() override {
       if (Current == End)
         return StringRef();
 
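
The GlobalModuleIndex changes above replace clang::io::ReadUnalignedLE*/Emit* with the llvm::support endian helpers, which read and write fixed-width little-endian values through possibly unaligned byte pointers and advance the pointer. A rough standalone approximation of that pattern, built on memcpy instead of LLVM's actual headers (the helper names below are made up, and the sketch assumes a little-endian host):

#include <cassert>
#include <cstdint>
#include <cstring>

template <typename T> static T readNextLE(const unsigned char *&P) {
  T V = 0;
  std::memcpy(&V, P, sizeof(T)); // memcpy tolerates unaligned pointers
  P += sizeof(T);                // advance past the value, like readNext
  // Assumes a little-endian host; a real implementation byte-swaps as needed.
  return V;
}

template <typename T> static void writeLE(unsigned char *&P, T V) {
  std::memcpy(P, &V, sizeof(T));
  P += sizeof(T);
}

int main() {
  unsigned char Buf[8];
  unsigned char *Out = Buf;
  writeLE<uint16_t>(Out, 12);   // key length
  writeLE<uint16_t>(Out, 8);    // data length
  writeLE<uint32_t>(Out, 42);   // one 32-bit module ID

  const unsigned char *In = Buf;
  assert(readNextLE<uint16_t>(In) == 12);
  assert(readNextLE<uint16_t>(In) == 8);
  assert(readNextLE<uint32_t>(In) == 42);
  return 0;
}
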
diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp
index 9c4b3d9..3513eba 100644
--- a/lib/Serialization/ModuleManager.cpp
+++ b/lib/Serialization/ModuleManager.cpp
@@ -86,6 +86,16 @@
     NewModule = true;
     ModuleEntry = New;
 
+    New->InputFilesValidationTimestamp = 0;
+    if (New->Kind == MK_Module) {
+      std::string TimestampFilename = New->getTimestampFilename();
+      vfs::Status Status;
+      // A cached stat value would be fine as well.
+      if (!FileMgr.getNoncachedStatValue(TimestampFilename, Status))
+        New->InputFilesValidationTimestamp =
+            Status.getLastModificationTime().toEpochTime();
+    }
+
     // Load the contents of the module
     if (llvm::MemoryBuffer *Buffer = lookupBuffer(FileName)) {
       // The buffer was already provided for us.
@@ -124,22 +134,6 @@
   return NewModule? NewlyLoaded : AlreadyLoaded;
 }
 
-namespace {
-  /// \brief Predicate that checks whether a module file occurs within
-  /// the given set.
-  class IsInModuleFileSet : public std::unary_function<ModuleFile *, bool> {
-    llvm::SmallPtrSet<ModuleFile *, 4> &Removed;
-
-  public:
-    IsInModuleFileSet(llvm::SmallPtrSet<ModuleFile *, 4> &Removed)
-    : Removed(Removed) { }
-
-    bool operator()(ModuleFile *MF) const {
-      return Removed.count(MF);
-    }
-  };
-}
-
 void ModuleManager::removeModules(ModuleIterator first, ModuleIterator last,
                                   ModuleMap *modMap) {
   if (first == last)
@@ -149,9 +143,10 @@
   llvm::SmallPtrSet<ModuleFile *, 4> victimSet(first, last);
 
   // Remove any references to the now-destroyed modules.
-  IsInModuleFileSet checkInSet(victimSet);
   for (unsigned i = 0, n = Chain.size(); i != n; ++i) {
-    Chain[i]->ImportedBy.remove_if(checkInSet);
+    Chain[i]->ImportedBy.remove_if([&](ModuleFile *MF) {
+      return victimSet.count(MF);
+    });
   }
 
   // Delete the modules and erase them from the various structures.
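
ModuleManager::removeModules now passes a capturing lambda to remove_if instead of the hand-written IsInModuleFileSet functor. A minimal standalone sketch of the same substitution, with std::vector and std::set standing in for the real SetVector and SmallPtrSet:

#include <algorithm>
#include <set>
#include <vector>

struct ModuleFile { int ID; };   // stand-in for clang's ModuleFile

int main() {
  ModuleFile A{1}, B{2}, C{3};
  std::vector<ModuleFile *> ImportedBy = {&A, &B, &C};
  std::set<ModuleFile *> VictimSet = {&B};

  // Equivalent of ImportedBy.remove_if(checkInSet) without the named class:
  // the lambda captures the victim set and tests membership directly.
  ImportedBy.erase(std::remove_if(ImportedBy.begin(), ImportedBy.end(),
                                  [&](ModuleFile *MF) {
                                    return VictimSet.count(MF) != 0;
                                  }),
                   ImportedBy.end());

  return ImportedBy.size() == 2 ? 0 : 1;   // A and C survive
}
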
diff --git a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index 9af0a5a..3becdca 100644
--- a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -112,7 +112,7 @@
       << " | Empty WorkList: "
       << (Eng.hasEmptyWorkList() ? "yes" : "no");
 
-  B.EmitBasicReport(D, "Analyzer Statistics", "Internal Statistics",
+  B.EmitBasicReport(D, this, "Analyzer Statistics", "Internal Statistics",
                     output.str(), PathDiagnosticLocation(D, SM));
 
   // Emit warning for each block we bailed out on.
@@ -129,7 +129,7 @@
       outputI << "(" << NameOfRootFunction << ")" <<
                  ": The analyzer generated a sink at this point";
       B.EmitBasicReport(
-          D, "Sink Point", "Internal Statistics", outputI.str(),
+          D, this, "Sink Point", "Internal Statistics", outputI.str(),
           PathDiagnosticLocation::createBegin(CS->getStmt(), SM, LC));
     }
   }
diff --git a/lib/StaticAnalyzer/Checkers/Android.mk b/lib/StaticAnalyzer/Checkers/Android.mk
index 902fc11..bb2a539 100644
--- a/lib/StaticAnalyzer/Checkers/Android.mk
+++ b/lib/StaticAnalyzer/Checkers/Android.mk
@@ -5,6 +5,7 @@
   AttrList.inc \
   AttrParsedAttrList.inc \
   Attrs.inc \
+  AttrVisitor.inc \
   Checkers.inc \
   CommentCommandList.inc \
   CommentNodes.inc \
@@ -41,7 +42,6 @@
   ExprInspectionChecker.cpp \
   FixedAddressChecker.cpp \
   GenericTaintChecker.cpp \
-  IdempotentOperationChecker.cpp \
   IdenticalExprChecker.cpp \
   IvarInvalidationChecker.cpp \
   LLVMConventionsChecker.cpp \
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 312bc74..cb5b010 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -25,7 +25,8 @@
 namespace {
 class ArrayBoundChecker : 
     public Checker<check::Location> {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
+
 public:
   void checkLocation(SVal l, bool isLoad, const Stmt* S,
                      CheckerContext &C) const;
@@ -66,8 +67,9 @@
       return;
   
     if (!BT)
-      BT.reset(new BuiltinBug("Out-of-bound array access",
-                       "Access out-of-bound array element (buffer overflow)"));
+      BT.reset(new BuiltinBug(
+          this, "Out-of-bound array access",
+          "Access out-of-bound array element (buffer overflow)"));
 
     // FIXME: It would be nice to eventually make this diagnostic more clear,
     // e.g., by referencing the original declaration or by saying *why* this
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 5e4b824..a8d7b3a 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -28,8 +28,8 @@
 namespace {
 class ArrayBoundCheckerV2 : 
     public Checker<check::Location> {
-  mutable OwningPtr<BuiltinBug> BT;
-      
+  mutable std::unique_ptr<BuiltinBug> BT;
+
   enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
   
   void reportOOB(CheckerContext &C, ProgramStateRef errorState,
@@ -120,7 +120,7 @@
       return;
     
     ProgramStateRef state_precedesLowerBound, state_withinLowerBound;
-    llvm::tie(state_precedesLowerBound, state_withinLowerBound) =
+    std::tie(state_precedesLowerBound, state_withinLowerBound) =
       state->assume(*lowerBoundToCheck);
 
     // Are we constrained enough to definitely precede the lower bound?
@@ -152,7 +152,7 @@
       break;
   
     ProgramStateRef state_exceedsUpperBound, state_withinUpperBound;
-    llvm::tie(state_exceedsUpperBound, state_withinUpperBound) =
+    std::tie(state_exceedsUpperBound, state_withinUpperBound) =
       state->assume(*upperboundToCheck);
 
     // If we are under constrained and the index variables are tainted, report.
@@ -187,7 +187,7 @@
     return;
 
   if (!BT)
-    BT.reset(new BuiltinBug("Out-of-bound access"));
+    BT.reset(new BuiltinBug(this, "Out-of-bound access"));
 
   // FIXME: This diagnostics are preliminary.  We should get far better
   // diagnostics for explaining buffer overruns.
@@ -311,7 +311,6 @@
   return RegionRawOffsetV2();
 }
 
-
 void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
   mgr.registerChecker<ArrayBoundCheckerV2>();
 }
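
The llvm::tie calls throughout these checkers become std::tie, which binds the two states returned as a std::pair from an assume-style call into separate locals. A tiny standalone illustration (assumeCond below is a made-up stand-in for the real ProgramState::assume API):

#include <cassert>
#include <tuple>
#include <utility>

// Hypothetical stand-in: yields the feasibility of "cond is true" and
// "cond is false" as a pair, the way assume() yields two program states.
static std::pair<bool, bool> assumeCond(bool KnownTrue) {
  return std::make_pair(KnownTrue, !KnownTrue);
}

int main() {
  bool StateTrue, StateFalse;
  // llvm::tie(...) in the old code; std::tie(...) after the C++11 switch.
  std::tie(StateTrue, StateFalse) = assumeCond(true);
  assert(StateTrue && !StateFalse);
  return 0;
}
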
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index f66f8b7..4b2ccd4 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -39,7 +39,8 @@
 namespace {
 class APIMisuse : public BugType {
 public:
-  APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
+  APIMisuse(const CheckerBase *checker, const char *name)
+      : BugType(checker, name, "API Misuse (Apple)") {}
 };
 } // end anonymous namespace
 
@@ -94,7 +95,7 @@
   class NilArgChecker : public Checker<check::PreObjCMessage,
                                        check::PostStmt<ObjCDictionaryLiteral>,
                                        check::PostStmt<ObjCArrayLiteral> > {
-    mutable OwningPtr<APIMisuse> BT;
+    mutable std::unique_ptr<APIMisuse> BT;
 
     void warnIfNilExpr(const Expr *E,
                        const char *Msg,
@@ -170,10 +171,13 @@
           assert(Arg == 1);
           os << "Key argument ";
         }
-        os << "to '" << msg.getSelector().getAsString() << "' cannot be nil";
+        os << "to '";
+        msg.getSelector().print(os);
+        os << "' cannot be nil";
       } else {
-        os << "Argument to '" << GetReceiverInterfaceName(msg) << "' method '"
-        << msg.getSelector().getAsString() << "' cannot be nil";
+        os << "Argument to '" << GetReceiverInterfaceName(msg) << "' method '";
+        msg.getSelector().print(os);
+        os << "' cannot be nil";
       }
     }
     
@@ -188,7 +192,7 @@
                                       const Expr *E,
                                       CheckerContext &C) const {
   if (!BT)
-    BT.reset(new APIMisuse("nil argument"));
+    BT.reset(new APIMisuse(this, "nil argument"));
 
   BugReport *R = new BugReport(*BT, Msg, N);
   R->addRange(Range);
@@ -309,7 +313,7 @@
 
 namespace {
 class CFNumberCreateChecker : public Checker< check::PreStmt<CallExpr> > {
-  mutable OwningPtr<APIMisuse> BT;
+  mutable std::unique_ptr<APIMisuse> BT;
   mutable IdentifierInfo* II;
 public:
   CFNumberCreateChecker() : II(0) {}
@@ -480,8 +484,8 @@
       << " bits of the input integer will be lost.";
 
     if (!BT)
-      BT.reset(new APIMisuse("Bad use of CFNumberCreate"));
-    
+      BT.reset(new APIMisuse(this, "Bad use of CFNumberCreate"));
+
     BugReport *report = new BugReport(*BT, os.str(), N);
     report->addRange(CE->getArg(2)->getSourceRange());
     C.emitReport(report);
@@ -494,7 +498,7 @@
 
 namespace {
 class CFRetainReleaseChecker : public Checker< check::PreStmt<CallExpr> > {
-  mutable OwningPtr<APIMisuse> BT;
+  mutable std::unique_ptr<APIMisuse> BT;
   mutable IdentifierInfo *Retain, *Release, *MakeCollectable;
 public:
   CFRetainReleaseChecker(): Retain(0), Release(0), MakeCollectable(0) {}
@@ -519,8 +523,8 @@
     Retain = &Ctx.Idents.get("CFRetain");
     Release = &Ctx.Idents.get("CFRelease");
     MakeCollectable = &Ctx.Idents.get("CFMakeCollectable");
-    BT.reset(
-      new APIMisuse("null passed to CFRetain/CFRelease/CFMakeCollectable"));
+    BT.reset(new APIMisuse(
+        this, "null passed to CFRetain/CFRelease/CFMakeCollectable"));
   }
 
   // Check if we called CFRetain/CFRelease/CFMakeCollectable.
@@ -548,7 +552,7 @@
 
   // Are they equal?
   ProgramStateRef stateTrue, stateFalse;
-  llvm::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
+  std::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
 
   if (stateTrue && !stateFalse) {
     ExplodedNode *N = C.generateSink(stateTrue);
@@ -586,7 +590,7 @@
   mutable Selector retainS;
   mutable Selector autoreleaseS;
   mutable Selector drainS;
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
 
 public:
   void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
@@ -597,9 +601,9 @@
                                               CheckerContext &C) const {
   
   if (!BT) {
-    BT.reset(new APIMisuse("message incorrectly sent to class instead of class "
-                           "instance"));
-  
+    BT.reset(new APIMisuse(
+        this, "message incorrectly sent to class instead of class instance"));
+
     ASTContext &Ctx = C.getASTContext();
     releaseS = GetNullarySelector("release", Ctx);
     retainS = GetNullarySelector("retain", Ctx);
@@ -620,7 +624,9 @@
     SmallString<200> buf;
     llvm::raw_svector_ostream os(buf);
 
-    os << "The '" << S.getAsString() << "' message should be sent to instances "
+    os << "The '";
+    S.print(os);
+    os << "' message should be sent to instances "
           "of class '" << Class->getName()
        << "' and not the class directly";
   
@@ -643,7 +649,7 @@
   mutable Selector orderedSetWithObjectsS;
   mutable Selector initWithObjectsS;
   mutable Selector initWithObjectsAndKeysS;
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
 
   bool isVariadicMessage(const ObjCMethodCall &msg) const;
 
@@ -703,7 +709,8 @@
 void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
                                                     CheckerContext &C) const {
   if (!BT) {
-    BT.reset(new APIMisuse("Arguments passed to variadic method aren't all "
+    BT.reset(new APIMisuse(this,
+                           "Arguments passed to variadic method aren't all "
                            "Objective-C pointer types"));
 
     ASTContext &Ctx = C.getASTContext();
@@ -733,8 +740,7 @@
 
   // Verify that all arguments have Objective-C types.
   Optional<ExplodedNode*> errorNode;
-  ProgramStateRef state = C.getState();
-  
+
   for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
     QualType ArgTy = msg.getArgExpr(I)->getType();
     if (ArgTy->isObjCObjectPointerType())
@@ -772,8 +778,8 @@
     else
       os << "Argument to method '";
 
-    os << msg.getSelector().getAsString() 
-       << "' should be an Objective-C pointer type, not '";
+    msg.getSelector().print(os);
+    os << "' should be an Objective-C pointer type, not '";
     ArgTy.print(os, C.getLangOpts());
     os << "'";
 
@@ -852,7 +858,7 @@
     return State;
 
   ProgramStateRef StNonNil, StNil;
-  llvm::tie(StNonNil, StNil) = State->assume(*KnownCollection);
+  std::tie(StNonNil, StNil) = State->assume(*KnownCollection);
   if (StNil && !StNonNil) {
     // The collection is nil. This path is infeasible.
     return NULL;
@@ -1135,7 +1141,10 @@
 /// \brief The checker restricts the return values of APIs known to
 /// never (or almost never) return 'nil'.
 class ObjCNonNilReturnValueChecker
-  : public Checker<check::PostObjCMessage> {
+  : public Checker<check::PostObjCMessage,
+                   check::PostStmt<ObjCArrayLiteral>,
+                   check::PostStmt<ObjCDictionaryLiteral>,
+                   check::PostStmt<ObjCBoxedExpr> > {
     mutable bool Initialized;
     mutable Selector ObjectAtIndex;
     mutable Selector ObjectAtIndexedSubscript;
@@ -1143,13 +1152,32 @@
 
 public:
   ObjCNonNilReturnValueChecker() : Initialized(false) {}
+
+  ProgramStateRef assumeExprIsNonNull(const Expr *NonNullExpr,
+                                      ProgramStateRef State,
+                                      CheckerContext &C) const;
+  void assumeExprIsNonNull(const Expr *E, CheckerContext &C) const {
+    C.addTransition(assumeExprIsNonNull(E, C.getState(), C));
+  }
+
+  void checkPostStmt(const ObjCArrayLiteral *E, CheckerContext &C) const {
+    assumeExprIsNonNull(E, C);
+  }
+  void checkPostStmt(const ObjCDictionaryLiteral *E, CheckerContext &C) const {
+    assumeExprIsNonNull(E, C);
+  }
+  void checkPostStmt(const ObjCBoxedExpr *E, CheckerContext &C) const {
+    assumeExprIsNonNull(E, C);
+  }
+
   void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
 };
 }
 
-static ProgramStateRef assumeExprIsNonNull(const Expr *NonNullExpr,
-                                           ProgramStateRef State,
-                                           CheckerContext &C) {
+ProgramStateRef
+ObjCNonNilReturnValueChecker::assumeExprIsNonNull(const Expr *NonNullExpr,
+                                                  ProgramStateRef State,
+                                                  CheckerContext &C) const {
   SVal Val = State->getSVal(NonNullExpr, C.getLocationContext());
   if (Optional<DefinedOrUnknownSVal> DV = Val.getAs<DefinedOrUnknownSVal>())
     return State->assume(*DV, true);
@@ -1237,6 +1265,7 @@
   mgr.registerChecker<ObjCLoopChecker>();
 }
 
-void ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
+void
+ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
   mgr.registerChecker<ObjCNonNilReturnValueChecker>();
 }
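
These checkers also move their lazily created bug types from OwningPtr to std::unique_ptr, and the BugType/BuiltinBug constructors now take the owning checker as their first argument. A compact standalone sketch of the lazy-initialization idiom with simplified stand-in types (the real BugType and Checker classes are Clang's):

#include <cassert>
#include <memory>
#include <string>

struct BugType {
  const void *Checker;   // new constructors record the owning checker
  std::string Name;
  BugType(const void *C, std::string N) : Checker(C), Name(std::move(N)) {}
};

class SomeChecker {
  // mutable: the bug type is created on demand from a const check callback.
  mutable std::unique_ptr<BugType> BT;

public:
  const BugType &getBugType() const {
    if (!BT)                               // first report lazily creates it
      BT.reset(new BugType(this, "nil argument"));
    return *BT;
  }
};

int main() {
  SomeChecker C;
  assert(C.getBugType().Name == "nil argument");
  return 0;
}
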
diff --git a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index 5169244..83a37c9 100644
--- a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -23,7 +23,7 @@
 
 namespace {
   class BoolAssignmentChecker : public Checker< check::Bind > {
-    mutable OwningPtr<BuiltinBug> BT;
+    mutable std::unique_ptr<BuiltinBug> BT;
     void emitReport(ProgramStateRef state, CheckerContext &C) const;
   public:
     void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
@@ -34,7 +34,7 @@
                                        CheckerContext &C) const {
   if (ExplodedNode *N = C.addTransition(state)) {
     if (!BT)
-      BT.reset(new BuiltinBug("Assignment of a non-Boolean value"));    
+      BT.reset(new BuiltinBug(this, "Assignment of a non-Boolean value"));
     C.emitReport(new BugReport(*BT, BT->getDescription(), N));
   }
 }
@@ -96,7 +96,7 @@
   }
   
   ProgramStateRef stateLT, stateGE;
-  llvm::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
+  std::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
   
   // Is it possible for the value to be less than zero?
   if (stateLT) {
@@ -132,7 +132,7 @@
   }
   
   ProgramStateRef stateGT, stateLE;
-  llvm::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
+  std::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
   
   // Is it possible for the value to be greater than one?
   if (stateGT) {
diff --git a/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
index ebd3377..8e7a839 100644
--- a/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -3,6 +3,10 @@
   SOURCE Checkers.td
   TARGET ClangSACheckers)
 
+set(LLVM_LINK_COMPONENTS
+  Support
+  )
+
 add_clang_library(clangStaticAnalyzerCheckers
   AllocationDiagnostics.cpp
   AnalyzerStatsChecker.cpp
@@ -32,7 +36,6 @@
   ExprInspectionChecker.cpp
   FixedAddressChecker.cpp
   GenericTaintChecker.cpp
-  IdempotentOperationChecker.cpp
   IdenticalExprChecker.cpp
   IvarInvalidationChecker.cpp
   LLVMConventionsChecker.cpp
@@ -71,21 +74,13 @@
   UnreachableCodeChecker.cpp
   VLASizeChecker.cpp
   VirtualCallChecker.cpp
-  )
 
-add_dependencies(clangStaticAnalyzerCheckers
-  clangStaticAnalyzerCore
-  ClangAttrClasses
-  ClangAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangStmtNodes
+  DEPENDS
   ClangSACheckers
-  )
 
-target_link_libraries(clangStaticAnalyzerCheckers
-  clangBasic
+  LINK_LIBS
   clangAST
+  clangAnalysis
+  clangBasic
   clangStaticAnalyzerCore
   )
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index c3736d7..d7c1d94 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -35,11 +35,8 @@
                                          check::DeadSymbols,
                                          check::RegionChanges
                                          > {
-  mutable OwningPtr<BugType> BT_Null,
-                             BT_Bounds,
-                             BT_Overlap,
-                             BT_NotCString,
-                             BT_AdditionOverflow;
+  mutable std::unique_ptr<BugType> BT_Null, BT_Bounds, BT_Overlap,
+      BT_NotCString, BT_AdditionOverflow;
 
   mutable const char *CurrentFunctionDescription;
 
@@ -51,6 +48,11 @@
     DefaultBool CheckCStringOutOfBounds;
     DefaultBool CheckCStringBufferOverlap;
     DefaultBool CheckCStringNotNullTerm;
+
+    CheckName CheckNameCStringNullArg;
+    CheckName CheckNameCStringOutOfBounds;
+    CheckName CheckNameCStringBufferOverlap;
+    CheckName CheckNameCStringNotNullTerm;
   };
 
   CStringChecksFilter Filter;
@@ -221,7 +223,7 @@
     return NULL;
 
   ProgramStateRef stateNull, stateNonNull;
-  llvm::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
+  std::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
 
   if (stateNull && !stateNonNull) {
     if (!Filter.CheckCStringNullArg)
@@ -232,8 +234,9 @@
       return NULL;
 
     if (!BT_Null)
-      BT_Null.reset(new BuiltinBug(categories::UnixAPI,
-        "Null pointer argument in call to byte string function"));
+      BT_Null.reset(new BuiltinBug(
+          Filter.CheckNameCStringNullArg, categories::UnixAPI,
+          "Null pointer argument in call to byte string function"));
 
     SmallString<80> buf;
     llvm::raw_svector_ostream os(buf);
@@ -294,8 +297,9 @@
       return NULL;
 
     if (!BT_Bounds) {
-      BT_Bounds.reset(new BuiltinBug("Out-of-bound array access",
-        "Byte string function accesses out-of-bound array element"));
+      BT_Bounds.reset(new BuiltinBug(
+          Filter.CheckNameCStringOutOfBounds, "Out-of-bound array access",
+          "Byte string function accesses out-of-bound array element"));
     }
     BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds.get());
 
@@ -439,7 +443,7 @@
 
   // Are the two values the same?
   SValBuilder &svalBuilder = C.getSValBuilder();  
-  llvm::tie(stateTrue, stateFalse) =
+  std::tie(stateTrue, stateFalse) =
     state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
 
   if (stateTrue && !stateFalse) {
@@ -461,7 +465,7 @@
   if (!reverseTest)
     return state;
 
-  llvm::tie(stateTrue, stateFalse) = state->assume(*reverseTest);
+  std::tie(stateTrue, stateFalse) = state->assume(*reverseTest);
   if (stateTrue) {
     if (stateFalse) {
       // If we don't know which one comes first, we can't perform this test.
@@ -506,7 +510,7 @@
   if (!OverlapTest)
     return state;
 
-  llvm::tie(stateTrue, stateFalse) = state->assume(*OverlapTest);
+  std::tie(stateTrue, stateFalse) = state->assume(*OverlapTest);
 
   if (stateTrue && !stateFalse) {
     // Overlap!
@@ -526,7 +530,8 @@
     return;
 
   if (!BT_Overlap)
-    BT_Overlap.reset(new BugType(categories::UnixAPI, "Improper arguments"));
+    BT_Overlap.reset(new BugType(Filter.CheckNameCStringBufferOverlap,
+                                 categories::UnixAPI, "Improper arguments"));
 
   // Generate a report for this bug.
   BugReport *report = 
@@ -576,7 +581,7 @@
                                                 *maxMinusRightNL, cmpTy);
 
     ProgramStateRef stateOverflow, stateOkay;
-    llvm::tie(stateOverflow, stateOkay) =
+    std::tie(stateOverflow, stateOkay) =
       state->assume(willOverflow.castAs<DefinedOrUnknownSVal>());
 
     if (stateOverflow && !stateOkay) {
@@ -586,8 +591,9 @@
         return NULL;
 
       if (!BT_AdditionOverflow)
-        BT_AdditionOverflow.reset(new BuiltinBug("API",
-          "Sum of expressions causes overflow"));
+        BT_AdditionOverflow.reset(
+            new BuiltinBug(Filter.CheckNameCStringOutOfBounds, "API",
+                           "Sum of expressions causes overflow"));
 
       // This isn't a great error message, but this should never occur in real
       // code anyway -- you'd have to create a buffer longer than a size_t can
@@ -703,8 +709,9 @@
 
       if (ExplodedNode *N = C.addTransition(state)) {
         if (!BT_NotCString)
-          BT_NotCString.reset(new BuiltinBug(categories::UnixAPI,
-            "Argument is not a null-terminated string."));
+          BT_NotCString.reset(new BuiltinBug(
+              Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
+              "Argument is not a null-terminated string."));
 
         SmallString<120> buf;
         llvm::raw_svector_ostream os(buf);
@@ -714,8 +721,7 @@
            << "', which is not a null-terminated string";
 
         // Generate a report for this bug.
-        BugReport *report = new BugReport(*BT_NotCString,
-                                                          os.str(), N);
+        BugReport *report = new BugReport(*BT_NotCString, os.str(), N);
 
         report->addRange(Ex->getSourceRange());
         C.emitReport(report);        
@@ -763,8 +769,9 @@
 
     if (ExplodedNode *N = C.addTransition(state)) {
       if (!BT_NotCString)
-        BT_NotCString.reset(new BuiltinBug(categories::UnixAPI,
-          "Argument is not a null-terminated string."));
+        BT_NotCString.reset(new BuiltinBug(
+            Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
+            "Argument is not a null-terminated string."));
 
       SmallString<120> buf;
       llvm::raw_svector_ostream os(buf);
@@ -909,7 +916,7 @@
   QualType sizeTy = Size->getType();
 
   ProgramStateRef stateZeroSize, stateNonZeroSize;
-  llvm::tie(stateZeroSize, stateNonZeroSize) =
+  std::tie(stateZeroSize, stateNonZeroSize) =
     assumeZero(C, state, sizeVal, sizeTy);
 
   // Get the value of the Dest.
@@ -1066,7 +1073,7 @@
   QualType sizeTy = Size->getType();
 
   ProgramStateRef stateZeroSize, stateNonZeroSize;
-  llvm::tie(stateZeroSize, stateNonZeroSize) =
+  std::tie(stateZeroSize, stateNonZeroSize) =
     assumeZero(C, state, sizeVal, sizeTy);
 
   // If the size can be zero, the result will be 0 in that case, and we don't
@@ -1092,7 +1099,7 @@
     // See if they are the same.
     DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
     ProgramStateRef StSameBuf, StNotSameBuf;
-    llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+    std::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
 
     // If the two arguments might be the same buffer, we know the result is 0,
     // and we only need to check one size.
@@ -1150,7 +1157,7 @@
     SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
 
     ProgramStateRef stateZeroSize, stateNonZeroSize;
-    llvm::tie(stateZeroSize, stateNonZeroSize) =
+    std::tie(stateZeroSize, stateNonZeroSize) =
       assumeZero(C, state, maxlenVal, maxlenExpr->getType());
 
     // If the size can be zero, the result will be 0 in that case, and we don't
@@ -1204,10 +1211,10 @@
       ProgramStateRef stateStringTooLong, stateStringNotTooLong;
 
       // Check if the strLength is greater than the maxlen.
-      llvm::tie(stateStringTooLong, stateStringNotTooLong) =
-          state->assume(C.getSValBuilder().evalBinOpNN(
-              state, BO_GT, *strLengthNL, *maxlenValNL, cmpTy)
-                            .castAs<DefinedOrUnknownSVal>());
+      std::tie(stateStringTooLong, stateStringNotTooLong) = state->assume(
+          C.getSValBuilder()
+              .evalBinOpNN(state, BO_GT, *strLengthNL, *maxlenValNL, cmpTy)
+              .castAs<DefinedOrUnknownSVal>());
 
       if (stateStringTooLong && !stateStringNotTooLong) {
         // If the string is longer than maxlen, return maxlen.
@@ -1371,7 +1378,7 @@
       // Check if the max number to copy is less than the length of the src.
       // If the bound is equal to the source length, strncpy won't null-
       // terminate the result!
-      llvm::tie(stateSourceTooLong, stateSourceNotTooLong) = state->assume(
+      std::tie(stateSourceTooLong, stateSourceNotTooLong) = state->assume(
           svalBuilder.evalBinOpNN(state, BO_GE, *strLengthNL, *lenValNL, cmpTy)
               .castAs<DefinedOrUnknownSVal>());
 
@@ -1418,7 +1425,7 @@
         // case strncpy will do no work at all. Our bounds check uses n-1
         // as the last element accessed, so n == 0 is problematic.
         ProgramStateRef StateZeroSize, StateNonZeroSize;
-        llvm::tie(StateZeroSize, StateNonZeroSize) =
+        std::tie(StateZeroSize, StateNonZeroSize) =
           assumeZero(C, state, *lenValNL, sizeTy);
 
         // If the size is known to be zero, we're done.
@@ -1711,7 +1718,7 @@
   SValBuilder &svalBuilder = C.getSValBuilder();
   DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
   ProgramStateRef StSameBuf, StNotSameBuf;
-  llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+  std::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
 
   // If the two arguments might be the same buffer, we know the result is 0,
   // and we only need to check one size.
@@ -1928,9 +1935,8 @@
   // Record string length for char a[] = "abc";
   ProgramStateRef state = C.getState();
 
-  for (DeclStmt::const_decl_iterator I = DS->decl_begin(), E = DS->decl_end();
-       I != E; ++I) {
-    const VarDecl *D = dyn_cast<VarDecl>(*I);
+  for (const auto *I : DS->decls()) {
+    const VarDecl *D = dyn_cast<VarDecl>(I);
     if (!D)
       continue;
 
@@ -2057,10 +2063,12 @@
   C.addTransition(state);
 }
 
-#define REGISTER_CHECKER(name) \
-void ento::register##name(CheckerManager &mgr) {\
-  mgr.registerChecker<CStringChecker>()->Filter.Check##name = true; \
-}
+#define REGISTER_CHECKER(name)                                                 \
+  void ento::register##name(CheckerManager &mgr) {                             \
+    CStringChecker *checker = mgr.registerChecker<CStringChecker>();           \
+    checker->Filter.Check##name = true;                                        \
+    checker->Filter.CheckName##name = mgr.getCurrentCheckName();               \
+  }
 
 REGISTER_CHECKER(CStringNullArg)
 REGISTER_CHECKER(CStringOutOfBounds)
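
The reworked REGISTER_CHECKER macro above now records the registered check's name in the filter alongside the enable flag. A standalone sketch of that macro shape with simplified stand-in types; the check name string used in main() is only an example:

#include <cassert>
#include <string>

struct CStringChecksFilter {
  bool CheckCStringNullArg = false;
  std::string CheckNameCStringNullArg;
};

struct Checker { CStringChecksFilter Filter; };

struct CheckerManager {
  Checker TheChecker;
  std::string CurrentCheckName;
  Checker *registerChecker() { return &TheChecker; }
  std::string getCurrentCheckName() const { return CurrentCheckName; }
};

// Each expansion defines one registration entry point that flips the flag
// and stores the name the manager is currently registering under.
#define REGISTER_CHECKER(name)                                                 \
  void register##name(CheckerManager &mgr) {                                   \
    Checker *checker = mgr.registerChecker();                                  \
    checker->Filter.Check##name = true;                                        \
    checker->Filter.CheckName##name = mgr.getCurrentCheckName();               \
  }

REGISTER_CHECKER(CStringNullArg)

int main() {
  CheckerManager Mgr;
  Mgr.CurrentCheckName = "unix.cstring.NullArg";   // illustrative name only
  registerCStringNullArg(Mgr);
  assert(Mgr.TheChecker.Filter.CheckCStringNullArg);
  assert(Mgr.TheChecker.Filter.CheckNameCStringNullArg == Mgr.CurrentCheckName);
  return 0;
}
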
diff --git a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
index d29a12a..abfb971 100644
--- a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -31,6 +31,7 @@
 
 namespace {
 class WalkAST: public StmtVisitor<WalkAST> {
+  const CheckerBase *Checker;
   BugReporter &BR;
   AnalysisDeclContext* AC;
 
@@ -81,9 +82,8 @@
   bool containsBadStrncatPattern(const CallExpr *CE);
 
 public:
-  WalkAST(BugReporter &br, AnalysisDeclContext* ac) :
-      BR(br), AC(ac) {
-  }
+  WalkAST(const CheckerBase *checker, BugReporter &br, AnalysisDeclContext *ac)
+      : Checker(checker), BR(br), AC(ac) {}
 
   // Statement visitor methods.
   void VisitChildren(Stmt *S);
@@ -157,8 +157,9 @@
         os << "U";
       os << "se a safer 'strlcat' API";
 
-      BR.EmitBasicReport(FD, "Anti-pattern in the argument", "C String API",
-                         os.str(), Loc, LenArg->getSourceRange());
+      BR.EmitBasicReport(FD, Checker, "Anti-pattern in the argument",
+                         "C String API", os.str(), Loc,
+                         LenArg->getSourceRange());
     }
   }
 
@@ -179,7 +180,7 @@
 
   void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
       BugReporter &BR) const {
-    WalkAST walker(BR, Mgr.getAnalysisDeclContext(D));
+    WalkAST walker(this, BR, Mgr.getAnalysisDeclContext(D));
     walker.Visit(D->getBody());
   }
 };
diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index fefcbe7..907f516 100644
--- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -27,24 +27,35 @@
 using namespace ento;
 
 namespace {
+
+struct ChecksFilter {
+  DefaultBool Check_CallAndMessageUnInitRefArg;
+  DefaultBool Check_CallAndMessageChecker;
+
+  CheckName CheckName_CallAndMessageUnInitRefArg;
+  CheckName CheckName_CallAndMessageChecker;
+};
+
 class CallAndMessageChecker
   : public Checker< check::PreStmt<CallExpr>,
                     check::PreStmt<CXXDeleteExpr>,
                     check::PreObjCMessage,
                     check::PreCall > {
-  mutable OwningPtr<BugType> BT_call_null;
-  mutable OwningPtr<BugType> BT_call_undef;
-  mutable OwningPtr<BugType> BT_cxx_call_null;
-  mutable OwningPtr<BugType> BT_cxx_call_undef;
-  mutable OwningPtr<BugType> BT_call_arg;
-  mutable OwningPtr<BugType> BT_cxx_delete_undef;
-  mutable OwningPtr<BugType> BT_msg_undef;
-  mutable OwningPtr<BugType> BT_objc_prop_undef;
-  mutable OwningPtr<BugType> BT_objc_subscript_undef;
-  mutable OwningPtr<BugType> BT_msg_arg;
-  mutable OwningPtr<BugType> BT_msg_ret;
-  mutable OwningPtr<BugType> BT_call_few_args;
+  mutable std::unique_ptr<BugType> BT_call_null;
+  mutable std::unique_ptr<BugType> BT_call_undef;
+  mutable std::unique_ptr<BugType> BT_cxx_call_null;
+  mutable std::unique_ptr<BugType> BT_cxx_call_undef;
+  mutable std::unique_ptr<BugType> BT_call_arg;
+  mutable std::unique_ptr<BugType> BT_cxx_delete_undef;
+  mutable std::unique_ptr<BugType> BT_msg_undef;
+  mutable std::unique_ptr<BugType> BT_objc_prop_undef;
+  mutable std::unique_ptr<BugType> BT_objc_subscript_undef;
+  mutable std::unique_ptr<BugType> BT_msg_arg;
+  mutable std::unique_ptr<BugType> BT_msg_ret;
+  mutable std::unique_ptr<BugType> BT_call_few_args;
+
 public:
+  ChecksFilter Filter;
 
   void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
   void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
@@ -52,10 +63,11 @@
   void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
 
 private:
-  static bool PreVisitProcessArg(CheckerContext &C, SVal V,
-                                 SourceRange argRange, const Expr *argEx,
-                                 bool IsFirstArgument, bool checkUninitFields,
-                                 const CallEvent &Call, OwningPtr<BugType> &BT);
+  bool PreVisitProcessArg(CheckerContext &C, SVal V, SourceRange ArgRange,
+                          const Expr *ArgEx, bool IsFirstArgument,
+                          bool CheckUninitFields, const CallEvent &Call,
+                          std::unique_ptr<BugType> &BT,
+                          const ParmVarDecl *ParamDecl) const;
 
   static void emitBadCall(BugType *BT, CheckerContext &C, const Expr *BadE);
   void emitNilReceiverBug(CheckerContext &C, const ObjCMethodCall &msg,
@@ -65,10 +77,14 @@
                          ProgramStateRef state,
                          const ObjCMethodCall &msg) const;
 
-  static void LazyInit_BT(const char *desc, OwningPtr<BugType> &BT) {
+  void LazyInit_BT(const char *desc, std::unique_ptr<BugType> &BT) const {
     if (!BT)
-      BT.reset(new BuiltinBug(desc));
+      BT.reset(new BuiltinBug(this, desc));
   }
+  bool uninitRefOrPointer(CheckerContext &C, const SVal &V,
+                          const SourceRange &ArgRange,
+                          const Expr *ArgEx, std::unique_ptr<BugType> &BT,
+                          const ParmVarDecl *ParamDecl, const char *BD) const;
 };
 } // end anonymous namespace
 
@@ -113,30 +129,86 @@
   }
 }
 
+bool CallAndMessageChecker::uninitRefOrPointer(CheckerContext &C,
+                                               const SVal &V,
+                                               const SourceRange &ArgRange,
+                                               const Expr *ArgEx,
+                                               std::unique_ptr<BugType> &BT,
+                                               const ParmVarDecl *ParamDecl,
+                                               const char *BD) const {
+  if (!Filter.Check_CallAndMessageUnInitRefArg)
+    return false;
+
+  // No parameter declaration available, i.e. variadic function argument.
+  if (!ParamDecl)
+    return false;
+
+  // If parameter is declared as pointer to const in function declaration,
+  // then check if corresponding argument in function call is
+  // pointing to undefined symbol value (uninitialized memory).
+  StringRef Message;
+
+  if (ParamDecl->getType()->isPointerType()) {
+    Message = "Function call argument is a pointer to uninitialized value";
+  } else if (ParamDecl->getType()->isReferenceType()) {
+    Message = "Function call argument is an uninitialized value";
+  } else
+    return false;
+
+  if (!ParamDecl->getType()->getPointeeType().isConstQualified())
+    return false;
+
+  if (const MemRegion *SValMemRegion = V.getAsRegion()) {
+    const ProgramStateRef State = C.getState();
+    const SVal PSV = State->getSVal(SValMemRegion);
+    if (PSV.isUndef()) {
+      if (ExplodedNode *N = C.generateSink()) {
+        LazyInit_BT(BD, BT);
+        BugReport *R = new BugReport(*BT, Message, N);
+        R->addRange(ArgRange);
+        if (ArgEx) {
+          bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
+        }
+        C.emitReport(R);
+      }
+      return true;
+    }
+  }
+  return false;
+}
+
 bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
-                                               SVal V, SourceRange argRange,
-                                               const Expr *argEx,
+                                               SVal V,
+                                               SourceRange ArgRange,
+                                               const Expr *ArgEx,
                                                bool IsFirstArgument,
-                                               bool checkUninitFields,
+                                               bool CheckUninitFields,
                                                const CallEvent &Call,
-                                               OwningPtr<BugType> &BT) {
+                                               std::unique_ptr<BugType> &BT,
+                                               const ParmVarDecl *ParamDecl
+                                               ) const {
+  const char *BD = "Uninitialized argument value";
+
+  if (uninitRefOrPointer(C, V, ArgRange, ArgEx, BT, ParamDecl, BD))
+    return true;
+
   if (V.isUndef()) {
     if (ExplodedNode *N = C.generateSink()) {
-      LazyInit_BT("Uninitialized argument value", BT);
+      LazyInit_BT(BD, BT);
 
       // Generate a report for this bug.
-      StringRef Desc = describeUninitializedArgumentInCall(Call,
-                                                           IsFirstArgument);
+      StringRef Desc =
+          describeUninitializedArgumentInCall(Call, IsFirstArgument);
       BugReport *R = new BugReport(*BT, Desc, N);
-      R->addRange(argRange);
-      if (argEx)
-        bugreporter::trackNullOrUndefValue(N, argEx, *R);
+      R->addRange(ArgRange);
+      if (ArgEx)
+        bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
       C.emitReport(R);
     }
     return true;
   }
 
-  if (!checkUninitFields)
+  if (!CheckUninitFields)
     return false;
 
   if (Optional<nonloc::LazyCompoundVal> LV =
@@ -159,10 +231,9 @@
         if (const RecordType *RT = T->getAsStructureType()) {
           const RecordDecl *RD = RT->getDecl()->getDefinition();
           assert(RD && "Referred record has no definition");
-          for (RecordDecl::field_iterator I =
-               RD->field_begin(), E = RD->field_end(); I!=E; ++I) {
-            const FieldRegion *FR = MrMgr.getFieldRegion(*I, R);
-            FieldChain.push_back(*I);
+          for (const auto *I : RD->fields()) {
+            const FieldRegion *FR = MrMgr.getFieldRegion(I, R);
+            FieldChain.push_back(I);
             T = I->getType();
             if (T->getAsStructureType()) {
               if (Find(FR))
@@ -188,7 +259,7 @@
 
     if (F.Find(D->getRegion())) {
       if (ExplodedNode *N = C.generateSink()) {
-        LazyInit_BT("Uninitialized argument value", BT);
+        LazyInit_BT(BD, BT);
         SmallString<512> Str;
         llvm::raw_svector_ostream os(Str);
         os << "Passed-by-value struct argument contains uninitialized data";
@@ -211,7 +282,7 @@
 
         // Generate a report for this bug.
         BugReport *R = new BugReport(*BT, os.str(), N);
-        R->addRange(argRange);
+        R->addRange(ArgRange);
 
         // FIXME: enhance track back for uninitialized value for arbitrary
         // memregions
@@ -234,20 +305,19 @@
 
   if (L.isUndef()) {
     if (!BT_call_undef)
-      BT_call_undef.reset(new BuiltinBug("Called function pointer is an "
-                                         "uninitalized pointer value"));
+      BT_call_undef.reset(new BuiltinBug(
+          this, "Called function pointer is an uninitialized pointer value"));
     emitBadCall(BT_call_undef.get(), C, Callee);
     return;
   }
 
   ProgramStateRef StNonNull, StNull;
-  llvm::tie(StNonNull, StNull) =
-      State->assume(L.castAs<DefinedOrUnknownSVal>());
+  std::tie(StNonNull, StNull) = State->assume(L.castAs<DefinedOrUnknownSVal>());
 
   if (StNull && !StNonNull) {
     if (!BT_call_null)
-      BT_call_null.reset(
-        new BuiltinBug("Called function pointer is null (null dereference)"));
+      BT_call_null.reset(new BuiltinBug(
+          this, "Called function pointer is null (null dereference)"));
     emitBadCall(BT_call_null.get(), C, Callee);
     return;
   }
@@ -265,7 +335,8 @@
     if (!N)
       return;
     if (!BT_cxx_delete_undef)
-      BT_cxx_delete_undef.reset(new BuiltinBug("Uninitialized argument value"));
+      BT_cxx_delete_undef.reset(
+          new BuiltinBug(this, "Uninitialized argument value"));
     if (DE->isArrayFormAsWritten())
       Desc = "Argument to 'delete[]' is uninitialized";
     else
@@ -289,20 +360,20 @@
     SVal V = CC->getCXXThisVal();
     if (V.isUndef()) {
       if (!BT_cxx_call_undef)
-        BT_cxx_call_undef.reset(new BuiltinBug("Called C++ object pointer is "
-                                               "uninitialized"));
+        BT_cxx_call_undef.reset(
+            new BuiltinBug(this, "Called C++ object pointer is uninitialized"));
       emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
       return;
     }
 
     ProgramStateRef StNonNull, StNull;
-    llvm::tie(StNonNull, StNull) =
+    std::tie(StNonNull, StNull) =
         State->assume(V.castAs<DefinedOrUnknownSVal>());
 
     if (StNull && !StNonNull) {
       if (!BT_cxx_call_null)
-        BT_cxx_call_null.reset(new BuiltinBug("Called C++ object pointer "
-                                              "is null"));
+        BT_cxx_call_null.reset(
+            new BuiltinBug(this, "Called C++ object pointer is null"));
       emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
       return;
     }
@@ -311,7 +382,8 @@
   }
 
   const Decl *D = Call.getDecl();
-  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+  if (FD) {
     // If we have a declaration, we can make sure we pass enough parameters to
     // the function.
     unsigned Params = FD->getNumParams();
@@ -340,17 +412,21 @@
   const bool checkUninitFields =
     !(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
 
-  OwningPtr<BugType> *BT;
+  std::unique_ptr<BugType> *BT;
   if (isa<ObjCMethodCall>(Call))
     BT = &BT_msg_arg;
   else
     BT = &BT_call_arg;
 
-  for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i)
+  for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i) {
+    const ParmVarDecl *ParamDecl = NULL;
+    if (FD && i < FD->getNumParams())
+      ParamDecl = FD->getParamDecl(i);
     if (PreVisitProcessArg(C, Call.getArgSVal(i), Call.getArgSourceRange(i),
                            Call.getArgExpr(i), /*IsFirstArgument=*/i == 0,
-                           checkUninitFields, Call, *BT))
+                           checkUninitFields, Call, *BT, ParamDecl))
       return;
+  }
 
   // If we make it here, record our assumptions about the callee.
   C.addTransition(State);
@@ -365,22 +441,21 @@
       switch (msg.getMessageKind()) {
       case OCM_Message:
         if (!BT_msg_undef)
-          BT_msg_undef.reset(new BuiltinBug("Receiver in message expression "
+          BT_msg_undef.reset(new BuiltinBug(this,
+                                            "Receiver in message expression "
                                             "is an uninitialized value"));
         BT = BT_msg_undef.get();
         break;
       case OCM_PropertyAccess:
         if (!BT_objc_prop_undef)
-          BT_objc_prop_undef.reset(new BuiltinBug("Property access on an "
-                                                  "uninitialized object "
-                                                  "pointer"));
+          BT_objc_prop_undef.reset(new BuiltinBug(
+              this, "Property access on an uninitialized object pointer"));
         BT = BT_objc_prop_undef.get();
         break;
       case OCM_Subscript:
         if (!BT_objc_subscript_undef)
-          BT_objc_subscript_undef.reset(new BuiltinBug("Subscript access on an "
-                                                       "uninitialized object "
-                                                       "pointer"));
+          BT_objc_subscript_undef.reset(new BuiltinBug(
+              this, "Subscript access on an uninitialized object pointer"));
         BT = BT_objc_subscript_undef.get();
         break;
       }
@@ -402,7 +477,7 @@
 
     ProgramStateRef state = C.getState();
     ProgramStateRef notNilState, nilState;
-    llvm::tie(notNilState, nilState) = state->assume(receiverVal);
+    std::tie(notNilState, nilState) = state->assume(receiverVal);
 
     // Handle receiver must be nil.
     if (nilState && !notNilState) {
@@ -418,7 +493,7 @@
 
   if (!BT_msg_ret)
     BT_msg_ret.reset(
-      new BuiltinBug("Receiver in message expression is 'nil'"));
+        new BuiltinBug(this, "Receiver in message expression is 'nil'"));
 
   const ObjCMessageExpr *ME = msg.getOriginExpr();
 
@@ -426,8 +501,9 @@
 
   SmallString<200> buf;
   llvm::raw_svector_ostream os(buf);
-  os << "The receiver of message '" << ME->getSelector().getAsString()
-     << "' is nil";
+  os << "The receiver of message '";
+  ME->getSelector().print(os);
+  os << "' is nil";
   if (ResTy->isReferenceType()) {
     os << ", which results in forming a null reference";
   } else {
@@ -454,7 +530,7 @@
                                               ProgramStateRef state,
                                               const ObjCMethodCall &Msg) const {
   ASTContext &Ctx = C.getASTContext();
-  static SimpleProgramPointTag Tag("CallAndMessageChecker : NilReceiver");
+  static CheckerProgramPointTag Tag(this, "NilReceiver");
 
   // Check the return type of the message expression.  A message to nil will
   // return different values depending on the return type and the architecture.
@@ -510,6 +586,13 @@
   C.addTransition(state);
 }
 
-void ento::registerCallAndMessageChecker(CheckerManager &mgr) {
-  mgr.registerChecker<CallAndMessageChecker>();
-}
+#define REGISTER_CHECKER(name)                                                 \
+  void ento::register##name(CheckerManager &mgr) {                             \
+    CallAndMessageChecker *Checker =                                           \
+        mgr.registerChecker<CallAndMessageChecker>();                          \
+    Checker->Filter.Check_##name = true;                                       \
+    Checker->Filter.CheckName_##name = mgr.getCurrentCheckName();              \
+  }
+
+REGISTER_CHECKER(CallAndMessageUnInitRefArg)
+REGISTER_CHECKER(CallAndMessageChecker)
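
The new uninitRefOrPointer helper above only reports when the parameter is declared as a pointer or reference to const, since a non-const pointee may legitimately be written by the callee rather than read. A small standalone sketch of that gating logic; the real version inspects QualType, while this one just takes booleans:

#include <cassert>

// Hypothetical simplification of the parameter test in uninitRefOrPointer():
// report only for "const T *" or "const T &" parameters.
static const char *diagnosticFor(bool IsPointer, bool IsReference,
                                 bool PointeeIsConst) {
  if (!PointeeIsConst)
    return nullptr;   // callee may initialize the pointee; stay quiet
  if (IsPointer)
    return "Function call argument is a pointer to uninitialized value";
  if (IsReference)
    return "Function call argument is an uninitialized value";
  return nullptr;     // plain value parameters are handled elsewhere
}

int main() {
  assert(diagnosticFor(true, false, true) != nullptr);   // const T *
  assert(diagnosticFor(false, true, true) != nullptr);   // const T &
  assert(diagnosticFor(true, false, false) == nullptr);  // T * - not checked
  return 0;
}
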
diff --git a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 5e6e105..e9adf30 100644
--- a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -23,12 +23,71 @@
 
 namespace {
 class CastSizeChecker : public Checker< check::PreStmt<CastExpr> > {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
+
 public:
   void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
 };
 }
 
+/// Check if we are casting to a struct with a flexible array at the end.
+/// \code
+/// struct foo {
+///   size_t len;
+///   struct bar data[];
+/// };
+/// \endcode
+/// or
+/// \code
+/// struct foo {
+///   size_t len;
+///   struct bar data[0];
+/// };
+/// \endcode
+/// In these cases it is also valid to allocate size of struct foo + a multiple
+/// of struct bar.
+static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
+                                  CharUnits TypeSize, QualType ToPointeeTy) {
+  const RecordType *RT = ToPointeeTy->getAs<RecordType>();
+  if (!RT)
+    return false;
+
+  const RecordDecl *RD = RT->getDecl();
+  RecordDecl::field_iterator Iter(RD->field_begin());
+  RecordDecl::field_iterator End(RD->field_end());
+  const FieldDecl *Last = 0;
+  for (; Iter != End; ++Iter)
+    Last = *Iter;
+  assert(Last && "empty structs should already be handled");
+
+  const Type *ElemType = Last->getType()->getArrayElementTypeNoTypeQual();
+  CharUnits FlexSize;
+  if (const ConstantArrayType *ArrayTy =
+        Ctx.getAsConstantArrayType(Last->getType())) {
+    FlexSize = Ctx.getTypeSizeInChars(ElemType);
+    if (ArrayTy->getSize() == 1 && TypeSize > FlexSize)
+      TypeSize -= FlexSize;
+    else if (ArrayTy->getSize() != 0)
+      return false;
+  } else if (RD->hasFlexibleArrayMember()) {
+    FlexSize = Ctx.getTypeSizeInChars(ElemType);
+  } else {
+    return false;
+  }
+
+  if (FlexSize.isZero())
+    return false;
+
+  CharUnits Left = RegionSize - TypeSize;
+  if (Left.isNegative())
+    return false;
+
+  if (Left % FlexSize == 0)
+    return true;
+
+  return false;
+}
+
 void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
   const Expr *E = CE->getSubExpr();
   ASTContext &Ctx = C.getASTContext();
@@ -66,21 +125,23 @@
   if (typeSize.isZero())
     return;
 
-  if (regionSize % typeSize != 0) {
-    if (ExplodedNode *errorNode = C.generateSink()) {
-      if (!BT)
-        BT.reset(new BuiltinBug("Cast region with wrong size.",
-                            "Cast a region whose size is not a multiple of the"
-                            " destination type size."));
-      BugReport *R = new BugReport(*BT, BT->getDescription(),
-                                               errorNode);
-      R->addRange(CE->getSourceRange());
-      C.emitReport(R);
-    }
+  if (regionSize % typeSize == 0)
+    return;
+
+  if (evenFlexibleArraySize(Ctx, regionSize, typeSize, ToPointeeTy))
+    return;
+
+  if (ExplodedNode *errorNode = C.generateSink()) {
+    if (!BT)
+      BT.reset(new BuiltinBug(this, "Cast region with wrong size.",
+                                    "Cast a region whose size is not a multiple"
+                                    " of the destination type size."));
+    BugReport *R = new BugReport(*BT, BT->getDescription(), errorNode);
+    R->addRange(CE->getSourceRange());
+    C.emitReport(R);
   }
 }
 
-
 void ento::registerCastSizeChecker(CheckerManager &mgr) {
-  mgr.registerChecker<CastSizeChecker>();  
+  mgr.registerChecker<CastSizeChecker>();
 }
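
The evenFlexibleArraySize helper added above accepts a cast when the region holds the struct header plus a whole number of trailing flexible-array elements. The following standalone snippet (plain C++ with an assumed layout, not analyzer code) illustrates the size arithmetic for the one-element-array idiom:

#include <cstddef>
#include <iostream>

struct bar { int v[4]; };
struct foo {
  std::size_t len;
  bar data[1];          // pre-C99 "flexible array" idiom; C99 would use bar data[];
};

// Mirrors the acceptance rule for the one-element form: the struct already
// contains one bar, so region == (sizeof(foo) - sizeof(bar)) + n * sizeof(bar).
bool castIsClean(std::size_t regionSize) {
  std::size_t base = sizeof(foo) - sizeof(bar);   // header part before the trailing array
  if (regionSize < base)
    return false;
  return (regionSize - base) % sizeof(bar) == 0;
}

int main() {
  std::cout << castIsClean(sizeof(foo) + 3 * sizeof(bar)) << '\n';  // 1: header + 4 bars
  std::cout << castIsClean(sizeof(foo) + 7) << '\n';                // 0: partial element left over
}
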
diff --git a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index 60348c7..d765315 100644
--- a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -24,7 +24,7 @@
 
 namespace {
 class CastToStructChecker : public Checker< check::PreStmt<CastExpr> > {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
 
 public:
   void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
@@ -58,10 +58,11 @@
   if (!OrigPointeeTy->isRecordType()) {
     if (ExplodedNode *N = C.addTransition()) {
       if (!BT)
-        BT.reset(new BuiltinBug("Cast from non-struct type to struct type",
-                            "Casting a non-structure type to a structure type "
-                            "and accessing a field can lead to memory access "
-                            "errors or data corruption."));
+        BT.reset(
+            new BuiltinBug(this, "Cast from non-struct type to struct type",
+                           "Casting a non-structure type to a structure type "
+                           "and accessing a field can lead to memory access "
+                           "errors or data corruption."));
       BugReport *R = new BugReport(*BT,BT->getDescription(), N);
       R->addRange(CE->getSourceRange());
       C.emitReport(R);
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 3f9b3cc..827a10a 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -97,8 +97,9 @@
   return false;
 }
 
-static void checkObjCDealloc(const ObjCImplementationDecl *D,
-                             const LangOptions& LOpts, BugReporter& BR) {
+static void checkObjCDealloc(const CheckerBase *Checker,
+                             const ObjCImplementationDecl *D,
+                             const LangOptions &LOpts, BugReporter &BR) {
 
   assert (LOpts.getGC() != LangOptions::GCOnly);
 
@@ -112,15 +113,12 @@
 
   bool containsPointerIvar = false;
 
-  for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
-       I!=E; ++I) {
-
-    ObjCIvarDecl *ID = *I;
-    QualType T = ID->getType();
+  for (const auto *Ivar : ID->ivars()) {
+    QualType T = Ivar->getType();
 
     if (!T->isObjCObjectPointerType() ||
-        ID->getAttr<IBOutletAttr>() || // Skip IBOutlets.
-        ID->getAttr<IBOutletCollectionAttr>()) // Skip IBOutletCollections.
+        Ivar->hasAttr<IBOutletAttr>() || // Skip IBOutlets.
+        Ivar->hasAttr<IBOutletCollectionAttr>()) // Skip IBOutletCollections.
       continue;
 
     containsPointerIvar = true;
@@ -155,14 +153,12 @@
   // Get the "dealloc" selector.
   IdentifierInfo* II = &Ctx.Idents.get("dealloc");
   Selector S = Ctx.Selectors.getSelector(0, &II);
-  ObjCMethodDecl *MD = 0;
+  const ObjCMethodDecl *MD = 0;
 
   // Scan the instance methods for "dealloc".
-  for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
-       E = D->instmeth_end(); I!=E; ++I) {
-
-    if ((*I)->getSelector() == S) {
-      MD = *I;
+  for (const auto *I : D->instance_methods()) {
+    if (I->getSelector() == S) {
+      MD = I;
       break;
     }
   }
@@ -180,7 +176,7 @@
     llvm::raw_string_ostream os(buf);
     os << "Objective-C class '" << *D << "' lacks a 'dealloc' instance method";
 
-    BR.EmitBasicReport(D, name, categories::CoreFoundationObjectiveC,
+    BR.EmitBasicReport(D, Checker, name, categories::CoreFoundationObjectiveC,
                        os.str(), DLoc);
     return;
   }
@@ -198,7 +194,7 @@
        << "' does not send a 'dealloc' message to its super class"
            " (missing [super dealloc])";
 
-    BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
+    BR.EmitBasicReport(MD, Checker, name, categories::CoreFoundationObjectiveC,
                        os.str(), DLoc);
     return;
   }
@@ -212,9 +208,7 @@
 
   // Scan for missing and extra releases of ivars used by implementations
   // of synthesized properties
-  for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(),
-       E = D->propimpl_end(); I!=E; ++I) {
-
+  for (const auto *I : D->property_impls()) {
     // We can only check the synthesized properties
     if (I->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
       continue;
@@ -262,10 +256,10 @@
       }
 
       PathDiagnosticLocation SDLoc =
-        PathDiagnosticLocation::createBegin(*I, BR.getSourceManager());
+        PathDiagnosticLocation::createBegin(I, BR.getSourceManager());
 
-      BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
-                         os.str(), SDLoc);
+      BR.EmitBasicReport(MD, Checker, name,
+                         categories::CoreFoundationObjectiveC, os.str(), SDLoc);
     }
   }
 }
@@ -282,7 +276,8 @@
                     BugReporter &BR) const {
     if (mgr.getLangOpts().getGC() == LangOptions::GCOnly)
       return;
-    checkObjCDealloc(cast<ObjCImplementationDecl>(D), mgr.getLangOpts(), BR);
+    checkObjCDealloc(this, cast<ObjCImplementationDecl>(D), mgr.getLangOpts(),
+                     BR);
   }
 };
 }
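
Several hunks above replace hand-written iterator loops (ivar_iterator, instmeth_iterator, propimpl_iterator) with range-based for over the new range accessors ivars(), instance_methods() and property_impls(). All the pattern needs is an accessor returning something with begin()/end(); here is a toy sketch of that shape (the InterfaceDecl and Ivar types are illustrative, not the Clang Decl hierarchy):

#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Ivar { std::string Name; bool IsPointer; };

class InterfaceDecl {
  std::vector<Ivar> Ivars;
public:
  explicit InterfaceDecl(std::vector<Ivar> IV) : Ivars(std::move(IV)) {}
  // Range accessor in the spirit of ObjCInterfaceDecl::ivars(): anything with
  // begin()/end() is enough for range-based for.
  const std::vector<Ivar> &ivars() const { return Ivars; }
};

int main() {
  InterfaceDecl ID({{"_name", true}, {"_count", false}});

  // Old style: explicit iterator pair, as in the removed loops.
  for (auto I = ID.ivars().begin(), E = ID.ivars().end(); I != E; ++I)
    std::cout << I->Name << '\n';

  // New style used throughout the patch: range-based for.
  for (const auto &IV : ID.ivars())
    if (IV.IsPointer)
      std::cout << IV.Name << " is a pointer ivar\n";
}
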
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index 9cb1d2d..dc53602 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -40,10 +40,11 @@
 static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
                                const ObjCMethodDecl *MethAncestor,
                                BugReporter &BR, ASTContext &Ctx,
-                               const ObjCImplementationDecl *ID) {
+                               const ObjCImplementationDecl *ID,
+                               const CheckerBase *Checker) {
 
-  QualType ResDerived  = MethDerived->getResultType();
-  QualType ResAncestor = MethAncestor->getResultType();
+  QualType ResDerived = MethDerived->getReturnType();
+  QualType ResAncestor = MethAncestor->getReturnType();
 
   if (!AreTypesCompatible(ResDerived, ResAncestor, Ctx)) {
     std::string sbuf;
@@ -53,9 +54,9 @@
        << *MethDerived->getClassInterface()
        << "', which is derived from class '"
        << *MethAncestor->getClassInterface()
-       << "', defines the instance method '"
-       << MethDerived->getSelector().getAsString()
-       << "' whose return type is '"
+       << "', defines the instance method '";
+    MethDerived->getSelector().print(os);
+    os << "' whose return type is '"
        << ResDerived.getAsString()
        << "'.  A method with the same name (same selector) is also defined in "
           "class '"
@@ -69,15 +70,15 @@
       PathDiagnosticLocation::createBegin(MethDerived,
                                           BR.getSourceManager());
 
-    BR.EmitBasicReport(MethDerived,
-                       "Incompatible instance method return type",
-                       categories::CoreFoundationObjectiveC,
-                       os.str(), MethDLoc);
+    BR.EmitBasicReport(
+        MethDerived, Checker, "Incompatible instance method return type",
+        categories::CoreFoundationObjectiveC, os.str(), MethDLoc);
   }
 }
 
 static void CheckObjCInstMethSignature(const ObjCImplementationDecl *ID,
-                                       BugReporter& BR) {
+                                       BugReporter &BR,
+                                       const CheckerBase *Checker) {
 
   const ObjCInterfaceDecl *D = ID->getClassInterface();
   const ObjCInterfaceDecl *C = D->getSuperClass();
@@ -92,10 +93,7 @@
   MapTy IMeths;
   unsigned NumMethods = 0;
 
-  for (ObjCImplementationDecl::instmeth_iterator I=ID->instmeth_begin(),
-       E=ID->instmeth_end(); I!=E; ++I) {
-
-    ObjCMethodDecl *M = *I;
+  for (auto *M : ID->instance_methods()) {
     IMeths[M->getSelector()] = M;
     ++NumMethods;
   }
@@ -103,10 +101,7 @@
   // Now recurse the class hierarchy chain looking for methods with the
   // same signatures.
   while (C && NumMethods) {
-    for (ObjCInterfaceDecl::instmeth_iterator I=C->instmeth_begin(),
-         E=C->instmeth_end(); I!=E; ++I) {
-
-      ObjCMethodDecl *M = *I;
+    for (const auto *M : C->instance_methods()) {
       Selector S = M->getSelector();
 
       MapTy::iterator MI = IMeths.find(S);
@@ -118,7 +113,7 @@
       ObjCMethodDecl *MethDerived = MI->second;
       MI->second = 0;
 
-      CompareReturnTypes(MethDerived, M, BR, Ctx, ID);
+      CompareReturnTypes(MethDerived, M, BR, Ctx, ID, Checker);
     }
 
     C = C->getSuperClass();
@@ -135,7 +130,7 @@
 public:
   void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
                     BugReporter &BR) const {
-    CheckObjCInstMethSignature(D, BR);
+    CheckObjCInstMethSignature(D, BR, this);
   }
 };
 }
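
The diagnostic above now streams the selector straight into the message buffer via Selector::print(os) instead of building a temporary string with getSelector().getAsString(). A generic sketch of that streaming pattern with standard iostreams follows; the Selector class here is a toy, not the Clang one:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

class Selector {
  std::vector<std::string> Pieces;
public:
  explicit Selector(std::vector<std::string> P) : Pieces(std::move(P)) {}
  // Allocates and returns a temporary string.
  std::string getAsString() const {
    std::string S;
    for (const auto &P : Pieces) { S += P; S += ':'; }
    return S;
  }
  // Streams directly into an existing buffer; no intermediate string.
  void print(std::ostream &OS) const {
    for (const auto &P : Pieces) OS << P << ':';
  }
};

int main() {
  Selector S({"setObject", "forKey"});
  std::ostringstream Msg;
  Msg << "defines the instance method '";
  S.print(Msg);                       // preferred: write into the message buffer
  Msg << "'";
  std::cout << Msg.str() << '\n';     // defines the instance method 'setObject:forKey:'
  std::cout << S.getAsString() << '\n';
}
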
diff --git a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index 415d3ec..5706364 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -46,8 +46,18 @@
   DefaultBool check_vfork;
   DefaultBool check_FloatLoopCounter;
   DefaultBool check_UncheckedReturn;
+
+  CheckName checkName_gets;
+  CheckName checkName_getpw;
+  CheckName checkName_mktemp;
+  CheckName checkName_mkstemp;
+  CheckName checkName_strcpy;
+  CheckName checkName_rand;
+  CheckName checkName_vfork;
+  CheckName checkName_FloatLoopCounter;
+  CheckName checkName_UncheckedReturn;
 };
-  
+
 class WalkAST : public StmtVisitor<WalkAST> {
   BugReporter &BR;
   AnalysisDeclContext* AC;
@@ -281,7 +291,7 @@
 
   PathDiagnosticLocation FSLoc =
     PathDiagnosticLocation::createBegin(FS, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_FloatLoopCounter,
                      bugType, "Security", os.str(),
                      FSLoc, ranges);
 }
@@ -302,11 +312,11 @@
     return;
 
   // Verify that the function takes a single argument.
-  if (FPT->getNumArgs() != 1)
+  if (FPT->getNumParams() != 1)
     return;
 
   // Is the argument a 'char*'?
-  const PointerType *PT = FPT->getArgType(0)->getAs<PointerType>();
+  const PointerType *PT = FPT->getParamType(0)->getAs<PointerType>();
   if (!PT)
     return;
 
@@ -316,7 +326,7 @@
   // Issue a warning.
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_gets,
                      "Potential buffer overflow in call to 'gets'",
                      "Security",
                      "Call to function 'gets' is extremely insecure as it can "
@@ -338,15 +348,15 @@
     return;
 
   // Verify that the function takes two arguments.
-  if (FPT->getNumArgs() != 2)
+  if (FPT->getNumParams() != 2)
     return;
 
   // Verify the first argument type is integer.
-  if (!FPT->getArgType(0)->isIntegralOrUnscopedEnumerationType())
+  if (!FPT->getParamType(0)->isIntegralOrUnscopedEnumerationType())
     return;
 
   // Verify the second argument type is char*.
-  const PointerType *PT = FPT->getArgType(1)->getAs<PointerType>();
+  const PointerType *PT = FPT->getParamType(1)->getAs<PointerType>();
   if (!PT)
     return;
 
@@ -356,7 +366,7 @@
   // Issue a warning.
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_getpw,
                      "Potential buffer overflow in call to 'getpw'",
                      "Security",
                      "The getpw() function is dangerous as it may overflow the "
@@ -382,11 +392,11 @@
     return;
 
   // Verify that the function takes a single argument.
-  if (FPT->getNumArgs() != 1)
+  if (FPT->getNumParams() != 1)
     return;
 
   // Verify that the argument is Pointer Type.
-  const PointerType *PT = FPT->getArgType(0)->getAs<PointerType>();
+  const PointerType *PT = FPT->getParamType(0)->getAs<PointerType>();
   if (!PT)
     return;
 
@@ -397,7 +407,7 @@
   // Issue a waring.
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_mktemp,
                      "Potential insecure temporary file in call 'mktemp'",
                      "Security",
                      "Call to function 'mktemp' is insecure as it always "
@@ -483,7 +493,7 @@
     out << " used as a suffix";
   }
   out << ')';
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_mkstemp,
                      "Insecure temporary file creation", "Security",
                      out.str(), CELoc, strArg->getSourceRange());
 }
@@ -504,7 +514,7 @@
   // Issue a warning.
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_strcpy,
                      "Potential insecure memory buffer bounds restriction in "
                      "call 'strcpy'",
                      "Security",
@@ -531,7 +541,7 @@
   // Issue a warning.
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_strcpy,
                      "Potential insecure memory buffer bounds restriction in "
                      "call 'strcat'",
                      "Security",
@@ -551,14 +561,14 @@
     return false;
 
   // Verify the function takes two arguments, three in the _chk version.
-  int numArgs = FPT->getNumArgs();
+  int numArgs = FPT->getNumParams();
   if (numArgs != 2 && numArgs != 3)
     return false;
 
   // Verify the type for both arguments.
   for (int i = 0; i < 2; i++) {
     // Verify that the arguments are pointers.
-    const PointerType *PT = FPT->getArgType(i)->getAs<PointerType>();
+    const PointerType *PT = FPT->getParamType(i)->getAs<PointerType>();
     if (!PT)
       return false;
 
@@ -584,17 +594,16 @@
   if (!FTP)
     return;
 
-  if (FTP->getNumArgs() == 1) {
+  if (FTP->getNumParams() == 1) {
     // Is the argument an 'unsigned short *'?
     // (Actually any integer type is allowed.)
-    const PointerType *PT = FTP->getArgType(0)->getAs<PointerType>();
+    const PointerType *PT = FTP->getParamType(0)->getAs<PointerType>();
     if (!PT)
       return;
 
     if (! PT->getPointeeType()->isIntegralOrUnscopedEnumerationType())
       return;
-  }
-  else if (FTP->getNumArgs() != 0)
+  } else if (FTP->getNumParams() != 0)
     return;
 
   // Issue a warning.
@@ -610,8 +619,9 @@
 
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(), os1.str(), "Security", os2.str(),
-                     CELoc, CE->getCallee()->getSourceRange());
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_rand, os1.str(),
+                     "Security", os2.str(), CELoc,
+                     CE->getCallee()->getSourceRange());
 }
 
 //===----------------------------------------------------------------------===//
@@ -628,13 +638,13 @@
     return;
 
   // Verify that the function takes no argument.
-  if (FTP->getNumArgs() != 0)
+  if (FTP->getNumParams() != 0)
     return;
 
   // Issue a warning.
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_rand,
                      "'random' is not a secure random number generator",
                      "Security",
                      "The 'random' function produces a sequence of values that "
@@ -654,7 +664,7 @@
   // All calls to vfork() are insecure, issue a warning.
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(),
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_vfork,
                      "Potential insecure implementation-specific behavior in "
                      "call 'vfork'",
                      "Security",
@@ -704,12 +714,12 @@
 
   // Verify that the function takes one or two arguments (depending on
   //   the function).
-  if (FTP->getNumArgs() != (identifierid < 4 ? 1 : 2))
+  if (FTP->getNumParams() != (identifierid < 4 ? 1 : 2))
     return;
 
   // The arguments must be integers.
-  for (unsigned i = 0; i < FTP->getNumArgs(); i++)
-    if (! FTP->getArgType(i)->isIntegralOrUnscopedEnumerationType())
+  for (unsigned i = 0; i < FTP->getNumParams(); i++)
+    if (!FTP->getParamType(i)->isIntegralOrUnscopedEnumerationType())
       return;
 
   // Issue a warning.
@@ -725,8 +735,9 @@
 
   PathDiagnosticLocation CELoc =
     PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-  BR.EmitBasicReport(AC->getDecl(), os1.str(), "Security", os2.str(),
-                     CELoc, CE->getCallee()->getSourceRange());
+  BR.EmitBasicReport(AC->getDecl(), filter.checkName_UncheckedReturn, os1.str(),
+                     "Security", os2.str(), CELoc,
+                     CE->getCallee()->getSourceRange());
 }
 
 //===----------------------------------------------------------------------===//
@@ -746,10 +757,13 @@
 };
 }
 
-#define REGISTER_CHECKER(name) \
-void ento::register##name(CheckerManager &mgr) {\
-  mgr.registerChecker<SecuritySyntaxChecker>()->filter.check_##name = true;\
-}
+#define REGISTER_CHECKER(name)                                                 \
+  void ento::register##name(CheckerManager &mgr) {                             \
+    SecuritySyntaxChecker *checker =                                           \
+        mgr.registerChecker<SecuritySyntaxChecker>();                          \
+    checker->filter.check_##name = true;                                       \
+    checker->filter.checkName_##name = mgr.getCurrentCheckName();              \
+  }
 
 REGISTER_CHECKER(gets)
 REGISTER_CHECKER(getpw)
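
Every EmitBasicReport call in this file now carries the per-check name (filter.checkName_gets, checkName_rand, ...), so a report records which member of the security-syntax family produced it. A compact sketch of that attribution pattern with toy types (the analyzer's CheckName and DefaultBool machinery is deliberately simplified away):

#include <iostream>
#include <string>

struct ChecksFilter {
  bool check_gets = false;
  bool check_rand = false;
  std::string checkName_gets;   // filled in at registration time
  std::string checkName_rand;
};

// Stand-in for BugReporter::EmitBasicReport: the emitting sub-check is now
// part of every report, not just the shared "Security" category.
void emitBasicReport(const std::string &CheckName, const std::string &Msg) {
  std::cout << '[' << CheckName << "] " << Msg << '\n';
}

void checkGets(const ChecksFilter &F) {
  if (!F.check_gets)
    return;
  emitBasicReport(F.checkName_gets,
                  "Potential buffer overflow in call to 'gets'");
}

int main() {
  ChecksFilter F;
  F.check_gets = true;
  F.checkName_gets = "security.insecureAPI.gets";  // illustrative check name
  checkGets(F);
}
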
diff --git a/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
index 1207b67..5106b06 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -24,10 +24,12 @@
 namespace {
 class WalkAST : public StmtVisitor<WalkAST> {
   BugReporter &BR;
+  const CheckerBase *Checker;
   AnalysisDeclContext* AC;
 
 public:
-  WalkAST(BugReporter &br, AnalysisDeclContext* ac) : BR(br), AC(ac) {}
+  WalkAST(BugReporter &br, const CheckerBase *checker, AnalysisDeclContext *ac)
+      : BR(br), Checker(checker), AC(ac) {}
   void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
   void VisitStmt(Stmt *S) { VisitChildren(S); }
   void VisitChildren(Stmt *S);
@@ -62,7 +64,7 @@
 
     PathDiagnosticLocation ELoc =
       PathDiagnosticLocation::createBegin(E, BR.getSourceManager(), AC);
-    BR.EmitBasicReport(AC->getDecl(),
+    BR.EmitBasicReport(AC->getDecl(), Checker,
                        "Potential unintended use of sizeof() on pointer type",
                        categories::LogicError,
                        "The code calls sizeof() on a pointer type. "
@@ -80,7 +82,7 @@
 public:
   void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
                         BugReporter &BR) const {
-    WalkAST walker(BR, mgr.getAnalysisDeclContext(D));
+    WalkAST walker(BR, this, mgr.getAnalysisDeclContext(D));
     walker.Visit(D->getBody());
   }
 };
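
WalkAST now keeps a back-pointer to the owning checker (a const CheckerBase *) and forwards it to EmitBasicReport, so reports produced inside a free-standing AST walker are still attributed to the checker that created it. A toy version of that carry-the-owner pattern (all names invented for illustration):

#include <iostream>
#include <string>

struct Checker {           // stands in for CheckerBase
  std::string Name;
};

class WalkAST {
  const Checker *Owner;    // back-pointer threaded in by the checker
public:
  explicit WalkAST(const Checker *C) : Owner(C) {}
  void visitSizeofOnPointer() {
    // The walker itself is not a checker, but its reports still name one.
    std::cout << "report from '" << Owner->Name
              << "': sizeof() applied to a pointer type\n";
  }
};

int main() {
  Checker SizeofChecker{"toy.SizeofPointer"};
  WalkAST Walker(&SizeofChecker);   // mirrors: WalkAST walker(BR, this, AC);
  Walker.visitSizeofOnPointer();
}
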
diff --git a/lib/StaticAnalyzer/Checkers/Checkers.td b/lib/StaticAnalyzer/Checkers/Checkers.td
index 862212d..a457b44 100644
--- a/lib/StaticAnalyzer/Checkers/Checkers.td
+++ b/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -120,6 +120,10 @@
   HelpText<"Warn about unintended use of sizeof() on pointer expressions">,
   DescFile<"CheckSizeofPointer.cpp">;
 
+def CallAndMessageUnInitRefArg : Checker<"CallAndMessageUnInitRefArg">,
+  HelpText<"Check for logical errors for function calls and Objective-C message expressions (e.g., uninitialized arguments, null function pointers, and pointer to undefined variables)">,
+  DescFile<"CallAndMessageChecker.cpp">;
+
 } // end "alpha.core"
 
 //===----------------------------------------------------------------------===//
@@ -203,10 +207,6 @@
 
 let ParentPackage = DeadCodeAlpha in {
 
-def IdempotentOperationChecker : Checker<"IdempotentOperations">,
-  HelpText<"Warn about idempotent operations">,
-  DescFile<"IdempotentOperationChecker.cpp">;
-
 def UnreachableCodeChecker : Checker<"UnreachableCode">,
   HelpText<"Check unreachable code">,
   DescFile<"UnreachableCodeChecker.cpp">;
@@ -416,6 +416,10 @@
   HelpText<"Model the APIs that are guaranteed to return a non-nil value">,
   DescFile<"BasicObjCFoundationChecks.cpp">;
 
+def ObjCSuperCallChecker : Checker<"MissingSuperCall">,
+  HelpText<"Warn about Objective-C methods that lack a necessary call to super">,
+  DescFile<"ObjCMissingSuperCallChecker.cpp">;
+
 def NSErrorChecker : Checker<"NSError">,
   HelpText<"Check usage of NSError** parameters">,
   DescFile<"NSErrorChecker.cpp">;
@@ -448,10 +452,6 @@
   HelpText<"Check for direct assignments to instance variables in the methods annotated with objc_no_direct_instance_variable_assignment">,
   DescFile<"DirectIvarAssignment.cpp">;
 
-def ObjCSuperCallChecker : Checker<"MissingSuperCall">,
-  HelpText<"Warn about Objective-C methods that lack a necessary call to super">,
-  DescFile<"ObjCMissingSuperCallChecker.cpp">;
-
 } // end "alpha.osx.cocoa"
 
 let ParentPackage = CoreFoundation in {
diff --git a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 9912965..628cf2c 100644
--- a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -41,7 +41,7 @@
 class ChrootChecker : public Checker<eval::Call, check::PreStmt<CallExpr> > {
   mutable IdentifierInfo *II_chroot, *II_chdir;
   // This bug refers to possibly break out of a chroot() jail.
-  mutable OwningPtr<BuiltinBug> BT_BreakJail;
+  mutable std::unique_ptr<BuiltinBug> BT_BreakJail;
 
 public:
   ChrootChecker() : II_chroot(0), II_chdir(0) {}
@@ -142,9 +142,9 @@
     if (isRootChanged((intptr_t) *k))
       if (ExplodedNode *N = C.addTransition()) {
         if (!BT_BreakJail)
-          BT_BreakJail.reset(new BuiltinBug("Break out of jail",
-                                        "No call of chdir(\"/\") immediately "
-                                        "after chroot"));
+          BT_BreakJail.reset(new BuiltinBug(
+              this, "Break out of jail", "No call of chdir(\"/\") immediately "
+                                         "after chroot"));
         BugReport *R = new BugReport(*BT_BreakJail, 
                                      BT_BreakJail->getDescription(), N);
         C.emitReport(R);
diff --git a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 9d855ce..6001a3c 100644
--- a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -124,21 +124,23 @@
   const CFG &cfg;
   ASTContext &Ctx;
   BugReporter& BR;
+  const CheckerBase *Checker;
   AnalysisDeclContext* AC;
   ParentMap& Parents;
   llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
-  OwningPtr<ReachableCode> reachableCode;
+  std::unique_ptr<ReachableCode> reachableCode;
   const CFGBlock *currentBlock;
-  OwningPtr<llvm::DenseSet<const VarDecl *> > InEH;
+  std::unique_ptr<llvm::DenseSet<const VarDecl *>> InEH;
 
   enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
 
 public:
-  DeadStoreObs(const CFG &cfg, ASTContext &ctx,
-               BugReporter& br, AnalysisDeclContext* ac, ParentMap& parents,
-               llvm::SmallPtrSet<const VarDecl*, 20> &escaped)
-    : cfg(cfg), Ctx(ctx), BR(br), AC(ac), Parents(parents),
-      Escaped(escaped), currentBlock(0) {}
+  DeadStoreObs(const CFG &cfg, ASTContext &ctx, BugReporter &br,
+               const CheckerBase *checker, AnalysisDeclContext *ac,
+               ParentMap &parents,
+               llvm::SmallPtrSet<const VarDecl *, 20> &escaped)
+      : cfg(cfg), Ctx(ctx), BR(br), Checker(checker), AC(ac), Parents(parents),
+        Escaped(escaped), currentBlock(0) {}
 
   virtual ~DeadStoreObs() {}
 
@@ -199,7 +201,8 @@
         return;
     }
 
-    BR.EmitBasicReport(AC->getDecl(), BugType, "Dead store", os.str(), L, R);
+    BR.EmitBasicReport(AC->getDecl(), Checker, BugType, "Dead store", os.str(),
+                       L, R);
   }
 
   void CheckVarDecl(const VarDecl *VD, const Expr *Ex, const Expr *Val,
@@ -214,7 +217,8 @@
       return;
 
     if (!isLive(Live, VD) &&
-        !(VD->getAttr<UnusedAttr>() || VD->getAttr<BlocksAttr>())) {
+        !(VD->hasAttr<UnusedAttr>() || VD->hasAttr<BlocksAttr>() ||
+          VD->hasAttr<ObjCPreciseLifetimeAttr>())) {
 
       PathDiagnosticLocation ExLoc =
         PathDiagnosticLocation::createBegin(Ex, BR.getSourceManager(), AC);
@@ -251,8 +255,8 @@
     return false;
   }
 
-  virtual void observeStmt(const Stmt *S, const CFGBlock *block,
-                           const LiveVariables::LivenessValues &Live) {
+  void observeStmt(const Stmt *S, const CFGBlock *block,
+                   const LiveVariables::LivenessValues &Live) override {
 
     currentBlock = block;
     
@@ -309,10 +313,8 @@
     else if (const DeclStmt *DS = dyn_cast<DeclStmt>(S))
       // Iterate through the decls.  Warn if any initializers are complex
       // expressions that are not live (never used).
-      for (DeclStmt::const_decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
-           DI != DE; ++DI) {
-
-        VarDecl *V = dyn_cast<VarDecl>(*DI);
+      for (const auto *DI : DS->decls()) {
+        const auto *V = dyn_cast<VarDecl>(DI);
 
         if (!V)
           continue;
@@ -339,8 +341,10 @@
             
             // A dead initialization is a variable that is dead after it
             // is initialized.  We don't flag warnings for those variables
-            // marked 'unused'.
-            if (!isLive(Live, V) && V->getAttr<UnusedAttr>() == 0) {
+            // marked 'unused' or 'objc_precise_lifetime'.
+            if (!isLive(Live, V) &&
+                !V->hasAttr<UnusedAttr>() &&
+                !V->hasAttr<ObjCPreciseLifetimeAttr>()) {
               // Special case: check for initializations with constants.
               //
               //  e.g. : int x = 0;
@@ -436,7 +440,7 @@
       ParentMap &pmap = mgr.getParentMap(D);
       FindEscaped FS;
       cfg.VisitBlockStmts(FS);
-      DeadStoreObs A(cfg, BR.getContext(), BR, AC, pmap, FS.Escaped);
+      DeadStoreObs A(cfg, BR.getContext(), BR, this, AC, pmap, FS.Escaped);
       L->runOnAllBlocks(A);
     }
   }
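
Two mechanical changes recur in this file: OwningPtr members become std::unique_ptr, and presence tests move from getAttr<T>() (which returns the attribute pointer) to hasAttr<T>() (which returns a bool). The sketch below shows why the latter reads better for a pure presence check; the Decl and attribute types are toys, not the Clang interfaces:

#include <iostream>
#include <memory>
#include <vector>

struct Attr { virtual ~Attr() = default; };
struct UnusedAttr : Attr {};
struct BlocksAttr : Attr {};

class Decl {
  std::vector<std::unique_ptr<Attr>> Attrs;
public:
  void addAttr(std::unique_ptr<Attr> A) { Attrs.push_back(std::move(A)); }
  // getAttr<T>: returns the attribute pointer (or null) -- use it when you need the data.
  template <class T> const T *getAttr() const {
    for (const auto &A : Attrs)
      if (const T *TA = dynamic_cast<const T *>(A.get()))
        return TA;
    return nullptr;
  }
  // hasAttr<T>: the intent-revealing spelling for a pure presence check.
  template <class T> bool hasAttr() const { return getAttr<T>() != nullptr; }
};

int main() {
  Decl VD;
  VD.addAttr(std::make_unique<UnusedAttr>());
  // Old: !(VD.getAttr<UnusedAttr>() || VD.getAttr<BlocksAttr>())
  // New, as in the patch:
  bool Skip = VD.hasAttr<UnusedAttr>() || VD.hasAttr<BlocksAttr>();
  std::cout << std::boolalpha << Skip << '\n';   // prints: true
}
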
diff --git a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index a2c8d1f..51e7a3d 100644
--- a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -95,6 +95,11 @@
 public:
   void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
                         BugReporter &BR) const {
+    PrintingPolicy Policy(mgr.getLangOpts());
+    Policy.TerseOutput = true;
+    Policy.PolishForDeclaration = true;
+    D->print(llvm::errs(), Policy);
+
     if (CFG *cfg = mgr.getCFG(D)) {
       cfg->dump(mgr.getLangOpts(),
                 llvm::sys::Process::StandardErrHasColors());
diff --git a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 72d46c5..efdc213 100644
--- a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -29,8 +29,8 @@
     : public Checker< check::Location,
                       check::Bind,
                       EventDispatcher<ImplicitNullDerefEvent> > {
-  mutable OwningPtr<BuiltinBug> BT_null;
-  mutable OwningPtr<BuiltinBug> BT_undef;
+  mutable std::unique_ptr<BuiltinBug> BT_null;
+  mutable std::unique_ptr<BuiltinBug> BT_undef;
 
   void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C,
                  bool IsBind = false) const;
@@ -97,7 +97,7 @@
   // We know that 'location' cannot be non-null.  This is what
   // we call an "explicit" null dereference.
   if (!BT_null)
-    BT_null.reset(new BuiltinBug("Dereference of null pointer"));
+    BT_null.reset(new BuiltinBug(this, "Dereference of null pointer"));
 
   SmallString<100> buf;
   llvm::raw_svector_ostream os(buf);
@@ -180,7 +180,8 @@
   if (l.isUndef()) {
     if (ExplodedNode *N = C.generateSink()) {
       if (!BT_undef)
-        BT_undef.reset(new BuiltinBug("Dereference of undefined pointer value"));
+        BT_undef.reset(
+            new BuiltinBug(this, "Dereference of undefined pointer value"));
 
       BugReport *report =
         new BugReport(*BT_undef, BT_undef->getDescription(), N);
@@ -200,7 +201,7 @@
   ProgramStateRef state = C.getState();
 
   ProgramStateRef notNullState, nullState;
-  llvm::tie(notNullState, nullState) = state->assume(location);
+  std::tie(notNullState, nullState) = state->assume(location);
 
   // The explicit NULL case.
   if (nullState) {
@@ -239,8 +240,7 @@
   ProgramStateRef State = C.getState();
 
   ProgramStateRef StNonNull, StNull;
-  llvm::tie(StNonNull, StNull) =
-      State->assume(V.castAs<DefinedOrUnknownSVal>());
+  std::tie(StNonNull, StNull) = State->assume(V.castAs<DefinedOrUnknownSVal>());
 
   if (StNull) {
     if (!StNonNull) {
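
llvm::tie was LLVM's pre-C++11 substitute for std::tie; with C++11 in use, the pair returned by State->assume() is now unpacked with the standard facility. A short standalone reminder of how std::tie destructures a pair into existing variables (assumeDual below is a stand-in, not the ConstraintManager method):

#include <iostream>
#include <tuple>
#include <utility>

// Stand-in for the state split: returns the (non-null, null) feasibility pair.
std::pair<bool, bool> assumeDual(int Value) {
  return {Value != 0, Value == 0};
}

int main() {
  bool NotNullState, NullState;
  // As in the patch: std::tie(notNullState, nullState) = state->assume(location);
  std::tie(NotNullState, NullState) = assumeDual(42);
  std::cout << std::boolalpha << NotNullState << ' ' << NullState << '\n';  // true false
}
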
diff --git a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
index b43dc18..0bcebf6 100644
--- a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
+++ b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -63,13 +63,15 @@
     const ObjCMethodDecl *MD;
     const ObjCInterfaceDecl *InterfD;
     BugReporter &BR;
+    const CheckerBase *Checker;
     LocationOrAnalysisDeclContext DCtx;
 
   public:
     MethodCrawler(const IvarToPropertyMapTy &InMap, const ObjCMethodDecl *InMD,
-        const ObjCInterfaceDecl *InID,
-        BugReporter &InBR, AnalysisDeclContext *InDCtx)
-    : IvarToPropMap(InMap), MD(InMD), InterfD(InID), BR(InBR), DCtx(InDCtx) {}
+                  const ObjCInterfaceDecl *InID, BugReporter &InBR,
+                  const CheckerBase *Checker, AnalysisDeclContext *InDCtx)
+        : IvarToPropMap(InMap), MD(InMD), InterfD(InID), BR(InBR),
+          Checker(Checker), DCtx(InDCtx) {}
 
     void VisitStmt(const Stmt *S) { VisitChildren(S); }
 
@@ -122,10 +124,7 @@
   IvarToPropertyMapTy IvarToPropMap;
 
   // Find all properties for this class.
-  for (ObjCInterfaceDecl::prop_iterator I = InterD->prop_begin(),
-      E = InterD->prop_end(); I != E; ++I) {
-    ObjCPropertyDecl *PD = *I;
-
+  for (const auto *PD : InterD->properties()) {
     // Find the corresponding IVar.
     const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterD,
                                                      Mgr.getASTContext());
@@ -140,10 +139,7 @@
   if (IvarToPropMap.empty())
     return;
 
-  for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
-      E = D->instmeth_end(); I != E; ++I) {
-
-    ObjCMethodDecl *M = *I;
+  for (const auto *M : D->instance_methods()) {
     AnalysisDeclContext *DCtx = Mgr.getAnalysisDeclContext(M);
 
     if ((*ShouldSkipMethod)(M))
@@ -152,20 +148,17 @@
     const Stmt *Body = M->getBody();
     assert(Body);
 
-    MethodCrawler MC(IvarToPropMap, M->getCanonicalDecl(), InterD, BR, DCtx);
+    MethodCrawler MC(IvarToPropMap, M->getCanonicalDecl(), InterD, BR, this,
+                     DCtx);
     MC.VisitStmt(Body);
   }
 }
 
 static bool isAnnotatedToAllowDirectAssignment(const Decl *D) {
-  for (specific_attr_iterator<AnnotateAttr>
-       AI = D->specific_attr_begin<AnnotateAttr>(),
-       AE = D->specific_attr_end<AnnotateAttr>(); AI != AE; ++AI) {
-    const AnnotateAttr *Ann = *AI;
+  for (const auto *Ann : D->specific_attrs<AnnotateAttr>())
     if (Ann->getAnnotation() ==
         "objc_allow_direct_instance_variable_assignment")
       return true;
-  }
   return false;
 }
 
@@ -204,13 +197,11 @@
       if (GetterMethod && GetterMethod->getCanonicalDecl() == MD)
         return;
 
-      BR.EmitBasicReport(MD,
-          "Property access",
-          categories::CoreFoundationObjectiveC,
+      BR.EmitBasicReport(
+          MD, Checker, "Property access", categories::CoreFoundationObjectiveC,
           "Direct assignment to an instance variable backing a property; "
-          "use the setter instead", PathDiagnosticLocation(IvarRef,
-                                                          BR.getSourceManager(),
-                                                          DCtx));
+          "use the setter instead",
+          PathDiagnosticLocation(IvarRef, BR.getSourceManager(), DCtx));
     }
   }
 }
@@ -225,14 +216,9 @@
 // Register the checker that checks for direct accesses in functions annotated
 // with __attribute__((annotate("objc_no_direct_instance_variable_assignment"))).
 static bool AttrFilter(const ObjCMethodDecl *M) {
-  for (specific_attr_iterator<AnnotateAttr>
-           AI = M->specific_attr_begin<AnnotateAttr>(),
-           AE = M->specific_attr_end<AnnotateAttr>();
-       AI != AE; ++AI) {
-    const AnnotateAttr *Ann = *AI;
+  for (const auto *Ann : M->specific_attrs<AnnotateAttr>())
     if (Ann->getAnnotation() == "objc_no_direct_instance_variable_assignment")
       return false;
-  }
   return true;
 }
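
Both isAnnotatedToAllowDirectAssignment and AttrFilter now iterate D->specific_attrs<AnnotateAttr>() as a range and compare the annotation string. The same shape with plain containers is sketched below; only the annotation spelling is taken from the patch, the rest is a toy:

#include <iostream>
#include <string>
#include <vector>

struct AnnotateAttr { std::string Annotation; };

struct MethodDecl {
  std::vector<AnnotateAttr> Annotations;
  // Stand-in for specific_attrs<AnnotateAttr>(): just expose the range.
  const std::vector<AnnotateAttr> &specificAttrs() const { return Annotations; }
};

// Mirrors AttrFilter: returns false for methods carrying the opt-out annotation.
static bool attrFilter(const MethodDecl &M) {
  for (const auto &Ann : M.specificAttrs())
    if (Ann.Annotation == "objc_no_direct_instance_variable_assignment")
      return false;
  return true;
}

int main() {
  MethodDecl Plain, Marked;
  Marked.Annotations.push_back({"objc_no_direct_instance_variable_assignment"});
  std::cout << std::boolalpha << attrFilter(Plain) << ' '
            << attrFilter(Marked) << '\n';   // true false
}
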
 
diff --git a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 93daf94..e060c36 100644
--- a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -23,7 +23,7 @@
 
 namespace {
 class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
   void reportBug(const char *Msg,
                  ProgramStateRef StateZero,
                  CheckerContext &C) const ;
@@ -37,7 +37,7 @@
                                CheckerContext &C) const {
   if (ExplodedNode *N = C.generateSink(StateZero)) {
     if (!BT)
-      BT.reset(new BuiltinBug("Division by zero"));
+      BT.reset(new BuiltinBug(this, "Division by zero"));
 
     BugReport *R = new BugReport(*BT, Msg, N);
     bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R);
@@ -68,7 +68,7 @@
   // Check for divide by zero.
   ConstraintManager &CM = C.getConstraintManager();
   ProgramStateRef stateNotZero, stateZero;
-  llvm::tie(stateNotZero, stateZero) = CM.assumeDual(C.getState(), *DV);
+  std::tie(stateNotZero, stateZero) = CM.assumeDual(C.getState(), *DV);
 
   if (!stateNotZero) {
     assert(stateZero);
diff --git a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 3ed2435..9a0fa09 100644
--- a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -18,7 +18,7 @@
 
 namespace {
 class ExprInspectionChecker : public Checker< eval::Call > {
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
 
   void analyzerEval(const CallExpr *CE, CheckerContext &C) const;
   void analyzerCheckInlined(const CallExpr *CE, CheckerContext &C) const;
@@ -68,7 +68,7 @@
     return "UNDEFINED";
 
   ProgramStateRef StTrue, StFalse;
-  llvm::tie(StTrue, StFalse) =
+  std::tie(StTrue, StFalse) =
     State->assume(AssertionVal.castAs<DefinedOrUnknownSVal>());
 
   if (StTrue) {
@@ -95,7 +95,7 @@
     return;
 
   if (!BT)
-    BT.reset(new BugType("Checking analyzer assumptions", "debug"));
+    BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
 
   BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N);
   C.emitReport(R);
@@ -106,7 +106,7 @@
   ExplodedNode *N = C.getPredecessor();
 
   if (!BT)
-    BT.reset(new BugType("Checking analyzer assumptions", "debug"));
+    BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
 
   BugReport *R = new BugReport(*BT, "REACHABLE", N);
   C.emitReport(R);
@@ -126,7 +126,7 @@
     return;
 
   if (!BT)
-    BT.reset(new BugType("Checking analyzer assumptions", "debug"));
+    BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
 
   BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N);
   C.emitReport(R);
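
The recurring "if (!BT) BT.reset(new BugType(this, ...))" idiom creates the bug type lazily, the first time the checker actually reports something, and the member holding it is now a mutable std::unique_ptr. A generic sketch of that lazy-member pattern (toy BugType, no analyzer dependencies):

#include <iostream>
#include <memory>
#include <string>

struct BugType {
  std::string Name;
  explicit BugType(std::string N) : Name(std::move(N)) {
    std::cout << "BugType '" << Name << "' constructed\n";
  }
};

class Checker {
  // mutable: the const checking entry points may still create it on first use.
  mutable std::unique_ptr<BugType> BT;
public:
  void report(const std::string &Msg) const {
    if (!BT)                                        // constructed at most once
      BT.reset(new BugType("Checking analyzer assumptions"));
    std::cout << BT->Name << ": " << Msg << '\n';
  }
};

int main() {
  Checker C;
  C.report("REACHABLE");   // constructs the BugType
  C.report("TRUE");        // reuses it
}
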
diff --git a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index 085a991..60bb036 100644
--- a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -25,7 +25,7 @@
 namespace {
 class FixedAddressChecker 
   : public Checker< check::PreStmt<BinaryOperator> > {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
 
 public:
   void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -52,10 +52,11 @@
 
   if (ExplodedNode *N = C.addTransition()) {
     if (!BT)
-      BT.reset(new BuiltinBug("Use fixed address", 
-                          "Using a fixed address is not portable because that "
-                          "address will probably not be valid in all "
-                          "environments or platforms."));
+      BT.reset(
+          new BuiltinBug(this, "Use fixed address",
+                         "Using a fixed address is not portable because that "
+                         "address will probably not be valid in all "
+                         "environments or platforms."));
     BugReport *R = new BugReport(*BT, BT->getDescription(), N);
     R->addRange(B->getRHS()->getSourceRange());
     C.emitReport(R);
diff --git a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 1dc60c6..93bc12c 100644
--- a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -34,7 +34,6 @@
   static void *getTag() { static int Tag; return &Tag; }
 
   void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
-  void checkPostStmt(const DeclRefExpr *DRE, CheckerContext &C) const;
 
   void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
 
@@ -43,10 +42,10 @@
   /// Denotes the return vale.
   static const unsigned ReturnValueIndex = UINT_MAX - 1;
 
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
   inline void initBugType() const {
     if (!BT)
-      BT.reset(new BugType("Use of Untrusted Data", "Untrusted Data"));
+      BT.reset(new BugType(this, "Use of Untrusted Data", "Untrusted Data"));
   }
 
   /// \brief Catch taint related bugs. Check if tainted data is passed to a
@@ -613,11 +612,7 @@
   const FunctionDecl *FDecl = C.getCalleeDecl(CE);
   if (!FDecl)
     return false;
-  for (specific_attr_iterator<FormatAttr>
-         i = FDecl->specific_attr_begin<FormatAttr>(),
-         e = FDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
-
-    const FormatAttr *Format = *i;
+  for (const auto *Format : FDecl->specific_attrs<FormatAttr>()) {
     ArgNum = Format->getFormatIdx() - 1;
     if ((Format->getType()->getName() == "printf") &&
          CE->getNumArgs() > ArgNum)
diff --git a/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp b/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
deleted file mode 100644
index 4997f8d..0000000
--- a/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
+++ /dev/null
@@ -1,737 +0,0 @@
-//==- IdempotentOperationChecker.cpp - Idempotent Operations ----*- C++ -*-==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a set of path-sensitive checks for idempotent and/or
-// tautological operations. Each potential operation is checked along all paths
-// to see if every path results in a pointless operation.
-//                 +-------------------------------------------+
-//                 |Table of idempotent/tautological operations|
-//                 +-------------------------------------------+
-//+--------------------------------------------------------------------------+
-//|Operator | x op x | x op 1 | 1 op x | x op 0 | 0 op x | x op ~0 | ~0 op x |
-//+--------------------------------------------------------------------------+
-//  +, +=   |        |        |        |   x    |   x    |         |
-//  -, -=   |        |        |        |   x    |   -x   |         |
-//  *, *=   |        |   x    |   x    |   0    |   0    |         |
-//  /, /=   |   1    |   x    |        |  N/A   |   0    |         |
-//  &, &=   |   x    |        |        |   0    |   0    |   x     |    x
-//  |, |=   |   x    |        |        |   x    |   x    |   ~0    |    ~0
-//  ^, ^=   |   0    |        |        |   x    |   x    |         |
-//  <<, <<= |        |        |        |   x    |   0    |         |
-//  >>, >>= |        |        |        |   x    |   0    |         |
-//  ||      |   x    |   1    |   1    |   x    |   x    |   1     |    1
-//  &&      |   x    |   x    |   x    |   0    |   0    |   x     |    x
-//  =       |   x    |        |        |        |        |         |
-//  ==      |   1    |        |        |        |        |         |
-//  >=      |   1    |        |        |        |        |         |
-//  <=      |   1    |        |        |        |        |         |
-//  >       |   0    |        |        |        |        |         |
-//  <       |   0    |        |        |        |        |         |
-//  !=      |   0    |        |        |        |        |         |
-//===----------------------------------------------------------------------===//
-//
-// Things TODO:
-// - Improved error messages
-// - Handle mixed assumptions (which assumptions can belong together?)
-// - Finer grained false positive control (levels)
-// - Handling ~0 values
-
-#include "ClangSACheckers.h"
-#include "clang/AST/Stmt.h"
-#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
-#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
-#include "clang/Analysis/CFGStmtMap.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace clang;
-using namespace ento;
-
-namespace {
-class IdempotentOperationChecker
-  : public Checker<check::PreStmt<BinaryOperator>,
-                     check::PostStmt<BinaryOperator>,
-                     check::EndAnalysis> {
-public:
-  void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
-  void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
-  void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,ExprEngine &Eng) const;
-
-private:
-  // Our assumption about a particular operation.
-  enum Assumption { Possible = 0, Impossible, Equal, LHSis1, RHSis1, LHSis0,
-      RHSis0 };
-
-  static void UpdateAssumption(Assumption &A, const Assumption &New);
-
-  // False positive reduction methods
-  static bool isSelfAssign(const Expr *LHS, const Expr *RHS);
-  static bool isUnused(const Expr *E, AnalysisDeclContext *AC);
-  static bool isTruncationExtensionAssignment(const Expr *LHS,
-                                              const Expr *RHS);
-  static bool pathWasCompletelyAnalyzed(AnalysisDeclContext *AC,
-                                        const CFGBlock *CB,
-                                        const CoreEngine &CE);
-  static bool CanVary(const Expr *Ex,
-                      AnalysisDeclContext *AC);
-  static bool isConstantOrPseudoConstant(const DeclRefExpr *DR,
-                                         AnalysisDeclContext *AC);
-  static bool containsNonLocalVarDecl(const Stmt *S);
-
-  // Hash table and related data structures
-  struct BinaryOperatorData {
-    BinaryOperatorData() : assumption(Possible) {}
-
-    Assumption assumption;
-    ExplodedNodeSet explodedNodes; // Set of ExplodedNodes that refer to a
-                                   // BinaryOperator
-  };
-  typedef llvm::DenseMap<const BinaryOperator *, BinaryOperatorData>
-      AssumptionMap;
-  mutable AssumptionMap hash;
-  mutable OwningPtr<BugType> BT;
-};
-}
-
-void IdempotentOperationChecker::checkPreStmt(const BinaryOperator *B,
-                                              CheckerContext &C) const {
-  // Find or create an entry in the hash for this BinaryOperator instance.
-  // If we haven't done a lookup before, it will get default initialized to
-  // 'Possible'. At this stage we do not store the ExplodedNode, as it has not
-  // been created yet.
-  BinaryOperatorData &Data = hash[B];
-  Assumption &A = Data.assumption;
-  AnalysisDeclContext *AC = C.getCurrentAnalysisDeclContext();
-
-  // If we already have visited this node on a path that does not contain an
-  // idempotent operation, return immediately.
-  if (A == Impossible)
-    return;
-
-  // Retrieve both sides of the operator and determine if they can vary (which
-  // may mean this is a false positive.
-  const Expr *LHS = B->getLHS();
-  const Expr *RHS = B->getRHS();
-
-  // At this stage we can calculate whether each side contains a false positive
-  // that applies to all operators. We only need to calculate this the first
-  // time.
-  bool LHSContainsFalsePositive = false, RHSContainsFalsePositive = false;
-  if (A == Possible) {
-    // An expression contains a false positive if it can't vary, or if it
-    // contains a known false positive VarDecl.
-    LHSContainsFalsePositive = !CanVary(LHS, AC)
-        || containsNonLocalVarDecl(LHS);
-    RHSContainsFalsePositive = !CanVary(RHS, AC)
-        || containsNonLocalVarDecl(RHS);
-  }
-
-  ProgramStateRef state = C.getState();
-  const LocationContext *LCtx = C.getLocationContext();
-  SVal LHSVal = state->getSVal(LHS, LCtx);
-  SVal RHSVal = state->getSVal(RHS, LCtx);
-
-  // If either value is unknown, we can't be 100% sure of all paths.
-  if (LHSVal.isUnknownOrUndef() || RHSVal.isUnknownOrUndef()) {
-    A = Impossible;
-    return;
-  }
-  BinaryOperator::Opcode Op = B->getOpcode();
-
-  // Dereference the LHS SVal if this is an assign operation
-  switch (Op) {
-  default:
-    break;
-
-  // Fall through intentional
-  case BO_AddAssign:
-  case BO_SubAssign:
-  case BO_MulAssign:
-  case BO_DivAssign:
-  case BO_AndAssign:
-  case BO_OrAssign:
-  case BO_XorAssign:
-  case BO_ShlAssign:
-  case BO_ShrAssign:
-  case BO_Assign:
-  // Assign statements have one extra level of indirection
-    if (!LHSVal.getAs<Loc>()) {
-      A = Impossible;
-      return;
-    }
-    LHSVal = state->getSVal(LHSVal.castAs<Loc>(), LHS->getType());
-  }
-
-
-  // We now check for various cases which result in an idempotent operation.
-
-  // x op x
-  switch (Op) {
-  default:
-    break; // We don't care about any other operators.
-
-  // Fall through intentional
-  case BO_Assign:
-    // x Assign x can be used to silence unused variable warnings intentionally.
-    // If this is a self assignment and the variable is referenced elsewhere,
-    // and the assignment is not a truncation or extension, then it is a false
-    // positive.
-    if (isSelfAssign(LHS, RHS)) {
-      if (!isUnused(LHS, AC) && !isTruncationExtensionAssignment(LHS, RHS)) {
-        UpdateAssumption(A, Equal);
-        return;
-      }
-      else {
-        A = Impossible;
-        return;
-      }
-    }
-
-  case BO_SubAssign:
-  case BO_DivAssign:
-  case BO_AndAssign:
-  case BO_OrAssign:
-  case BO_XorAssign:
-  case BO_Sub:
-  case BO_Div:
-  case BO_And:
-  case BO_Or:
-  case BO_Xor:
-  case BO_LOr:
-  case BO_LAnd:
-  case BO_EQ:
-  case BO_NE:
-    if (LHSVal != RHSVal || LHSContainsFalsePositive
-        || RHSContainsFalsePositive)
-      break;
-    UpdateAssumption(A, Equal);
-    return;
-  }
-
-  // x op 1
-  switch (Op) {
-   default:
-     break; // We don't care about any other operators.
-
-   // Fall through intentional
-   case BO_MulAssign:
-   case BO_DivAssign:
-   case BO_Mul:
-   case BO_Div:
-   case BO_LOr:
-   case BO_LAnd:
-     if (!RHSVal.isConstant(1) || RHSContainsFalsePositive)
-       break;
-     UpdateAssumption(A, RHSis1);
-     return;
-  }
-
-  // 1 op x
-  switch (Op) {
-  default:
-    break; // We don't care about any other operators.
-
-  // Fall through intentional
-  case BO_MulAssign:
-  case BO_Mul:
-  case BO_LOr:
-  case BO_LAnd:
-    if (!LHSVal.isConstant(1) || LHSContainsFalsePositive)
-      break;
-    UpdateAssumption(A, LHSis1);
-    return;
-  }
-
-  // x op 0
-  switch (Op) {
-  default:
-    break; // We don't care about any other operators.
-
-  // Fall through intentional
-  case BO_AddAssign:
-  case BO_SubAssign:
-  case BO_MulAssign:
-  case BO_AndAssign:
-  case BO_OrAssign:
-  case BO_XorAssign:
-  case BO_Add:
-  case BO_Sub:
-  case BO_Mul:
-  case BO_And:
-  case BO_Or:
-  case BO_Xor:
-  case BO_Shl:
-  case BO_Shr:
-  case BO_LOr:
-  case BO_LAnd:
-    if (!RHSVal.isConstant(0) || RHSContainsFalsePositive)
-      break;
-    UpdateAssumption(A, RHSis0);
-    return;
-  }
-
-  // 0 op x
-  switch (Op) {
-  default:
-    break; // We don't care about any other operators.
-
-  // Fall through intentional
-  //case BO_AddAssign: // Common false positive
-  case BO_SubAssign: // Check only if unsigned
-  case BO_MulAssign:
-  case BO_DivAssign:
-  case BO_AndAssign:
-  //case BO_OrAssign: // Common false positive
-  //case BO_XorAssign: // Common false positive
-  case BO_ShlAssign:
-  case BO_ShrAssign:
-  case BO_Add:
-  case BO_Sub:
-  case BO_Mul:
-  case BO_Div:
-  case BO_And:
-  case BO_Or:
-  case BO_Xor:
-  case BO_Shl:
-  case BO_Shr:
-  case BO_LOr:
-  case BO_LAnd:
-    if (!LHSVal.isConstant(0) || LHSContainsFalsePositive)
-      break;
-    UpdateAssumption(A, LHSis0);
-    return;
-  }
-
-  // If we get to this point, there has been a valid use of this operation.
-  A = Impossible;
-}
-
-// At the post visit stage, the predecessor ExplodedNode will be the
-// BinaryOperator that was just created. We use this hook to collect the
-// ExplodedNode.
-void IdempotentOperationChecker::checkPostStmt(const BinaryOperator *B,
-                                               CheckerContext &C) const {
-  // Add the ExplodedNode we just visited
-  BinaryOperatorData &Data = hash[B];
-
-  const Stmt *predStmt =
-      C.getPredecessor()->getLocation().castAs<StmtPoint>().getStmt();
-
-  // Ignore implicit calls to setters.
-  if (!isa<BinaryOperator>(predStmt))
-    return;
-
-  Data.explodedNodes.Add(C.getPredecessor());
-}
-
-void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
-                                                  BugReporter &BR,
-                                                  ExprEngine &Eng) const {
-  if (!BT)
-    BT.reset(new BugType("Idempotent operation", "Dead code"));
-
-  // Iterate over the hash to see if we have any paths with definite
-  // idempotent operations.
-  for (AssumptionMap::const_iterator i = hash.begin(); i != hash.end(); ++i) {
-    // Unpack the hash contents
-    const BinaryOperatorData &Data = i->second;
-    const Assumption &A = Data.assumption;
-    const ExplodedNodeSet &ES = Data.explodedNodes;
-
-    // If there are no nodes accosted with the expression, nothing to report.
-    // FIXME: This is possible because the checker does part of processing in
-    // checkPreStmt and part in checkPostStmt.
-    if (ES.begin() == ES.end())
-      continue;
-
-    const BinaryOperator *B = i->first;
-
-    if (A == Impossible)
-      continue;
-
-    // If the analyzer did not finish, check to see if we can still emit this
-    // warning
-    if (Eng.hasWorkRemaining()) {
-      // If we can trace back
-      AnalysisDeclContext *AC = (*ES.begin())->getLocationContext()
-                                         ->getAnalysisDeclContext();
-      if (!pathWasCompletelyAnalyzed(AC,
-                                     AC->getCFGStmtMap()->getBlock(B),
-                                     Eng.getCoreEngine()))
-        continue;
-    }
-
-    // Select the error message and SourceRanges to report.
-    SmallString<128> buf;
-    llvm::raw_svector_ostream os(buf);
-    bool LHSRelevant = false, RHSRelevant = false;
-    switch (A) {
-    case Equal:
-      LHSRelevant = true;
-      RHSRelevant = true;
-      if (B->getOpcode() == BO_Assign)
-        os << "Assigned value is always the same as the existing value";
-      else
-        os << "Both operands to '" << B->getOpcodeStr()
-           << "' always have the same value";
-      break;
-    case LHSis1:
-      LHSRelevant = true;
-      os << "The left operand to '" << B->getOpcodeStr() << "' is always 1";
-      break;
-    case RHSis1:
-      RHSRelevant = true;
-      os << "The right operand to '" << B->getOpcodeStr() << "' is always 1";
-      break;
-    case LHSis0:
-      LHSRelevant = true;
-      os << "The left operand to '" << B->getOpcodeStr() << "' is always 0";
-      break;
-    case RHSis0:
-      RHSRelevant = true;
-      os << "The right operand to '" << B->getOpcodeStr() << "' is always 0";
-      break;
-    case Possible:
-      llvm_unreachable("Operation was never marked with an assumption");
-    case Impossible:
-      llvm_unreachable(0);
-    }
-
-    // Add a report for each ExplodedNode
-    for (ExplodedNodeSet::iterator I = ES.begin(), E = ES.end(); I != E; ++I) {
-      BugReport *report = new BugReport(*BT, os.str(), *I);
-
-      // Add source ranges and visitor hooks
-      if (LHSRelevant) {
-        const Expr *LHS = i->first->getLHS();
-        report->addRange(LHS->getSourceRange());
-        FindLastStoreBRVisitor::registerStatementVarDecls(*report, LHS, false);
-      }
-      if (RHSRelevant) {
-        const Expr *RHS = i->first->getRHS();
-        report->addRange(i->first->getRHS()->getSourceRange());
-        FindLastStoreBRVisitor::registerStatementVarDecls(*report, RHS, false);
-      }
-
-      BR.emitReport(report);
-    }
-  }
-
-  hash.clear();
-}
-
-// Updates the current assumption given the new assumption
-inline void IdempotentOperationChecker::UpdateAssumption(Assumption &A,
-                                                        const Assumption &New) {
-// If the assumption is the same, there is nothing to do
-  if (A == New)
-    return;
-
-  switch (A) {
-  // If we don't currently have an assumption, set it
-  case Possible:
-    A = New;
-    return;
-
-  // If we have determined that a valid state happened, ignore the new
-  // assumption.
-  case Impossible:
-    return;
-
-  // Any other case means that we had a different assumption last time. We don't
-  // currently support mixing assumptions for diagnostic reasons, so we set
-  // our assumption to be impossible.
-  default:
-    A = Impossible;
-    return;
-  }
-}
-
-// Check for a statement where a variable is self assigned to possibly avoid an
-// unused variable warning.
-bool IdempotentOperationChecker::isSelfAssign(const Expr *LHS, const Expr *RHS) {
-  LHS = LHS->IgnoreParenCasts();
-  RHS = RHS->IgnoreParenCasts();
-
-  const DeclRefExpr *LHS_DR = dyn_cast<DeclRefExpr>(LHS);
-  if (!LHS_DR)
-    return false;
-
-  const VarDecl *VD = dyn_cast<VarDecl>(LHS_DR->getDecl());
-  if (!VD)
-    return false;
-
-  const DeclRefExpr *RHS_DR = dyn_cast<DeclRefExpr>(RHS);
-  if (!RHS_DR)
-    return false;
-
-  if (VD != RHS_DR->getDecl())
-    return false;
-
-  return true;
-}
-
-// Returns true if the Expr points to a VarDecl that is not read anywhere
-// outside of self-assignments.
-bool IdempotentOperationChecker::isUnused(const Expr *E,
-                                          AnalysisDeclContext *AC) {
-  if (!E)
-    return false;
-
-  const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
-  if (!DR)
-    return false;
-
-  const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
-  if (!VD)
-    return false;
-
-  if (AC->getPseudoConstantAnalysis()->wasReferenced(VD))
-    return false;
-
-  return true;
-}
-
-// Check for self casts truncating/extending a variable
-bool IdempotentOperationChecker::isTruncationExtensionAssignment(
-                                                              const Expr *LHS,
-                                                              const Expr *RHS) {
-
-  const DeclRefExpr *LHS_DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParenCasts());
-  if (!LHS_DR)
-    return false;
-
-  const VarDecl *VD = dyn_cast<VarDecl>(LHS_DR->getDecl());
-  if (!VD)
-    return false;
-
-  const DeclRefExpr *RHS_DR = dyn_cast<DeclRefExpr>(RHS->IgnoreParenCasts());
-  if (!RHS_DR)
-    return false;
-
-  if (VD != RHS_DR->getDecl())
-     return false;
-
-  return dyn_cast<DeclRefExpr>(RHS->IgnoreParenLValueCasts()) == NULL;
-}
-
-// Returns false if a path to this block was not completely analyzed, or true
-// otherwise.
-bool
-IdempotentOperationChecker::pathWasCompletelyAnalyzed(AnalysisDeclContext *AC,
-                                                      const CFGBlock *CB,
-                                                      const CoreEngine &CE) {
-
-  CFGReverseBlockReachabilityAnalysis *CRA = AC->getCFGReachablityAnalysis();
-  
-  // Test for reachability from any aborted blocks to this block
-  typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
-  for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
-      E = CE.blocks_exhausted_end(); I != E; ++I) {
-    const BlockEdge &BE =  I->first;
-
-    // The destination block on the BlockEdge is the first block that was not
-    // analyzed. If we can reach this block from the aborted block, then this
-    // block was not completely analyzed.
-    //
-    // Also explicitly check if the current block is the destination block.
-    // While technically reachable, it means we aborted the analysis on
-    // a path that included that block.
-    const CFGBlock *destBlock = BE.getDst();
-    if (destBlock == CB || CRA->isReachable(destBlock, CB))
-      return false;
-  }
-
-  // Test for reachability from blocks we just gave up on.
-  typedef CoreEngine::BlocksAborted::const_iterator AbortedIterator;
-  for (AbortedIterator I = CE.blocks_aborted_begin(),
-       E = CE.blocks_aborted_end(); I != E; ++I) {
-    const CFGBlock *destBlock = I->first;
-    if (destBlock == CB || CRA->isReachable(destBlock, CB))
-      return false;
-  }
-  
-  // For the items still on the worklist, see if they are in blocks that
-  // can eventually reach 'CB'.
-  class VisitWL : public WorkList::Visitor {
-    const CFGStmtMap *CBM;
-    const CFGBlock *TargetBlock;
-    CFGReverseBlockReachabilityAnalysis &CRA;
-  public:
-    VisitWL(const CFGStmtMap *cbm, const CFGBlock *targetBlock,
-            CFGReverseBlockReachabilityAnalysis &cra)
-      : CBM(cbm), TargetBlock(targetBlock), CRA(cra) {}
-    virtual bool visit(const WorkListUnit &U) {
-      ProgramPoint P = U.getNode()->getLocation();
-      const CFGBlock *B = 0;
-      if (Optional<StmtPoint> SP = P.getAs<StmtPoint>()) {
-        B = CBM->getBlock(SP->getStmt());
-      } else if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
-        B = BE->getDst();
-      } else if (Optional<BlockEntrance> BEnt = P.getAs<BlockEntrance>()) {
-        B = BEnt->getBlock();
-      } else if (Optional<BlockExit> BExit = P.getAs<BlockExit>()) {
-        B = BExit->getBlock();
-      }
-      if (!B)
-        return true;
-      
-      return B == TargetBlock || CRA.isReachable(B, TargetBlock);
-    }
-  };
-  VisitWL visitWL(AC->getCFGStmtMap(), CB, *CRA);
-  // Were there any items in the worklist that could potentially reach
-  // this block?
-  if (CE.getWorkList()->visitItemsInWorkList(visitWL))
-    return false;
-
-  // Verify that this block is reachable from the entry block
-  if (!CRA->isReachable(&AC->getCFG()->getEntry(), CB))
-    return false;
-
-  // If we get to this point, there is no connection to the entry block or an
-  // aborted block. This path is unreachable and we can report the error.
-  return true;
-}
-
-// Recursive function that determines whether an expression contains any element
-// that varies. This could be due to a compile-time constant like sizeof. An
-// expression may also involve a variable that behaves like a constant. The
-// function returns true if the expression varies, and false otherwise.
-bool IdempotentOperationChecker::CanVary(const Expr *Ex,
-                                         AnalysisDeclContext *AC) {
-  // Parentheses and casts are irrelevant here
-  Ex = Ex->IgnoreParenCasts();
-
-  if (Ex->getLocStart().isMacroID())
-    return false;
-
-  switch (Ex->getStmtClass()) {
-  // Trivially true cases
-  case Stmt::ArraySubscriptExprClass:
-  case Stmt::MemberExprClass:
-  case Stmt::StmtExprClass:
-  case Stmt::CallExprClass:
-  case Stmt::VAArgExprClass:
-  case Stmt::ShuffleVectorExprClass:
-    return true;
-  default:
-    return true;
-
-  // Trivially false cases
-  case Stmt::IntegerLiteralClass:
-  case Stmt::CharacterLiteralClass:
-  case Stmt::FloatingLiteralClass:
-  case Stmt::PredefinedExprClass:
-  case Stmt::ImaginaryLiteralClass:
-  case Stmt::StringLiteralClass:
-  case Stmt::OffsetOfExprClass:
-  case Stmt::CompoundLiteralExprClass:
-  case Stmt::AddrLabelExprClass:
-  case Stmt::BinaryTypeTraitExprClass:
-  case Stmt::GNUNullExprClass:
-  case Stmt::InitListExprClass:
-  case Stmt::DesignatedInitExprClass:
-  case Stmt::BlockExprClass:
-    return false;
-
-  // Cases requiring custom logic
-  case Stmt::UnaryExprOrTypeTraitExprClass: {
-    const UnaryExprOrTypeTraitExpr *SE = 
-                       cast<const UnaryExprOrTypeTraitExpr>(Ex);
-    if (SE->getKind() != UETT_SizeOf)
-      return false;
-    return SE->getTypeOfArgument()->isVariableArrayType();
-  }
-  case Stmt::DeclRefExprClass:
-    // Check for constants/pseudoconstants
-    return !isConstantOrPseudoConstant(cast<DeclRefExpr>(Ex), AC);
-
-  // The next cases require recursion for subexpressions
-  case Stmt::BinaryOperatorClass: {
-    const BinaryOperator *B = cast<const BinaryOperator>(Ex);
-
-    // Exclude cases involving pointer arithmetic.  These are usually
-    // false positives.
-    if (B->getOpcode() == BO_Sub || B->getOpcode() == BO_Add)
-      if (B->getLHS()->getType()->getAs<PointerType>())
-        return false;
-
-    return CanVary(B->getRHS(), AC)
-        || CanVary(B->getLHS(), AC);
-   }
-  case Stmt::UnaryOperatorClass:
-    return CanVary(cast<UnaryOperator>(Ex)->getSubExpr(), AC);
-  case Stmt::ConditionalOperatorClass:
-  case Stmt::BinaryConditionalOperatorClass:
-    return CanVary(cast<AbstractConditionalOperator>(Ex)->getCond(), AC);
-  }
-}
-
-// Returns true if a DeclRefExpr is or behaves like a constant.
-bool IdempotentOperationChecker::isConstantOrPseudoConstant(
-                                                          const DeclRefExpr *DR,
-                                                          AnalysisDeclContext *AC) {
-  // Check if the type of the Decl is const-qualified
-  if (DR->getType().isConstQualified())
-    return true;
-
-  // Check for an enum
-  if (isa<EnumConstantDecl>(DR->getDecl()))
-    return true;
-
-  const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
-  if (!VD)
-    return true;
-
-  // Check if the Decl behaves like a constant. This check also takes care of
-  // static variables, which can only change between function calls if they are
-  // modified in the AST.
-  PseudoConstantAnalysis *PCA = AC->getPseudoConstantAnalysis();
-  if (PCA->isPseudoConstant(VD))
-    return true;
-
-  return false;
-}
-
-// Recursively find any substatements containing VarDecl's with storage other
-// than local
-bool IdempotentOperationChecker::containsNonLocalVarDecl(const Stmt *S) {
-  const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
-
-  if (DR)
-    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
-      if (!VD->hasLocalStorage())
-        return true;
-
-  for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
-      ++I)
-    if (const Stmt *child = *I)
-      if (containsNonLocalVarDecl(child))
-        return true;
-
-  return false;
-}
-
-
-void ento::registerIdempotentOperationChecker(CheckerManager &mgr) {
-  mgr.registerChecker<IdempotentOperationChecker>();
-}
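
Editorial note, not part of the patch: for context on the file removed above, the patterns IdempotentOperationChecker used to diagnose looked roughly like the sketch below. It is illustrative only; the real diagnostics depended on path-sensitive analysis plus the CanVary and pseudo-constant filters shown above, and the function name here is made up.

    // Illustrative only: operations of the kind the removed checker flagged.
    int idempotentExamples(int x) {
      x = x;                // assigned value is always the same as the existing value
      int one = 1;
      int scaled = x * one; // right operand is always 1 on every analyzed path
      int diff = x - x;     // both operands always have the same value
      return scaled + diff;
    }
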
diff --git a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index e696e38..d5c52b4 100644
--- a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -11,22 +11,23 @@
 /// \brief This defines IdenticalExprChecker, a check that warns about
 /// unintended use of identical expressions.
 ///
-/// It checks for use of identical expressions with comparison operators.
+/// It checks for use of identical expressions with comparison operators and
+/// inside conditional expressions.
 ///
 //===----------------------------------------------------------------------===//
 
 #include "ClangSACheckers.h"
+#include "clang/AST/RecursiveASTVisitor.h"
 #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
 #include "clang/StaticAnalyzer/Core/Checker.h"
 #include "clang/StaticAnalyzer/Core/CheckerManager.h"
 #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/AST/RecursiveASTVisitor.h"
 
 using namespace clang;
 using namespace ento;
 
-static bool isIdenticalExpr(const ASTContext &Ctx, const Expr *Expr1,
-                            const Expr *Expr2);
+static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
+                            const Stmt *Stmt2, bool IgnoreSideEffects = false);
 //===----------------------------------------------------------------------===//
 // FindIdenticalExprVisitor - Identify nodes using identical expressions.
 //===----------------------------------------------------------------------===//
@@ -34,23 +35,153 @@
 namespace {
 class FindIdenticalExprVisitor
     : public RecursiveASTVisitor<FindIdenticalExprVisitor> {
+  BugReporter &BR;
+  const CheckerBase *Checker;
+  AnalysisDeclContext *AC;
 public:
-  explicit FindIdenticalExprVisitor(BugReporter &B, AnalysisDeclContext *A)
-      : BR(B), AC(A) {}
+  explicit FindIdenticalExprVisitor(BugReporter &B,
+                                    const CheckerBase *Checker,
+                                    AnalysisDeclContext *A)
+      : BR(B), Checker(Checker), AC(A) {}
   // FindIdenticalExprVisitor only visits nodes
-  // that are binary operators.
+  // that are binary operators, if statements or
+  // conditional operators.
   bool VisitBinaryOperator(const BinaryOperator *B);
+  bool VisitIfStmt(const IfStmt *I);
+  bool VisitConditionalOperator(const ConditionalOperator *C);
 
 private:
-  BugReporter &BR;
-  AnalysisDeclContext *AC;
+  void reportIdenticalExpr(const BinaryOperator *B, bool CheckBitwise,
+                           ArrayRef<SourceRange> Sr);
+  void checkBitwiseOrLogicalOp(const BinaryOperator *B, bool CheckBitwise);
+  void checkComparisonOp(const BinaryOperator *B);
 };
 } // end anonymous namespace
 
+void FindIdenticalExprVisitor::reportIdenticalExpr(const BinaryOperator *B,
+                                                   bool CheckBitwise,
+                                                   ArrayRef<SourceRange> Sr) {
+  StringRef Message;
+  if (CheckBitwise)
+    Message = "identical expressions on both sides of bitwise operator";
+  else
+    Message = "identical expressions on both sides of logical operator";
+
+  PathDiagnosticLocation ELoc =
+      PathDiagnosticLocation::createOperatorLoc(B, BR.getSourceManager());
+  BR.EmitBasicReport(AC->getDecl(), Checker,
+                     "Use of identical expressions",
+                     categories::LogicError,
+                     Message, ELoc, Sr);
+}
+
+void FindIdenticalExprVisitor::checkBitwiseOrLogicalOp(const BinaryOperator *B,
+                                                       bool CheckBitwise) {
+  SourceRange Sr[2];
+
+  const Expr *LHS = B->getLHS();
+  const Expr *RHS = B->getRHS();
+
+  // Split operators as long as we still have operators to split on. We will
+  // get called for every binary operator in an expression, so there is no need
+  // to check every operand against every other one here, just the rightmost
+  // one against the others.
+  while (const BinaryOperator *B2 = dyn_cast<BinaryOperator>(LHS)) {
+    if (B->getOpcode() != B2->getOpcode())
+      break;
+    if (isIdenticalStmt(AC->getASTContext(), RHS, B2->getRHS())) {
+      Sr[0] = RHS->getSourceRange();
+      Sr[1] = B2->getRHS()->getSourceRange();
+      reportIdenticalExpr(B, CheckBitwise, Sr);
+    }
+    LHS = B2->getLHS();
+  }
+  
+  if (isIdenticalStmt(AC->getASTContext(), RHS, LHS)) {
+    Sr[0] = RHS->getSourceRange();
+    Sr[1] = LHS->getSourceRange();
+    reportIdenticalExpr(B, CheckBitwise, Sr);
+  }
+}
+
+bool FindIdenticalExprVisitor::VisitIfStmt(const IfStmt *I) {
+  const Stmt *Stmt1 = I->getThen();
+  const Stmt *Stmt2 = I->getElse();
+
+  // Check for identical conditions:
+  //
+  // if (b) {
+  //   foo1();
+  // } else if (b) {
+  //   foo2();
+  // }
+  if (Stmt1 && Stmt2) {
+    const Expr *Cond1 = I->getCond();
+    const Stmt *Else = Stmt2;
+    while (const IfStmt *I2 = dyn_cast_or_null<IfStmt>(Else)) {
+      const Expr *Cond2 = I2->getCond();
+      if (isIdenticalStmt(AC->getASTContext(), Cond1, Cond2, false)) {
+        SourceRange Sr = Cond1->getSourceRange();
+        PathDiagnosticLocation ELoc(Cond2, BR.getSourceManager(), AC);
+        BR.EmitBasicReport(AC->getDecl(), Checker, "Identical conditions",
+                           categories::LogicError,
+                           "expression is identical to previous condition",
+                           ELoc, Sr);
+      }
+      Else = I2->getElse();
+    }
+  }
+
+  if (!Stmt1 || !Stmt2)
+    return true;
+
+  // Special handling for code like:
+  //
+  // if (b) {
+  //   i = 1;
+  // } else
+  //   i = 1;
+  if (const CompoundStmt *CompStmt = dyn_cast<CompoundStmt>(Stmt1)) {
+    if (CompStmt->size() == 1)
+      Stmt1 = CompStmt->body_back();
+  }
+  if (const CompoundStmt *CompStmt = dyn_cast<CompoundStmt>(Stmt2)) {
+    if (CompStmt->size() == 1)
+      Stmt2 = CompStmt->body_back();
+  }
+
+  if (isIdenticalStmt(AC->getASTContext(), Stmt1, Stmt2, true)) {
+      PathDiagnosticLocation ELoc =
+          PathDiagnosticLocation::createBegin(I, BR.getSourceManager(), AC);
+      BR.EmitBasicReport(AC->getDecl(), Checker,
+                         "Identical branches",
+                         categories::LogicError,
+                         "true and false branches are identical", ELoc);
+  }
+  return true;
+}
+
 bool FindIdenticalExprVisitor::VisitBinaryOperator(const BinaryOperator *B) {
   BinaryOperator::Opcode Op = B->getOpcode();
-  if (!BinaryOperator::isComparisonOp(Op))
-    return true;
+
+  if (BinaryOperator::isBitwiseOp(Op))
+    checkBitwiseOrLogicalOp(B, true);
+
+  if (BinaryOperator::isLogicalOp(Op))
+    checkBitwiseOrLogicalOp(B, false);
+
+  if (BinaryOperator::isComparisonOp(Op))
+    checkComparisonOp(B);
+
+  // We want to visit ALL nodes (subexpressions of binary comparison
+  // expressions too) that contain comparison operators.
+  // True is always returned to traverse ALL nodes.
+  return true;
+}
+
+void FindIdenticalExprVisitor::checkComparisonOp(const BinaryOperator *B) {
+  BinaryOperator::Opcode Op = B->getOpcode();
+
   //
   // Special case for floating-point representation.
   //
@@ -83,26 +214,26 @@
         (DeclRef2->getType()->hasFloatingRepresentation())) {
       if (DeclRef1->getDecl() == DeclRef2->getDecl()) {
         if ((Op == BO_EQ) || (Op == BO_NE)) {
-          return true;
+          return;
         }
       }
     }
   } else if ((FloatLit1) && (FloatLit2)) {
     if (FloatLit1->getValue().bitwiseIsEqual(FloatLit2->getValue())) {
       if ((Op == BO_EQ) || (Op == BO_NE)) {
-        return true;
+        return;
       }
     }
   } else if (LHS->getType()->hasFloatingRepresentation()) {
     // If any side of comparison operator still has floating-point
     // representation, then it's an expression. Don't warn.
     // Here only LHS is checked since RHS will be implicit casted to float.
-    return true;
+    return;
   } else {
     // No special case with floating-point representation, report as usual.
   }
 
-  if (isIdenticalExpr(AC->getASTContext(), B->getLHS(), B->getRHS())) {
+  if (isIdenticalStmt(AC->getASTContext(), B->getLHS(), B->getRHS())) {
     PathDiagnosticLocation ELoc =
         PathDiagnosticLocation::createOperatorLoc(B, BR.getSourceManager());
     StringRef Message;
@@ -110,15 +241,41 @@
       Message = "comparison of identical expressions always evaluates to true";
     else
       Message = "comparison of identical expressions always evaluates to false";
-    BR.EmitBasicReport(AC->getDecl(), "Compare of identical expressions",
+    BR.EmitBasicReport(AC->getDecl(), Checker,
+                       "Compare of identical expressions",
                        categories::LogicError, Message, ELoc);
   }
-  // We want to visit ALL nodes (subexpressions of binary comparison
-  // expressions too) that contains comparison operators.
-  // True is always returned to traverse ALL nodes.
+}
+
+bool FindIdenticalExprVisitor::VisitConditionalOperator(
+    const ConditionalOperator *C) {
+
+  // Check if expressions in conditional expression are identical
+  // from a symbolic point of view.
+
+  if (isIdenticalStmt(AC->getASTContext(), C->getTrueExpr(),
+                      C->getFalseExpr(), true)) {
+    PathDiagnosticLocation ELoc =
+        PathDiagnosticLocation::createConditionalColonLoc(
+            C, BR.getSourceManager());
+
+    SourceRange Sr[2];
+    Sr[0] = C->getTrueExpr()->getSourceRange();
+    Sr[1] = C->getFalseExpr()->getSourceRange();
+    BR.EmitBasicReport(
+        AC->getDecl(), Checker,
+        "Identical expressions in conditional expression",
+        categories::LogicError,
+        "identical expressions on both sides of ':' in conditional expression",
+        ELoc, Sr);
+  }
+  // We want to visit ALL nodes (expressions in conditional
+  // expressions too) that contain conditional operators,
+  // thus always return true to traverse ALL nodes.
   return true;
 }
-/// \brief Determines whether two expression trees are identical regarding
+
+/// \brief Determines whether two statement trees are identical regarding
 /// operators and symbols.
 ///
 /// Exceptions: expressions containing macros or functions with possible side
@@ -126,82 +283,189 @@
 /// Limitations: (t + u) and (u + t) are not considered identical.
 /// t*(u + t) and t*u + t*t are not considered identical.
 ///
-static bool isIdenticalExpr(const ASTContext &Ctx, const Expr *Expr1,
-                            const Expr *Expr2) {
-  // If Expr1 & Expr2 are of different class then they are not
-  // identical expression.
-  if (Expr1->getStmtClass() != Expr2->getStmtClass())
+static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
+                            const Stmt *Stmt2, bool IgnoreSideEffects) {
+
+  if (!Stmt1 || !Stmt2) {
+    if (!Stmt1 && !Stmt2)
+      return true;
     return false;
-  // If Expr1 has side effects then don't warn even if expressions
-  // are identical.
-  if (Expr1->HasSideEffects(Ctx))
-    return false;
-  // Is expression is based on macro then don't warn even if
-  // the expressions are identical.
-  if ((Expr1->getExprLoc().isMacroID()) || (Expr2->getExprLoc().isMacroID()))
-    return false;
-  // If all children of two expressions are identical, return true.
-  Expr::const_child_iterator I1 = Expr1->child_begin();
-  Expr::const_child_iterator I2 = Expr2->child_begin();
-  while (I1 != Expr1->child_end() && I2 != Expr2->child_end()) {
-    const Expr *Child1 = dyn_cast<Expr>(*I1);
-    const Expr *Child2 = dyn_cast<Expr>(*I2);
-    if (!Child1 || !Child2 || !isIdenticalExpr(Ctx, Child1, Child2))
-      return false;
-    ++I1;
-    ++I2;
   }
-  // If there are different number of children in the expressions, return false.
-  // (TODO: check if this is a redundant condition.)
-  if (I1 != Expr1->child_end())
-    return false;
-  if (I2 != Expr2->child_end())
+
+  // If Stmt1 & Stmt2 are of different class then they are not
+  // identical statements.
+  if (Stmt1->getStmtClass() != Stmt2->getStmtClass())
     return false;
 
-  switch (Expr1->getStmtClass()) {
+  const Expr *Expr1 = dyn_cast<Expr>(Stmt1);
+  const Expr *Expr2 = dyn_cast<Expr>(Stmt2);
+
+  if (Expr1 && Expr2) {
+    // If Stmt1 has side effects then don't warn even if expressions
+    // are identical.
+    if (!IgnoreSideEffects && Expr1->HasSideEffects(Ctx))
+      return false;
+    // If either expression comes from a macro then don't warn even if
+    // the expressions are identical.
+    if ((Expr1->getExprLoc().isMacroID()) || (Expr2->getExprLoc().isMacroID()))
+      return false;
+
+    // If all children of two expressions are identical, return true.
+    Expr::const_child_iterator I1 = Expr1->child_begin();
+    Expr::const_child_iterator I2 = Expr2->child_begin();
+    while (I1 != Expr1->child_end() && I2 != Expr2->child_end()) {
+      if (!*I1 || !*I2 || !isIdenticalStmt(Ctx, *I1, *I2, IgnoreSideEffects))
+        return false;
+      ++I1;
+      ++I2;
+    }
+    // If there are different number of children in the statements, return
+    // false.
+    if (I1 != Expr1->child_end())
+      return false;
+    if (I2 != Expr2->child_end())
+      return false;
+  }
+
+  switch (Stmt1->getStmtClass()) {
   default:
     return false;
+  case Stmt::CallExprClass:
   case Stmt::ArraySubscriptExprClass:
-  case Stmt::CStyleCastExprClass:
   case Stmt::ImplicitCastExprClass:
   case Stmt::ParenExprClass:
+  case Stmt::BreakStmtClass:
+  case Stmt::ContinueStmtClass:
+  case Stmt::NullStmtClass:
     return true;
+  case Stmt::CStyleCastExprClass: {
+    const CStyleCastExpr* CastExpr1 = cast<CStyleCastExpr>(Stmt1);
+    const CStyleCastExpr* CastExpr2 = cast<CStyleCastExpr>(Stmt2);
+
+    return CastExpr1->getTypeAsWritten() == CastExpr2->getTypeAsWritten();
+  }
+  case Stmt::ReturnStmtClass: {
+    const ReturnStmt *ReturnStmt1 = cast<ReturnStmt>(Stmt1);
+    const ReturnStmt *ReturnStmt2 = cast<ReturnStmt>(Stmt2);
+
+    return isIdenticalStmt(Ctx, ReturnStmt1->getRetValue(),
+                           ReturnStmt2->getRetValue(), IgnoreSideEffects);
+  }
+  case Stmt::ForStmtClass: {
+    const ForStmt *ForStmt1 = cast<ForStmt>(Stmt1);
+    const ForStmt *ForStmt2 = cast<ForStmt>(Stmt2);
+
+    if (!isIdenticalStmt(Ctx, ForStmt1->getInit(), ForStmt2->getInit(),
+                         IgnoreSideEffects))
+      return false;
+    if (!isIdenticalStmt(Ctx, ForStmt1->getCond(), ForStmt2->getCond(),
+                         IgnoreSideEffects))
+      return false;
+    if (!isIdenticalStmt(Ctx, ForStmt1->getInc(), ForStmt2->getInc(),
+                         IgnoreSideEffects))
+      return false;
+    if (!isIdenticalStmt(Ctx, ForStmt1->getBody(), ForStmt2->getBody(),
+                         IgnoreSideEffects))
+      return false;
+    return true;
+  }
+  case Stmt::DoStmtClass: {
+    const DoStmt *DStmt1 = cast<DoStmt>(Stmt1);
+    const DoStmt *DStmt2 = cast<DoStmt>(Stmt2);
+
+    if (!isIdenticalStmt(Ctx, DStmt1->getCond(), DStmt2->getCond(),
+                         IgnoreSideEffects))
+      return false;
+    if (!isIdenticalStmt(Ctx, DStmt1->getBody(), DStmt2->getBody(),
+                         IgnoreSideEffects))
+      return false;
+    return true;
+  }
+  case Stmt::WhileStmtClass: {
+    const WhileStmt *WStmt1 = cast<WhileStmt>(Stmt1);
+    const WhileStmt *WStmt2 = cast<WhileStmt>(Stmt2);
+
+    if (!isIdenticalStmt(Ctx, WStmt1->getCond(), WStmt2->getCond(),
+                         IgnoreSideEffects))
+      return false;
+    if (!isIdenticalStmt(Ctx, WStmt1->getBody(), WStmt2->getBody(),
+                         IgnoreSideEffects))
+      return false;
+    return true;
+  }
+  case Stmt::IfStmtClass: {
+    const IfStmt *IStmt1 = cast<IfStmt>(Stmt1);
+    const IfStmt *IStmt2 = cast<IfStmt>(Stmt2);
+
+    if (!isIdenticalStmt(Ctx, IStmt1->getCond(), IStmt2->getCond(),
+                         IgnoreSideEffects))
+      return false;
+    if (!isIdenticalStmt(Ctx, IStmt1->getThen(), IStmt2->getThen(),
+                         IgnoreSideEffects))
+      return false;
+    if (!isIdenticalStmt(Ctx, IStmt1->getElse(), IStmt2->getElse(),
+                         IgnoreSideEffects))
+      return false;
+    return true;
+  }
+  case Stmt::CompoundStmtClass: {
+    const CompoundStmt *CompStmt1 = cast<CompoundStmt>(Stmt1);
+    const CompoundStmt *CompStmt2 = cast<CompoundStmt>(Stmt2);
+
+    if (CompStmt1->size() != CompStmt2->size())
+      return false;
+
+    CompoundStmt::const_body_iterator I1 = CompStmt1->body_begin();
+    CompoundStmt::const_body_iterator I2 = CompStmt2->body_begin();
+    while (I1 != CompStmt1->body_end() && I2 != CompStmt2->body_end()) {
+      if (!isIdenticalStmt(Ctx, *I1, *I2, IgnoreSideEffects))
+        return false;
+      ++I1;
+      ++I2;
+    }
+
+    return true;
+  }
+  case Stmt::CompoundAssignOperatorClass:
   case Stmt::BinaryOperatorClass: {
-    const BinaryOperator *BinOp1 = dyn_cast<BinaryOperator>(Expr1);
-    const BinaryOperator *BinOp2 = dyn_cast<BinaryOperator>(Expr2);
+    const BinaryOperator *BinOp1 = cast<BinaryOperator>(Stmt1);
+    const BinaryOperator *BinOp2 = cast<BinaryOperator>(Stmt2);
     return BinOp1->getOpcode() == BinOp2->getOpcode();
   }
   case Stmt::CharacterLiteralClass: {
-    const CharacterLiteral *CharLit1 = dyn_cast<CharacterLiteral>(Expr1);
-    const CharacterLiteral *CharLit2 = dyn_cast<CharacterLiteral>(Expr2);
+    const CharacterLiteral *CharLit1 = cast<CharacterLiteral>(Stmt1);
+    const CharacterLiteral *CharLit2 = cast<CharacterLiteral>(Stmt2);
     return CharLit1->getValue() == CharLit2->getValue();
   }
   case Stmt::DeclRefExprClass: {
-    const DeclRefExpr *DeclRef1 = dyn_cast<DeclRefExpr>(Expr1);
-    const DeclRefExpr *DeclRef2 = dyn_cast<DeclRefExpr>(Expr2);
+    const DeclRefExpr *DeclRef1 = cast<DeclRefExpr>(Stmt1);
+    const DeclRefExpr *DeclRef2 = cast<DeclRefExpr>(Stmt2);
     return DeclRef1->getDecl() == DeclRef2->getDecl();
   }
   case Stmt::IntegerLiteralClass: {
-    const IntegerLiteral *IntLit1 = dyn_cast<IntegerLiteral>(Expr1);
-    const IntegerLiteral *IntLit2 = dyn_cast<IntegerLiteral>(Expr2);
+    const IntegerLiteral *IntLit1 = cast<IntegerLiteral>(Stmt1);
+    const IntegerLiteral *IntLit2 = cast<IntegerLiteral>(Stmt2);
     return IntLit1->getValue() == IntLit2->getValue();
   }
   case Stmt::FloatingLiteralClass: {
-    const FloatingLiteral *FloatLit1 = dyn_cast<FloatingLiteral>(Expr1);
-    const FloatingLiteral *FloatLit2 = dyn_cast<FloatingLiteral>(Expr2);
+    const FloatingLiteral *FloatLit1 = cast<FloatingLiteral>(Stmt1);
+    const FloatingLiteral *FloatLit2 = cast<FloatingLiteral>(Stmt2);
     return FloatLit1->getValue().bitwiseIsEqual(FloatLit2->getValue());
   }
+  case Stmt::StringLiteralClass: {
+    const StringLiteral *StringLit1 = cast<StringLiteral>(Stmt1);
+    const StringLiteral *StringLit2 = cast<StringLiteral>(Stmt2);
+    return StringLit1->getString() == StringLit2->getString();
+  }
   case Stmt::MemberExprClass: {
-    const MemberExpr *MemberExpr1 = dyn_cast<MemberExpr>(Expr1);
-    const MemberExpr *MemberExpr2 = dyn_cast<MemberExpr>(Expr2);
-    return MemberExpr1->getMemberDecl() == MemberExpr2->getMemberDecl();
+    const MemberExpr *MemberStmt1 = cast<MemberExpr>(Stmt1);
+    const MemberExpr *MemberStmt2 = cast<MemberExpr>(Stmt2);
+    return MemberStmt1->getMemberDecl() == MemberStmt2->getMemberDecl();
   }
   case Stmt::UnaryOperatorClass: {
-    const UnaryOperator *UnaryOp1 = dyn_cast<UnaryOperator>(Expr1);
-    const UnaryOperator *UnaryOp2 = dyn_cast<UnaryOperator>(Expr2);
-    if (UnaryOp1->getOpcode() != UnaryOp2->getOpcode())
-      return false;
-    return !UnaryOp1->isIncrementDecrementOp();
+    const UnaryOperator *UnaryOp1 = cast<UnaryOperator>(Stmt1);
+    const UnaryOperator *UnaryOp2 = cast<UnaryOperator>(Stmt2);
+    return UnaryOp1->getOpcode() == UnaryOp2->getOpcode();
   }
   }
 }
@@ -215,7 +479,7 @@
 public:
   void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
                         BugReporter &BR) const {
-    FindIdenticalExprVisitor Visitor(BR, Mgr.getAnalysisDeclContext(D));
+    FindIdenticalExprVisitor Visitor(BR, this, Mgr.getAnalysisDeclContext(D));
     Visitor.TraverseDecl(const_cast<Decl *>(D));
   }
 };
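
Editorial note, not part of the patch: as a reading aid, the sketch below shows the constructs the extended IdenticalExprChecker now reports, one marked line per diagnostic added or kept above. The function and variable names are made up for illustration.

    // Illustrative only: each marked line corresponds to a warning emitted by the checker.
    int identicalExprExamples(int a, int b) {
      int r = 0;
      if (a == b)
        r = 1;
      else if (a == b)                 // expression is identical to previous condition
        r = 2;

      if (a > 0) {
        r += b;
      } else {
        r += b;                        // true and false branches are identical
      }

      int mask = a | a;                // identical expressions on both sides of bitwise operator
      bool both = (a > 0) && (a > 0);  // identical expressions on both sides of logical operator
      int sel = b ? a : a;             // identical expressions on both sides of ':'
      return r + mask + both + sel;
    }
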
diff --git a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index cc940be..fdd1fdc 100644
--- a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -49,6 +49,9 @@
   DefaultBool check_MissingInvalidationMethod;
   /// Check that all ivars are invalidated.
   DefaultBool check_InstanceVariableInvalidation;
+
+  CheckName checkName_MissingInvalidationMethod;
+  CheckName checkName_InstanceVariableInvalidation;
 };
 
 class IvarInvalidationCheckerImpl {
@@ -200,7 +203,8 @@
                         const ObjCIvarDecl *IvarDecl,
                         const IvarToPropMapTy &IvarToPopertyMap);
 
-  void reportNoInvalidationMethod(const ObjCIvarDecl *FirstIvarDecl,
+  void reportNoInvalidationMethod(CheckName CheckName,
+                                  const ObjCIvarDecl *FirstIvarDecl,
                                   const IvarToPropMapTy &IvarToPopertyMap,
                                   const ObjCInterfaceDecl *InterfaceD,
                                   bool MissingDeclaration) const;
@@ -223,10 +227,7 @@
 };
 
 static bool isInvalidationMethod(const ObjCMethodDecl *M, bool LookForPartial) {
-  for (specific_attr_iterator<AnnotateAttr>
-       AI = M->specific_attr_begin<AnnotateAttr>(),
-       AE = M->specific_attr_end<AnnotateAttr>(); AI != AE; ++AI) {
-    const AnnotateAttr *Ann = *AI;
+  for (const auto *Ann : M->specific_attrs<AnnotateAttr>()) {
     if (!LookForPartial &&
         Ann->getAnnotation() == "objc_instance_variable_invalidator")
       return true;
@@ -247,33 +248,22 @@
   // TODO: Cache the results.
 
   // Check all methods.
-  for (ObjCContainerDecl::method_iterator
-      I = D->meth_begin(),
-      E = D->meth_end(); I != E; ++I) {
-      const ObjCMethodDecl *MDI = *I;
-      if (isInvalidationMethod(MDI, Partial))
-        OutInfo.addInvalidationMethod(
-                               cast<ObjCMethodDecl>(MDI->getCanonicalDecl()));
-  }
+  for (const auto *MDI : D->methods())
+    if (isInvalidationMethod(MDI, Partial))
+      OutInfo.addInvalidationMethod(
+          cast<ObjCMethodDecl>(MDI->getCanonicalDecl()));
 
   // If interface, check all parent protocols and super.
   if (const ObjCInterfaceDecl *InterfD = dyn_cast<ObjCInterfaceDecl>(D)) {
 
     // Visit all protocols.
-    for (ObjCInterfaceDecl::protocol_iterator
-        I = InterfD->protocol_begin(),
-        E = InterfD->protocol_end(); I != E; ++I) {
-      containsInvalidationMethod((*I)->getDefinition(), OutInfo, Partial);
-    }
+    for (const auto *I : InterfD->protocols())
+      containsInvalidationMethod(I->getDefinition(), OutInfo, Partial);
 
     // Visit all categories in case the invalidation method is declared in
     // a category.
-    for (ObjCInterfaceDecl::visible_extensions_iterator
-           Ext = InterfD->visible_extensions_begin(),
-           ExtEnd = InterfD->visible_extensions_end();
-         Ext != ExtEnd; ++Ext) {
-      containsInvalidationMethod(*Ext, OutInfo, Partial);
-    }
+    for (const auto *Ext : InterfD->visible_extensions())
+      containsInvalidationMethod(Ext, OutInfo, Partial);
 
     containsInvalidationMethod(InterfD->getSuperClass(), OutInfo, Partial);
     return;
@@ -281,10 +271,8 @@
 
   // If protocol, check all parent protocols.
   if (const ObjCProtocolDecl *ProtD = dyn_cast<ObjCProtocolDecl>(D)) {
-    for (ObjCInterfaceDecl::protocol_iterator
-        I = ProtD->protocol_begin(),
-        E = ProtD->protocol_end(); I != E; ++I) {
-      containsInvalidationMethod((*I)->getDefinition(), OutInfo, Partial);
+    for (const auto *I : ProtD->protocols()) {
+      containsInvalidationMethod(I->getDefinition(), OutInfo, Partial);
     }
     return;
   }
@@ -476,7 +464,8 @@
   // Report an error in case none of the invalidation methods are declared.
   if (!Info.needsInvalidation() && !PartialInfo.needsInvalidation()) {
     if (Filter.check_MissingInvalidationMethod)
-      reportNoInvalidationMethod(FirstIvarDecl, IvarToPopertyMap, InterfaceD,
+      reportNoInvalidationMethod(Filter.checkName_MissingInvalidationMethod,
+                                 FirstIvarDecl, IvarToPopertyMap, InterfaceD,
                                  /*MissingDeclaration*/ true);
     // If there are no invalidation methods, there is no ivar validation work
     // to be done.
@@ -532,17 +521,17 @@
         reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, 0);
     } else {
       // Otherwise, no invalidation methods were implemented.
-      reportNoInvalidationMethod(FirstIvarDecl, IvarToPopertyMap, InterfaceD,
+      reportNoInvalidationMethod(Filter.checkName_InstanceVariableInvalidation,
+                                 FirstIvarDecl, IvarToPopertyMap, InterfaceD,
                                  /*MissingDeclaration*/ false);
     }
   }
 }
 
-void IvarInvalidationCheckerImpl::
-reportNoInvalidationMethod(const ObjCIvarDecl *FirstIvarDecl,
-                           const IvarToPropMapTy &IvarToPopertyMap,
-                           const ObjCInterfaceDecl *InterfaceD,
-                           bool MissingDeclaration) const {
+void IvarInvalidationCheckerImpl::reportNoInvalidationMethod(
+    CheckName CheckName, const ObjCIvarDecl *FirstIvarDecl,
+    const IvarToPropMapTy &IvarToPopertyMap,
+    const ObjCInterfaceDecl *InterfaceD, bool MissingDeclaration) const {
   SmallString<128> sbuf;
   llvm::raw_svector_ostream os(sbuf);
   assert(FirstIvarDecl);
@@ -557,7 +546,7 @@
   PathDiagnosticLocation IvarDecLocation =
     PathDiagnosticLocation::createBegin(FirstIvarDecl, BR.getSourceManager());
 
-  BR.EmitBasicReport(FirstIvarDecl, "Incomplete invalidation",
+  BR.EmitBasicReport(FirstIvarDecl, CheckName, "Incomplete invalidation",
                      categories::CoreFoundationObjectiveC, os.str(),
                      IvarDecLocation);
 }
@@ -575,15 +564,16 @@
                            PathDiagnosticLocation::createEnd(MethodD->getBody(),
                            BR.getSourceManager(),
                            Mgr.getAnalysisDeclContext(MethodD));
-    BR.EmitBasicReport(MethodD, "Incomplete invalidation",
+    BR.EmitBasicReport(MethodD, Filter.checkName_InstanceVariableInvalidation,
+                       "Incomplete invalidation",
                        categories::CoreFoundationObjectiveC, os.str(),
                        MethodDecLocation);
   } else {
-    BR.EmitBasicReport(IvarD, "Incomplete invalidation",
-                       categories::CoreFoundationObjectiveC, os.str(),
-                       PathDiagnosticLocation::createBegin(IvarD,
-                                                        BR.getSourceManager()));
-                       
+    BR.EmitBasicReport(
+        IvarD, Filter.checkName_InstanceVariableInvalidation,
+        "Incomplete invalidation", categories::CoreFoundationObjectiveC,
+        os.str(),
+        PathDiagnosticLocation::createBegin(IvarD, BR.getSourceManager()));
   }
 }
 
@@ -750,10 +740,13 @@
 };
 }
 
-#define REGISTER_CHECKER(name) \
-void ento::register##name(CheckerManager &mgr) {\
-  mgr.registerChecker<IvarInvalidationChecker>()->Filter.check_##name = true;\
-}
+#define REGISTER_CHECKER(name)                                                 \
+  void ento::register##name(CheckerManager &mgr) {                             \
+    IvarInvalidationChecker *checker =                                         \
+        mgr.registerChecker<IvarInvalidationChecker>();                        \
+    checker->Filter.check_##name = true;                                       \
+    checker->Filter.checkName_##name = mgr.getCurrentCheckName();              \
+  }
 
 REGISTER_CHECKER(InstanceVariableInvalidation)
 REGISTER_CHECKER(MissingInvalidationMethod)
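
Editorial note, not part of the patch: the revised REGISTER_CHECKER macro above expands, for one of the two checks, roughly as follows, so each registration now records the CheckName used for report attribution in addition to enabling the filter flag.

    // Rough expansion of REGISTER_CHECKER(InstanceVariableInvalidation).
    void ento::registerInstanceVariableInvalidation(CheckerManager &mgr) {
      IvarInvalidationChecker *checker =
          mgr.registerChecker<IvarInvalidationChecker>();
      checker->Filter.check_InstanceVariableInvalidation = true;
      checker->Filter.checkName_InstanceVariableInvalidation =
          mgr.getCurrentCheckName();
    }
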
diff --git a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 02a7cc3..17ebf9e 100644
--- a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -115,11 +115,14 @@
 
 namespace {
 class StringRefCheckerVisitor : public StmtVisitor<StringRefCheckerVisitor> {
-  BugReporter &BR;
   const Decl *DeclWithIssue;
+  BugReporter &BR;
+  const CheckerBase *Checker;
+
 public:
-  StringRefCheckerVisitor(const Decl *declWithIssue, BugReporter &br)
-    : BR(br), DeclWithIssue(declWithIssue) {}
+  StringRefCheckerVisitor(const Decl *declWithIssue, BugReporter &br,
+                          const CheckerBase *checker)
+      : DeclWithIssue(declWithIssue), BR(br), Checker(checker) {}
   void VisitChildren(Stmt *S) {
     for (Stmt::child_iterator I = S->child_begin(), E = S->child_end() ;
       I != E; ++I)
@@ -133,16 +136,17 @@
 };
 } // end anonymous namespace
 
-static void CheckStringRefAssignedTemporary(const Decl *D, BugReporter &BR) {
-  StringRefCheckerVisitor walker(D, BR);
+static void CheckStringRefAssignedTemporary(const Decl *D, BugReporter &BR,
+                                            const CheckerBase *Checker) {
+  StringRefCheckerVisitor walker(D, BR, Checker);
   walker.Visit(D->getBody());
 }
 
 void StringRefCheckerVisitor::VisitDeclStmt(DeclStmt *S) {
   VisitChildren(S);
 
-  for (DeclStmt::decl_iterator I = S->decl_begin(), E = S->decl_end();I!=E; ++I)
-    if (VarDecl *VD = dyn_cast<VarDecl>(*I))
+  for (auto *I : S->decls())
+    if (VarDecl *VD = dyn_cast<VarDecl>(I))
       VisitVarDecl(VD);
 }
 
@@ -179,7 +183,7 @@
                      "std::string that it outlives";
   PathDiagnosticLocation VDLoc =
     PathDiagnosticLocation::createBegin(VD, BR.getSourceManager());
-  BR.EmitBasicReport(DeclWithIssue, desc, "LLVM Conventions", desc,
+  BR.EmitBasicReport(DeclWithIssue, Checker, desc, "LLVM Conventions", desc,
                      VDLoc, Init->getSourceRange());
 }
 
@@ -197,9 +201,7 @@
   if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R))
     return true;
 
-  for (CXXRecordDecl::base_class_const_iterator I = R->bases_begin(),
-                                                E = R->bases_end(); I!=E; ++I) {
-    CXXBaseSpecifier BS = *I;
+  for (const auto &BS : R->bases()) {
     QualType T = BS.getType();
     if (const RecordType *baseT = T->getAs<RecordType>()) {
       CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getDecl());
@@ -216,23 +218,26 @@
   SmallVector<FieldDecl*, 10> FieldChain;
   const CXXRecordDecl *Root;
   BugReporter &BR;
+  const CheckerBase *Checker;
+
 public:
-  ASTFieldVisitor(const CXXRecordDecl *root, BugReporter &br)
-    : Root(root), BR(br) {}
+  ASTFieldVisitor(const CXXRecordDecl *root, BugReporter &br,
+                  const CheckerBase *checker)
+      : Root(root), BR(br), Checker(checker) {}
 
   void Visit(FieldDecl *D);
   void ReportError(QualType T);
 };
 } // end anonymous namespace
 
-static void CheckASTMemory(const CXXRecordDecl *R, BugReporter &BR) {
+static void CheckASTMemory(const CXXRecordDecl *R, BugReporter &BR,
+                           const CheckerBase *Checker) {
   if (!IsPartOfAST(R))
     return;
 
-  for (RecordDecl::field_iterator I = R->field_begin(), E = R->field_end();
-       I != E; ++I) {
-    ASTFieldVisitor walker(R, BR);
-    walker.Visit(*I);
+  for (auto *I : R->fields()) {
+    ASTFieldVisitor walker(R, BR, Checker);
+    walker.Visit(I);
   }
 }
 
@@ -246,9 +251,8 @@
 
   if (const RecordType *RT = T->getAs<RecordType>()) {
     const RecordDecl *RD = RT->getDecl()->getDefinition();
-    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
-         I != E; ++I)
-      Visit(*I);
+    for (auto *I : RD->fields())
+      Visit(I);
   }
 
   FieldChain.pop_back();
@@ -284,8 +288,8 @@
   // the class may be in the header file, for example).
   PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(
                                FieldChain.front(), BR.getSourceManager());
-  BR.EmitBasicReport(Root, "AST node allocates heap memory", "LLVM Conventions",
-                     os.str(), L);
+  BR.EmitBasicReport(Root, Checker, "AST node allocates heap memory",
+                     "LLVM Conventions", os.str(), L);
 }
 
 //===----------------------------------------------------------------------===//
@@ -300,12 +304,12 @@
   void checkASTDecl(const CXXRecordDecl *R, AnalysisManager& mgr,
                     BugReporter &BR) const {
     if (R->isCompleteDefinition())
-      CheckASTMemory(R, BR);
+      CheckASTMemory(R, BR, this);
   }
 
   void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
                         BugReporter &BR) const {
-    CheckStringRefAssignedTemporary(D, BR);
+    CheckStringRefAssignedTemporary(D, BR, this);
   }
 };
 }
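
Editorial note, not part of the patch: the StringRef check touched above fires on bindings like the sketch below, where a StringRef outlives the temporary std::string it points into. The helper function is hypothetical and exists only to produce a temporary.

    #include "llvm/ADT/StringRef.h"
    #include <string>

    static std::string makeName() { return "name"; }  // hypothetical helper

    void danglingStringRef() {
      // StringRef bound to a temporary std::string that it outlives.
      llvm::StringRef Name = makeName();
      (void)Name;
    }
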
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index f1f06c7..4a293c4 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -29,7 +29,7 @@
 class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
                                                check::PostStmt<CallExpr>,
                                                check::DeadSymbols> {
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
 
 public:
   /// AllocationState is a part of the checker specific state together with the
@@ -91,7 +91,7 @@
 
   inline void initBugType() const {
     if (!BT)
-      BT.reset(new BugType("Improper use of SecKeychain API",
+      BT.reset(new BugType(this, "Improper use of SecKeychain API",
                            "API Misuse (Apple)"));
   }
 
@@ -139,7 +139,7 @@
     SecKeychainBugVisitor(SymbolRef S) : Sym(S) {}
     virtual ~SecKeychainBugVisitor() {}
 
-    void Profile(llvm::FoldingSetNodeID &ID) const {
+    void Profile(llvm::FoldingSetNodeID &ID) const override {
       static int X = 0;
       ID.AddPointer(&X);
       ID.AddPointer(Sym);
@@ -148,7 +148,7 @@
     PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
                                    const ExplodedNode *PrevN,
                                    BugReporterContext &BRC,
-                                   BugReport &BR);
+                                   BugReport &BR) override;
   };
 };
 }
@@ -575,7 +575,7 @@
     return;
   }
 
-  static SimpleProgramPointTag Tag("MacOSKeychainAPIChecker : DeadSymbolsLeak");
+  static CheckerProgramPointTag Tag(this, "DeadSymbolsLeak");
   ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
 
   // Generate the error reports.
diff --git a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index 32ebb51..d9e4699 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -31,7 +31,7 @@
 
 namespace {
 class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
-  mutable OwningPtr<BugType> BT_dispatchOnce;
+  mutable std::unique_ptr<BugType> BT_dispatchOnce;
 
 public:
   void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
@@ -67,7 +67,7 @@
     return;
 
   if (!BT_dispatchOnce)
-    BT_dispatchOnce.reset(new BugType("Improper use of 'dispatch_once'",
+    BT_dispatchOnce.reset(new BugType(this, "Improper use of 'dispatch_once'",
                                       "API Misuse (Apple)"));
 
   // Handle _dispatch_once.  In some versions of the OS X SDK we have the case
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index c7aa0fb..f0d1924 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -16,6 +16,7 @@
 #include "InterCheckerAPI.h"
 #include "clang/AST/Attr.h"
 #include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
 #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
 #include "clang/StaticAnalyzer/Core/Checker.h"
 #include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -48,7 +49,7 @@
               Allocated,
               // Reference to released/freed memory.
               Released,
-              // The responsibility for freeing resources has transfered from
+              // The responsibility for freeing resources has transferred from
               // this reference. A relinquished symbol should not be freed.
               Relinquished,
               // We are no longer guaranteed to have observed all manipulations
@@ -100,17 +101,16 @@
   }
 
   void dump(raw_ostream &OS) const {
-    static const char *const Table[] = {
-      "Allocated",
-      "Released",
-      "Relinquished"
-    };
-    OS << Table[(unsigned) K];
+    switch (static_cast<Kind>(K)) {
+#define CASE(ID) case ID: OS << #ID; break;
+    CASE(Allocated)
+    CASE(Released)
+    CASE(Relinquished)
+    CASE(Escaped)
+    }
   }
 
-  LLVM_ATTRIBUTE_USED void dump() const {
-    dump(llvm::errs());
-  }
+  LLVM_DUMP_METHOD void dump() const { dump(llvm::errs()); }
 };
 
 enum ReallocPairKind {
@@ -156,30 +156,24 @@
                                      check::Location,
                                      eval::Assume>
 {
-  mutable OwningPtr<BugType> BT_DoubleFree;
-  mutable OwningPtr<BugType> BT_Leak;
-  mutable OwningPtr<BugType> BT_UseFree;
-  mutable OwningPtr<BugType> BT_BadFree;
-  mutable OwningPtr<BugType> BT_MismatchedDealloc;
-  mutable OwningPtr<BugType> BT_OffsetFree;
-  mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc,
-                         *II_valloc, *II_reallocf, *II_strndup, *II_strdup;
-
 public:
   MallocChecker() : II_malloc(0), II_free(0), II_realloc(0), II_calloc(0),
-                    II_valloc(0), II_reallocf(0), II_strndup(0), II_strdup(0) {}
+                    II_valloc(0), II_reallocf(0), II_strndup(0), II_strdup(0),
+                    II_kmalloc(0) {}
 
   /// In pessimistic mode, the checker assumes that it does not know which
   /// functions might free the memory.
-  struct ChecksFilter {
-    DefaultBool CMallocPessimistic;
-    DefaultBool CMallocOptimistic;
-    DefaultBool CNewDeleteChecker;
-    DefaultBool CNewDeleteLeaksChecker;
-    DefaultBool CMismatchedDeallocatorChecker;
+  enum CheckKind {
+    CK_MallocPessimistic,
+    CK_MallocOptimistic,
+    CK_NewDeleteChecker,
+    CK_NewDeleteLeaksChecker,
+    CK_MismatchedDeallocatorChecker,
+    CK_NumCheckKinds
   };
 
-  ChecksFilter Filter;
+  DefaultBool ChecksEnabled[CK_NumCheckKinds];
+  CheckName CheckNames[CK_NumCheckKinds];
 
   void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
   void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
@@ -204,9 +198,21 @@
                                           PointerEscapeKind Kind) const;
 
   void printState(raw_ostream &Out, ProgramStateRef State,
-                  const char *NL, const char *Sep) const;
+                  const char *NL, const char *Sep) const override;
 
 private:
+  mutable std::unique_ptr<BugType> BT_DoubleFree[CK_NumCheckKinds];
+  mutable std::unique_ptr<BugType> BT_DoubleDelete;
+  mutable std::unique_ptr<BugType> BT_Leak[CK_NumCheckKinds];
+  mutable std::unique_ptr<BugType> BT_UseFree[CK_NumCheckKinds];
+  mutable std::unique_ptr<BugType> BT_BadFree[CK_NumCheckKinds];
+  mutable std::unique_ptr<BugType> BT_MismatchedDealloc;
+  mutable std::unique_ptr<BugType> BT_OffsetFree[CK_NumCheckKinds];
+  mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc,
+                         *II_valloc, *II_reallocf, *II_strndup, *II_strdup,
+                         *II_kmalloc;
+  mutable Optional<uint64_t> KernelZeroFlagVal;
+
   void initIdentifierInfo(ASTContext &C) const;
 
   /// \brief Determine family of a deallocation expression.
@@ -234,9 +240,9 @@
   bool isAllocationFunction(const FunctionDecl *FD, ASTContext &C) const;
   bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
   ///@}
-  static ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
-                                              const CallExpr *CE,
-                                              const OwnershipAttr* Att);
+  ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
+                                       const CallExpr *CE,
+                                       const OwnershipAttr* Att) const;
   static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
                                      const Expr *SizeEx, SVal Init,
                                      ProgramStateRef State,
@@ -251,6 +257,12 @@
                                      ProgramStateRef State,
                                      AllocationFamily Family = AF_Malloc);
 
+  // Check this malloc() call for special flags. At present that means M_ZERO or
+  // __GFP_ZERO (in which case, treat it like calloc).
+  llvm::Optional<ProgramStateRef>
+  performKernelMalloc(const CallExpr *CE, CheckerContext &C,
+                      const ProgramStateRef &State) const;
+
   /// Update the RefState to reflect the new memory allocation.
   static ProgramStateRef 
   MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State,
@@ -279,6 +291,8 @@
 
   bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
 
+  bool checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const;
+
   /// Check if the function is known free memory, or if it is
   /// "interesting" and should be modeled explicitly.
   ///
@@ -302,10 +316,12 @@
 
   ///@{
   /// Tells if a given family/call/symbol is tracked by the current checker.
-  bool isTrackedByCurrentChecker(AllocationFamily Family) const;
-  bool isTrackedByCurrentChecker(CheckerContext &C,
-                                 const Stmt *AllocDeallocStmt) const;
-  bool isTrackedByCurrentChecker(CheckerContext &C, SymbolRef Sym) const;
+  /// Sets CheckKind to the kind of the checker responsible for this
+  /// family/call/symbol.
+  Optional<CheckKind> getCheckIfTracked(AllocationFamily Family) const;
+  Optional<CheckKind> getCheckIfTracked(CheckerContext &C,
+                                        const Stmt *AllocDeallocStmt) const;
+  Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym) const;
   ///@}
   static bool SummarizeValue(raw_ostream &os, SVal V);
   static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
@@ -322,6 +338,8 @@
   void ReportDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
                         SymbolRef Sym, SymbolRef PrevSym) const;
 
+  void ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const;
+
   /// Find the location of the allocation for Sym on the path leading to the
   /// exploded node N.
   LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
@@ -356,7 +374,7 @@
 
     virtual ~MallocBugVisitor() {}
 
-    void Profile(llvm::FoldingSetNodeID &ID) const {
+    void Profile(llvm::FoldingSetNodeID &ID) const override {
       static int X = 0;
       ID.AddPointer(&X);
       ID.AddPointer(Sym);
@@ -398,11 +416,11 @@
     PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
                                    const ExplodedNode *PrevN,
                                    BugReporterContext &BRC,
-                                   BugReport &BR);
+                                   BugReport &BR) override;
 
     PathDiagnosticPiece* getEndPath(BugReporterContext &BRC,
                                     const ExplodedNode *EndPathNode,
-                                    BugReport &BR) {
+                                    BugReport &BR) override {
       if (!IsLeak)
         return 0;
 
@@ -420,7 +438,8 @@
       StackHintGeneratorForReallocationFailed(SymbolRef S, StringRef M)
         : StackHintGeneratorForSymbol(S, M) {}
 
-      virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex) {
+      std::string getMessageForArg(const Expr *ArgE,
+                                   unsigned ArgIndex) override {
         // Printed parameters start at 1, not 0.
         ++ArgIndex;
 
@@ -433,7 +452,7 @@
         return os.str();
       }
 
-      virtual std::string getMessageForReturn(const CallExpr *CallExpr) {
+      std::string getMessageForReturn(const CallExpr *CallExpr) override {
         return "Reallocation of returned value failed";
       }
     };
@@ -455,7 +474,7 @@
   StopTrackingCallback(ProgramStateRef st) : state(st) {}
   ProgramStateRef getState() const { return state; }
 
-  bool VisitSymbol(SymbolRef sym) {
+  bool VisitSymbol(SymbolRef sym) override {
     state = state->remove<RegionState>(sym);
     return true;
   }
@@ -473,6 +492,7 @@
   II_valloc = &Ctx.Idents.get("valloc");
   II_strdup = &Ctx.Idents.get("strdup");
   II_strndup = &Ctx.Idents.get("strndup");
+  II_kmalloc = &Ctx.Idents.get("kmalloc");
 }
 
 bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
@@ -499,16 +519,13 @@
 
     if (FunI == II_malloc || FunI == II_realloc ||
         FunI == II_reallocf || FunI == II_calloc || FunI == II_valloc ||
-        FunI == II_strdup || FunI == II_strndup)
+        FunI == II_strdup || FunI == II_strndup || FunI == II_kmalloc)
       return true;
   }
 
-  if (Filter.CMallocOptimistic && FD->hasAttrs())
-    for (specific_attr_iterator<OwnershipAttr>
-           i = FD->specific_attr_begin<OwnershipAttr>(),
-           e = FD->specific_attr_end<OwnershipAttr>();
-           i != e; ++i)
-      if ((*i)->getOwnKind() == OwnershipAttr::Returns)
+  if (ChecksEnabled[CK_MallocOptimistic] && FD->hasAttrs())
+    for (const auto *I : FD->specific_attrs<OwnershipAttr>())
+      if (I->getOwnKind() == OwnershipAttr::Returns)
         return true;
   return false;
 }
@@ -525,13 +542,10 @@
       return true;
   }
 
-  if (Filter.CMallocOptimistic && FD->hasAttrs())
-    for (specific_attr_iterator<OwnershipAttr>
-           i = FD->specific_attr_begin<OwnershipAttr>(),
-           e = FD->specific_attr_end<OwnershipAttr>();
-           i != e; ++i)
-      if ((*i)->getOwnKind() == OwnershipAttr::Takes ||
-          (*i)->getOwnKind() == OwnershipAttr::Holds)
+  if (ChecksEnabled[CK_MallocOptimistic] && FD->hasAttrs())
+    for (const auto *I : FD->specific_attrs<OwnershipAttr>())
+      if (I->getOwnKind() == OwnershipAttr::Takes ||
+          I->getOwnKind() == OwnershipAttr::Holds)
         return true;
   return false;
 }
@@ -569,10 +583,88 @@
   return true;
 }
 
+llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
+  const CallExpr *CE, CheckerContext &C, const ProgramStateRef &State) const {
+  // 3-argument malloc(), as commonly used in {Free,Net,Open}BSD Kernels:
+  //
+  // void *malloc(unsigned long size, struct malloc_type *mtp, int flags);
+  //
+  // One of the possible flags is M_ZERO, which means 'give me back an
+  // allocation which is already zeroed', like calloc.
+
+  // 2-argument kmalloc(), as used in the Linux kernel:
+  //
+  // void *kmalloc(size_t size, gfp_t flags);
+  //
+  // It has a similar flag, __GFP_ZERO.
+
+  // This logic is largely cloned from O_CREAT in UnixAPIChecker; maybe some
+  // code could be shared.
+
+  ASTContext &Ctx = C.getASTContext();
+  llvm::Triple::OSType OS = Ctx.getTargetInfo().getTriple().getOS();
+
+  if (!KernelZeroFlagVal.hasValue()) {
+    if (OS == llvm::Triple::FreeBSD)
+      KernelZeroFlagVal = 0x0100;
+    else if (OS == llvm::Triple::NetBSD)
+      KernelZeroFlagVal = 0x0002;
+    else if (OS == llvm::Triple::OpenBSD)
+      KernelZeroFlagVal = 0x0008;
+    else if (OS == llvm::Triple::Linux)
+      // __GFP_ZERO
+      KernelZeroFlagVal = 0x8000;
+    else
+      // FIXME: We need a more general way of getting the M_ZERO value.
+      // See also: O_CREAT in UnixAPIChecker.cpp.
+
+      // Fall back to normal malloc behavior on platforms where we don't
+      // know M_ZERO.
+      return None;
+  }
+
+  // We treat the last argument as the flags argument, and callers fall back to
+  // normal malloc on a None return. This works for the FreeBSD kernel malloc
+  // as well as Linux kmalloc.
+  if (CE->getNumArgs() < 2)
+    return None;
+
+  const Expr *FlagsEx = CE->getArg(CE->getNumArgs() - 1);
+  const SVal V = State->getSVal(FlagsEx, C.getLocationContext());
+  if (!V.getAs<NonLoc>()) {
+    // 'V' can only be a location if the header is bad, so bail out in that
+    // case.
+    return None;
+  }
+
+  NonLoc Flags = V.castAs<NonLoc>();
+  NonLoc ZeroFlag = C.getSValBuilder()
+      .makeIntVal(KernelZeroFlagVal.getValue(), FlagsEx->getType())
+      .castAs<NonLoc>();
+  SVal MaskedFlagsUC = C.getSValBuilder().evalBinOpNN(State, BO_And,
+                                                      Flags, ZeroFlag,
+                                                      FlagsEx->getType());
+  if (MaskedFlagsUC.isUnknownOrUndef())
+    return None;
+  DefinedSVal MaskedFlags = MaskedFlagsUC.castAs<DefinedSVal>();
+
+  // Check whether MaskedFlags is non-zero.
+  ProgramStateRef TrueState, FalseState;
+  std::tie(TrueState, FalseState) = State->assume(MaskedFlags);
+
+  // If M_ZERO is set, treat this like calloc (initialized).
+  if (TrueState && !FalseState) {
+    SVal ZeroVal = C.getSValBuilder().makeZeroVal(Ctx.CharTy);
+    return MallocMemAux(C, CE, CE->getArg(0), ZeroVal, TrueState);
+  }
+
+  return None;
+}
+
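// A minimal sketch, not part of this patch: the zero-flag test above reduced
// to concrete integers.  The real code works on symbolic values, so it only
// takes the calloc-like path when the bit is provably set (a TrueState with no
// FalseState); otherwise it keeps the plain malloc model.
inline bool definitelyHasZeroFlag(unsigned Flags, unsigned KernelZeroFlag) {
  // Concrete analogue of evalBinOpNN(BO_And, ...) followed by assume().
  return (Flags & KernelZeroFlag) != 0;
}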
 void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
   if (C.wasInlined)
     return;
-  
+
   const FunctionDecl *FD = C.getCalleeDecl(CE);
   if (!FD)
     return;
@@ -584,7 +676,27 @@
     initIdentifierInfo(C.getASTContext());
     IdentifierInfo *FunI = FD->getIdentifier();
 
-    if (FunI == II_malloc || FunI == II_valloc) {
+    if (FunI == II_malloc) {
+      if (CE->getNumArgs() < 1)
+        return;
+      if (CE->getNumArgs() < 3) {
+        State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+      } else if (CE->getNumArgs() == 3) {
+        llvm::Optional<ProgramStateRef> MaybeState =
+          performKernelMalloc(CE, C, State);
+        if (MaybeState.hasValue())
+          State = MaybeState.getValue();
+        else
+          State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+      }
+    } else if (FunI == II_kmalloc) {
+      llvm::Optional<ProgramStateRef> MaybeState =
+        performKernelMalloc(CE, C, State);
+      if (MaybeState.hasValue())
+        State = MaybeState.getValue();
+      else
+        State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+    } else if (FunI == II_valloc) {
       if (CE->getNumArgs() < 1)
         return;
       State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
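// A minimal sketch, not part of this patch: caller shapes the dispatch above
// distinguishes.  The declarations are stand-ins mirroring the prototypes
// quoted in performKernelMalloc(); the flag values are the ones the checker
// assumes for FreeBSD and Linux, not taken from real headers.
struct malloc_type;
void *malloc(unsigned long size, struct malloc_type *mtp, int flags); // BSD kernel
void *kmalloc(unsigned long size, unsigned flags);                    // Linux
enum { M_ZERO = 0x0100, GFP_ZERO = 0x8000 };

void example(struct malloc_type *mt, unsigned long len) {
  void *p = malloc(len, mt, M_ZERO); // 3 args, zero flag set: modeled like calloc()
  void *q = kmalloc(len, GFP_ZERO);  // kmalloc with zero flag: likewise
  void *r = malloc(len, mt, 0);      // zero flag absent: contents stay undefined
}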
@@ -620,21 +732,19 @@
     }
   }
 
-  if (Filter.CMallocOptimistic || Filter.CMismatchedDeallocatorChecker) {
+  if (ChecksEnabled[CK_MallocOptimistic] ||
+      ChecksEnabled[CK_MismatchedDeallocatorChecker]) {
     // Check all the attributes, if there are any.
     // There can be multiple of these attributes.
     if (FD->hasAttrs())
-      for (specific_attr_iterator<OwnershipAttr>
-          i = FD->specific_attr_begin<OwnershipAttr>(),
-          e = FD->specific_attr_end<OwnershipAttr>();
-          i != e; ++i) {
-        switch ((*i)->getOwnKind()) {
+      for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
+        switch (I->getOwnKind()) {
         case OwnershipAttr::Returns:
-          State = MallocMemReturnsAttr(C, CE, *i);
+          State = MallocMemReturnsAttr(C, CE, I);
           break;
         case OwnershipAttr::Takes:
         case OwnershipAttr::Holds:
-          State = FreeMemAttr(C, CE, *i);
+          State = FreeMemAttr(C, CE, I);
           break;
         }
       }
@@ -667,7 +777,7 @@
 void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE, 
                                  CheckerContext &C) const {
 
-  if (!Filter.CNewDeleteChecker)
+  if (!ChecksEnabled[CK_NewDeleteChecker])
     if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
       checkUseAfterFree(Sym, C, DE->getArgument());
 
@@ -729,10 +839,10 @@
   C.addTransition(State);
 }
 
-ProgramStateRef MallocChecker::MallocMemReturnsAttr(CheckerContext &C,
-                                                    const CallExpr *CE,
-                                                    const OwnershipAttr* Att) {
-  if (Att->getModule() != "malloc")
+ProgramStateRef
+MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
+                                    const OwnershipAttr *Att) const {
+  if (Att->getModule() != II_malloc)
     return 0;
 
   OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
@@ -804,8 +914,8 @@
 
 ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
                                            const CallExpr *CE,
-                                           const OwnershipAttr* Att) const {
-  if (Att->getModule() != "malloc")
+                                           const OwnershipAttr *Att) const {
+  if (Att->getModule() != II_malloc)
     return 0;
 
   ProgramStateRef State = C.getState();
@@ -909,7 +1019,7 @@
       os << "-";
     else
       os << "+";
-    os << Msg->getSelector().getAsString();
+    Msg->getSelector().print(os);
     return true;
   }
 
@@ -971,7 +1081,7 @@
 
   // The explicit NULL case, no operation is performed.
   ProgramStateRef notNullState, nullState;
-  llvm::tie(notNullState, nullState) = State->assume(location);
+  std::tie(notNullState, nullState) = State->assume(location);
   if (nullState && !notNullState)
     return 0;
 
@@ -1088,18 +1198,23 @@
                                  RefState::getReleased(Family, ParentExpr));
 }
 
-bool MallocChecker::isTrackedByCurrentChecker(AllocationFamily Family) const {
+Optional<MallocChecker::CheckKind>
+MallocChecker::getCheckIfTracked(AllocationFamily Family) const {
   switch (Family) {
   case AF_Malloc: {
-    if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic)
-      return false;
-    return true;
+    if (ChecksEnabled[CK_MallocOptimistic]) {
+      return CK_MallocOptimistic;
+    } else if (ChecksEnabled[CK_MallocPessimistic]) {
+      return CK_MallocPessimistic;
+    }
+    return Optional<MallocChecker::CheckKind>();
   }
   case AF_CXXNew:
   case AF_CXXNewArray: {
-    if (!Filter.CNewDeleteChecker)
-      return false;
-    return true;
+    if (ChecksEnabled[CK_NewDeleteChecker]) {
+      return CK_NewDeleteChecker;
+    }
+    return Optional<MallocChecker::CheckKind>();
   }
   case AF_None: {
     llvm_unreachable("no family");
@@ -1108,18 +1223,18 @@
   llvm_unreachable("unhandled family");
 }
 
-bool
-MallocChecker::isTrackedByCurrentChecker(CheckerContext &C, 
-                                         const Stmt *AllocDeallocStmt) const {
-  return isTrackedByCurrentChecker(getAllocationFamily(C, AllocDeallocStmt));
+Optional<MallocChecker::CheckKind>
+MallocChecker::getCheckIfTracked(CheckerContext &C,
+                                 const Stmt *AllocDeallocStmt) const {
+  return getCheckIfTracked(getAllocationFamily(C, AllocDeallocStmt));
 }
 
-bool MallocChecker::isTrackedByCurrentChecker(CheckerContext &C,
-                                              SymbolRef Sym) const {
+Optional<MallocChecker::CheckKind>
+MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym) const {
 
   const RefState *RS = C.getState()->get<RegionState>(Sym);
   assert(RS);
-  return isTrackedByCurrentChecker(RS->getAllocationFamily());
+  return getCheckIfTracked(RS->getAllocationFamily());
 }
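// A minimal sketch, not part of this patch: how the report helpers below
// consume the new Optional return value in place of the old bool.
//
//   Optional<MallocChecker::CheckKind> Kind = getCheckIfTracked(C, Sym);
//   if (!Kind.hasValue())
//     return;                      // no enabled checker owns this family
//   // ... BT_UseFree[*Kind], CheckNames[*Kind], etc. select the per-checker
//   // BugType and the user-visible check name.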
 
 bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
@@ -1213,17 +1328,21 @@
                                   SourceRange Range, 
                                   const Expr *DeallocExpr) const {
 
-  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
-      !Filter.CNewDeleteChecker)
+  if (!ChecksEnabled[CK_MallocOptimistic] &&
+      !ChecksEnabled[CK_MallocPessimistic] &&
+      !ChecksEnabled[CK_NewDeleteChecker])
     return;
 
-  if (!isTrackedByCurrentChecker(C, DeallocExpr))
+  Optional<MallocChecker::CheckKind> CheckKind =
+      getCheckIfTracked(C, DeallocExpr);
+  if (!CheckKind.hasValue())
     return;
 
   if (ExplodedNode *N = C.generateSink()) {
-    if (!BT_BadFree)
-      BT_BadFree.reset(new BugType("Bad free", "Memory Error"));
-    
+    if (!BT_BadFree[*CheckKind])
+      BT_BadFree[*CheckKind].reset(
+          new BugType(CheckNames[*CheckKind], "Bad free", "Memory Error"));
+
     SmallString<100> buf;
     llvm::raw_svector_ostream os(buf);
 
@@ -1249,7 +1368,7 @@
       printExpectedAllocName(os, C, DeallocExpr);
     }
 
-    BugReport *R = new BugReport(*BT_BadFree, os.str(), N);
+    BugReport *R = new BugReport(*BT_BadFree[*CheckKind], os.str(), N);
     R->markInteresting(MR);
     R->addRange(Range);
     C.emitReport(R);
@@ -1263,14 +1382,15 @@
                                             SymbolRef Sym, 
                                             bool OwnershipTransferred) const {
 
-  if (!Filter.CMismatchedDeallocatorChecker)
+  if (!ChecksEnabled[CK_MismatchedDeallocatorChecker])
     return;
 
   if (ExplodedNode *N = C.generateSink()) {
     if (!BT_MismatchedDealloc)
-      BT_MismatchedDealloc.reset(new BugType("Bad deallocator",
-                                             "Memory Error"));
-    
+      BT_MismatchedDealloc.reset(
+          new BugType(CheckNames[CK_MismatchedDeallocatorChecker],
+                      "Bad deallocator", "Memory Error"));
+
     SmallString<100> buf;
     llvm::raw_svector_ostream os(buf);
 
@@ -1314,19 +1434,23 @@
                                      SourceRange Range, const Expr *DeallocExpr,
                                      const Expr *AllocExpr) const {
 
-  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
-      !Filter.CNewDeleteChecker)
+  if (!ChecksEnabled[CK_MallocOptimistic] &&
+      !ChecksEnabled[CK_MallocPessimistic] &&
+      !ChecksEnabled[CK_NewDeleteChecker])
     return;
 
-  if (!isTrackedByCurrentChecker(C, AllocExpr))
+  Optional<MallocChecker::CheckKind> CheckKind =
+      getCheckIfTracked(C, AllocExpr);
+  if (!CheckKind.hasValue())
     return;
 
   ExplodedNode *N = C.generateSink();
   if (N == NULL)
     return;
 
-  if (!BT_OffsetFree)
-    BT_OffsetFree.reset(new BugType("Offset free", "Memory Error"));
+  if (!BT_OffsetFree[*CheckKind])
+    BT_OffsetFree[*CheckKind].reset(
+        new BugType(CheckNames[*CheckKind], "Offset free", "Memory Error"));
 
   SmallString<100> buf;
   llvm::raw_svector_ostream os(buf);
@@ -1357,7 +1481,7 @@
   else
     os << "allocated memory";
 
-  BugReport *R = new BugReport(*BT_OffsetFree, os.str(), N);
+  BugReport *R = new BugReport(*BT_OffsetFree[*CheckKind], os.str(), N);
   R->markInteresting(MR->getBaseRegion());
   R->addRange(Range);
   C.emitReport(R);
@@ -1366,18 +1490,21 @@
 void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
                                        SymbolRef Sym) const {
 
-  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
-      !Filter.CNewDeleteChecker)
+  if (!ChecksEnabled[CK_MallocOptimistic] &&
+      !ChecksEnabled[CK_MallocPessimistic] &&
+      !ChecksEnabled[CK_NewDeleteChecker])
     return;
 
-  if (!isTrackedByCurrentChecker(C, Sym))
+  Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+  if (!CheckKind.hasValue())
     return;
 
   if (ExplodedNode *N = C.generateSink()) {
-    if (!BT_UseFree)
-      BT_UseFree.reset(new BugType("Use-after-free", "Memory Error"));
+    if (!BT_UseFree[*CheckKind])
+      BT_UseFree[*CheckKind].reset(new BugType(
+          CheckNames[*CheckKind], "Use-after-free", "Memory Error"));
 
-    BugReport *R = new BugReport(*BT_UseFree,
+    BugReport *R = new BugReport(*BT_UseFree[*CheckKind],
                                  "Use of memory after it is freed", N);
 
     R->markInteresting(Sym);
@@ -1391,21 +1518,25 @@
                                      bool Released, SymbolRef Sym, 
                                      SymbolRef PrevSym) const {
 
-  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
-      !Filter.CNewDeleteChecker)
+  if (!ChecksEnabled[CK_MallocOptimistic] &&
+      !ChecksEnabled[CK_MallocPessimistic] &&
+      !ChecksEnabled[CK_NewDeleteChecker])
     return;
 
-  if (!isTrackedByCurrentChecker(C, Sym))
+  Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+  if (!CheckKind.hasValue())
     return;
 
   if (ExplodedNode *N = C.generateSink()) {
-    if (!BT_DoubleFree)
-      BT_DoubleFree.reset(new BugType("Double free", "Memory Error"));
+    if (!BT_DoubleFree[*CheckKind])
+      BT_DoubleFree[*CheckKind].reset(
+          new BugType(CheckNames[*CheckKind], "Double free", "Memory Error"));
 
-    BugReport *R = new BugReport(*BT_DoubleFree,
-      (Released ? "Attempt to free released memory"
-                : "Attempt to free non-owned memory"),
-      N);
+    BugReport *R =
+        new BugReport(*BT_DoubleFree[*CheckKind],
+                      (Released ? "Attempt to free released memory"
+                                : "Attempt to free non-owned memory"),
+                      N);
     R->addRange(Range);
     R->markInteresting(Sym);
     if (PrevSym)
@@ -1415,6 +1546,30 @@
   }
 }
 
+void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
+
+  if (!ChecksEnabled[CK_NewDeleteChecker])
+    return;
+
+  Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+  if (!CheckKind.hasValue())
+    return;
+  assert(*CheckKind == CK_NewDeleteChecker && "invalid check kind");
+
+  if (ExplodedNode *N = C.generateSink()) {
+    if (!BT_DoubleDelete)
+      BT_DoubleDelete.reset(new BugType(CheckNames[CK_NewDeleteChecker],
+                                        "Double delete", "Memory Error"));
+
+    BugReport *R = new BugReport(*BT_DoubleDelete,
+                                 "Attempt to delete released memory", N);
+
+    R->markInteresting(Sym);
+    R->addVisitor(new MallocBugVisitor(Sym));
+    C.emitReport(R);
+  }
+}
+
 ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
                                           const CallExpr *CE,
                                           bool FreesOnFail) const {
@@ -1451,9 +1606,9 @@
                        svalBuilder.makeIntValWithPtrWidth(0, false));
 
   ProgramStateRef StatePtrIsNull, StatePtrNotNull;
-  llvm::tie(StatePtrIsNull, StatePtrNotNull) = state->assume(PtrEQ);
+  std::tie(StatePtrIsNull, StatePtrNotNull) = state->assume(PtrEQ);
   ProgramStateRef StateSizeIsZero, StateSizeNotZero;
-  llvm::tie(StateSizeIsZero, StateSizeNotZero) = state->assume(SizeZero);
+  std::tie(StateSizeIsZero, StateSizeNotZero) = state->assume(SizeZero);
   // We only assume exceptional states if they are definitely true; if the
   // state is under-constrained, assume regular realloc behavior.
   bool PrtIsNull = StatePtrIsNull && !StatePtrNotNull;
@@ -1576,31 +1731,34 @@
 void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
                                CheckerContext &C) const {
 
-  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
-      !Filter.CNewDeleteLeaksChecker)
+  if (!ChecksEnabled[CK_MallocOptimistic] &&
+      !ChecksEnabled[CK_MallocPessimistic] &&
+      !ChecksEnabled[CK_NewDeleteLeaksChecker])
     return;
 
   const RefState *RS = C.getState()->get<RegionState>(Sym);
   assert(RS && "cannot leak an untracked symbol");
   AllocationFamily Family = RS->getAllocationFamily();
-  if (!isTrackedByCurrentChecker(Family))
+  Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
+  if (!CheckKind.hasValue())
     return;
 
   // Special case for new and new[]; these are controlled by a separate checker
   // flag so that they can be selectively disabled.
   if (Family == AF_CXXNew || Family == AF_CXXNewArray)
-    if (!Filter.CNewDeleteLeaksChecker)
+    if (!ChecksEnabled[CK_NewDeleteLeaksChecker])
       return;
 
   assert(N);
-  if (!BT_Leak) {
-    BT_Leak.reset(new BugType("Memory leak", "Memory Error"));
+  if (!BT_Leak[*CheckKind]) {
+    BT_Leak[*CheckKind].reset(
+        new BugType(CheckNames[*CheckKind], "Memory leak", "Memory Error"));
     // Leaks should not be reported if they are post-dominated by a sink:
     // (1) Sinks are higher importance bugs.
     // (2) NoReturnFunctionChecker uses sink nodes to represent paths ending
     //     with __noreturn functions such as assert() or exit(). We choose not
     //     to report leaks on such paths.
-    BT_Leak->setSuppressOnSink(true);
+    BT_Leak[*CheckKind]->setSuppressOnSink(true);
   }
 
   // Most bug reports are cached at the location where they occurred.
@@ -1609,7 +1767,7 @@
   PathDiagnosticLocation LocUsedForUniqueing;
   const ExplodedNode *AllocNode = 0;
   const MemRegion *Region = 0;
-  llvm::tie(AllocNode, Region) = getAllocationSite(N, Sym, C);
+  std::tie(AllocNode, Region) = getAllocationSite(N, Sym, C);
   
   ProgramPoint P = AllocNode->getLocation();
   const Stmt *AllocationStmt = 0;
@@ -1631,9 +1789,9 @@
     os << "Potential memory leak";
   }
 
-  BugReport *R = new BugReport(*BT_Leak, os.str(), N, 
-                               LocUsedForUniqueing, 
-                               AllocNode->getLocationContext()->getDecl());
+  BugReport *R =
+      new BugReport(*BT_Leak[*CheckKind], os.str(), N, LocUsedForUniqueing,
+                    AllocNode->getLocationContext()->getDecl());
   R->markInteresting(Sym);
   R->addVisitor(new MallocBugVisitor(Sym, true));
   C.emitReport(R);
@@ -1681,7 +1839,7 @@
   // Generate leak node.
   ExplodedNode *N = C.getPredecessor();
   if (!Errors.empty()) {
-    static SimpleProgramPointTag Tag("MallocChecker : DeadSymbolsLeak");
+    static CheckerProgramPointTag Tag("MallocChecker", "DeadSymbolsLeak");
     N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
     for (SmallVectorImpl<SymbolRef>::iterator
            I = Errors.begin(), E = Errors.end(); I != E; ++I) {
@@ -1695,17 +1853,24 @@
 void MallocChecker::checkPreCall(const CallEvent &Call,
                                  CheckerContext &C) const {
 
+  if (const CXXDestructorCall *DC = dyn_cast<CXXDestructorCall>(&Call)) {
+    SymbolRef Sym = DC->getCXXThisVal().getAsSymbol();
+    if (!Sym || checkDoubleDelete(Sym, C))
+      return;
+  }
+
   // We will check for double free in the post visit.
   if (const AnyFunctionCall *FC = dyn_cast<AnyFunctionCall>(&Call)) {
     const FunctionDecl *FD = FC->getDecl();
     if (!FD)
       return;
 
-    if ((Filter.CMallocOptimistic || Filter.CMallocPessimistic) &&
+    if ((ChecksEnabled[CK_MallocOptimistic] ||
+         ChecksEnabled[CK_MallocPessimistic]) &&
         isFreeFunction(FD, C.getASTContext()))
       return;
 
-    if (Filter.CNewDeleteChecker &&
+    if (ChecksEnabled[CK_NewDeleteChecker] &&
         isStandardNewDelete(FD, C.getASTContext()))
       return;
   }
@@ -1803,8 +1968,7 @@
 bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
                                       const Stmt *S) const {
 
-  // FIXME: Handle destructor called from delete more precisely.
-  if (isReleased(Sym, C) && S) {
+  if (isReleased(Sym, C)) {
     ReportUseAfterFree(C, S->getSourceRange(), Sym);
     return true;
   }
@@ -1812,6 +1976,15 @@
   return false;
 }
 
+bool MallocChecker::checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const {
+
+  if (isReleased(Sym, C)) {
+    ReportDoubleDelete(C, Sym);
+    return true;
+  }
+  return false;
+}
+
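// A minimal sketch, not part of this patch: the pattern the CXXDestructorCall
// handling in checkPreCall() together with checkDoubleDelete() targets.  The
// second delete runs the destructor on already-released memory before the
// deallocation itself, which is what the pre-call hook catches.
struct S { ~S(); };

void doubleDelete(S *P) {
  delete P;
  delete P; // warn: "Attempt to delete released memory"
}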
 // Check if the location is a freed symbolic region.
 void MallocChecker::checkLocation(SVal l, bool isLoad, const Stmt *S,
                                   CheckerContext &C) const {
@@ -1869,11 +2042,11 @@
   assert(Call);
   EscapingSymbol = 0;
   
-  // For now, assume that any C++ call can free memory.
+  // For now, assume that any C++ or block call can free memory.
   // TODO: If we want to be more optimistic here, we'll need to make sure that
   // regions escape to C++ containers. They seem to do that even now, but for
   // mysterious reasons.
-  if (!(isa<FunctionCall>(Call) || isa<ObjCMethodCall>(Call)))
+  if (!(isa<SimpleFunctionCall>(Call) || isa<ObjCMethodCall>(Call)))
     return true;
 
   // Check Objective-C messages by selector name.
@@ -1909,7 +2082,8 @@
     // that the pointers get freed by following the container itself.
     if (FirstSlot.startswith("addPointer") ||
         FirstSlot.startswith("insertPointer") ||
-        FirstSlot.startswith("replacePointer")) {
+        FirstSlot.startswith("replacePointer") ||
+        FirstSlot.equals("valueWithPointer")) {
       return true;
     }
 
@@ -1927,7 +2101,7 @@
   }
 
   // At this point the only thing left to handle is straight function calls.
-  const FunctionDecl *FD = cast<FunctionCall>(Call)->getDecl();
+  const FunctionDecl *FD = cast<SimpleFunctionCall>(Call)->getDecl();
   if (!FD)
     return true;
 
@@ -2130,7 +2304,7 @@
       StackHint = new StackHintGeneratorForSymbol(Sym,
                                              "Returning; memory was released");
     } else if (isRelinquished(RS, RSPrev, S)) {
-      Msg = "Memory ownership is transfered";
+      Msg = "Memory ownership is transferred";
       StackHint = new StackHintGeneratorForSymbol(Sym, "");
     } else if (isReallocFailedCheck(RS, RSPrev, S)) {
       Mode = ReallocationFailed;
@@ -2178,11 +2352,17 @@
   RegionStateTy RS = State->get<RegionState>();
 
   if (!RS.isEmpty()) {
-    Out << Sep << "MallocChecker:" << NL;
+    Out << Sep << "MallocChecker :" << NL;
     for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+      const RefState *RefS = State->get<RegionState>(I.getKey());
+      AllocationFamily Family = RefS->getAllocationFamily();
+      Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
+
       I.getKey()->dumpToStream(Out);
       Out << " : ";
       I.getData().dump(Out);
+      if (CheckKind.hasValue())
+        Out << " (" << CheckNames[*CheckKind].getName() << ")";
       Out << NL;
     }
   }
@@ -2190,17 +2370,23 @@
 
 void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
   registerCStringCheckerBasic(mgr);
-  mgr.registerChecker<MallocChecker>()->Filter.CNewDeleteLeaksChecker = true;
+  MallocChecker *checker = mgr.registerChecker<MallocChecker>();
+  checker->ChecksEnabled[MallocChecker::CK_NewDeleteLeaksChecker] = true;
+  checker->CheckNames[MallocChecker::CK_NewDeleteLeaksChecker] =
+      mgr.getCurrentCheckName();
   // We currently treat NewDeleteLeaks checker as a subchecker of NewDelete 
   // checker.
-  mgr.registerChecker<MallocChecker>()->Filter.CNewDeleteChecker = true;
+  if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker])
+    checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker] = true;
 }
 
-#define REGISTER_CHECKER(name) \
-void ento::register##name(CheckerManager &mgr) {\
-  registerCStringCheckerBasic(mgr); \
-  mgr.registerChecker<MallocChecker>()->Filter.C##name = true;\
-}
+#define REGISTER_CHECKER(name)                                                 \
+  void ento::register##name(CheckerManager &mgr) {                             \
+    registerCStringCheckerBasic(mgr);                                          \
+    MallocChecker *checker = mgr.registerChecker<MallocChecker>();             \
+    checker->ChecksEnabled[MallocChecker::CK_##name] = true;                   \
+    checker->CheckNames[MallocChecker::CK_##name] = mgr.getCurrentCheckName(); \
+  }
 
 REGISTER_CHECKER(MallocPessimistic)
 REGISTER_CHECKER(MallocOptimistic)
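// For reference (not part of this patch), REGISTER_CHECKER(MallocPessimistic)
// now expands to roughly:
//
//   void ento::registerMallocPessimistic(CheckerManager &mgr) {
//     registerCStringCheckerBasic(mgr);
//     MallocChecker *checker = mgr.registerChecker<MallocChecker>();
//     checker->ChecksEnabled[MallocChecker::CK_MallocPessimistic] = true;
//     checker->CheckNames[MallocChecker::CK_MallocPessimistic] =
//         mgr.getCurrentCheckName();
//   }
//
// i.e. each registration function records both the enabled bit and the
// user-visible check name later passed to the BugType constructors.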
diff --git a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index 0cdf911..8097727 100644
--- a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -213,11 +213,12 @@
        e = PossibleMallocOverflows.end();
        i != e;
        ++i) {
-    BR.EmitBasicReport(D, "malloc() size overflow", categories::UnixAPI,
-      "the computation of the size of the memory allocation may overflow",
-      PathDiagnosticLocation::createOperatorLoc(i->mulop,
-                                                BR.getSourceManager()),
-      i->mulop->getSourceRange());
+    BR.EmitBasicReport(
+        D, this, "malloc() size overflow", categories::UnixAPI,
+        "the computation of the size of the memory allocation may overflow",
+        PathDiagnosticLocation::createOperatorLoc(i->mulop,
+                                                  BR.getSourceManager()),
+        i->mulop->getSourceRange());
   }
 }
 
@@ -262,6 +263,7 @@
   OutputPossibleOverflows(PossibleMallocOverflows, D, BR, mgr);
 }
 
-void ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
+void
+ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
   mgr.registerChecker<MallocOverflowSecurityChecker>();
 }
diff --git a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 6c776eb..5b068c8 100644
--- a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -101,9 +101,8 @@
   }
 
   TypeCallPair VisitDeclStmt(const DeclStmt *S) {
-    for (DeclStmt::const_decl_iterator I = S->decl_begin(), E = S->decl_end();
-         I!=E; ++I)
-      if (const VarDecl *VD = dyn_cast<VarDecl>(*I))
+    for (const auto *I : S->decls())
+      if (const VarDecl *VD = dyn_cast<VarDecl>(I))
         if (const Expr *Init = VD->getInit())
           VisitChild(VD, Init);
     return TypeCallPair();
@@ -236,10 +235,8 @@
             PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
                 BR.getSourceManager(), ADC);
 
-        BR.EmitBasicReport(D, "Allocator sizeof operand mismatch",
-            categories::UnixAPI,
-            OS.str(),
-            L, Ranges);
+        BR.EmitBasicReport(D, this, "Allocator sizeof operand mismatch",
+                           categories::UnixAPI, OS.str(), L, Ranges);
       }
     }
   }
diff --git a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index fc28e1f..b180c03 100644
--- a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -32,7 +32,7 @@
 namespace {
 class NSAutoreleasePoolChecker
   : public Checker<check::PreObjCMessage> {
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
   mutable Selector releaseS;
 
 public:
@@ -59,7 +59,7 @@
     return;
 
   if (!BT)
-    BT.reset(new BugType("Use -drain instead of -release",
+    BT.reset(new BugType(this, "Use -drain instead of -release",
                          "API Upgrade (Apple)"));
 
   ExplodedNode *N = C.addTransition();
diff --git a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 9f01522..5a505fc 100644
--- a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -54,16 +54,15 @@
                                         BugReporter &BR) const {
   if (!D->isThisDeclarationADefinition())
     return;
-  if (!D->getResultType()->isVoidType())
+  if (!D->getReturnType()->isVoidType())
     return;
 
   if (!II)
     II = &D->getASTContext().Idents.get("NSError"); 
 
   bool hasNSError = false;
-  for (ObjCMethodDecl::param_const_iterator
-         I = D->param_begin(), E = D->param_end(); I != E; ++I)  {
-    if (IsNSError((*I)->getType(), II)) {
+  for (const auto *I : D->params())  {
+    if (IsNSError(I->getType(), II)) {
       hasNSError = true;
       break;
     }
@@ -75,7 +74,7 @@
         "error occurred";
     PathDiagnosticLocation L =
       PathDiagnosticLocation::create(D, BR.getSourceManager());
-    BR.EmitBasicReport(D, "Bad return type when passing NSError**",
+    BR.EmitBasicReport(D, this, "Bad return type when passing NSError**",
                        "Coding conventions (Apple)", err, L);
   }
 }
@@ -102,16 +101,15 @@
                                         BugReporter &BR) const {
   if (!D->doesThisDeclarationHaveABody())
     return;
-  if (!D->getResultType()->isVoidType())
+  if (!D->getReturnType()->isVoidType())
     return;
 
   if (!II)
     II = &D->getASTContext().Idents.get("CFErrorRef"); 
 
   bool hasCFError = false;
-  for (FunctionDecl::param_const_iterator
-         I = D->param_begin(), E = D->param_end(); I != E; ++I)  {
-    if (IsCFError((*I)->getType(), II)) {
+  for (auto I : D->params())  {
+    if (IsCFError(I->getType(), II)) {
       hasCFError = true;
       break;
     }
@@ -123,7 +121,7 @@
         "error occurred";
     PathDiagnosticLocation L =
       PathDiagnosticLocation::create(D, BR.getSourceManager());
-    BR.EmitBasicReport(D, "Bad return type when passing CFErrorRef*",
+    BR.EmitBasicReport(D, this, "Bad return type when passing CFErrorRef*",
                        "Coding conventions (Apple)", err, L);
   }
 }
@@ -136,14 +134,16 @@
 
 class NSErrorDerefBug : public BugType {
 public:
-  NSErrorDerefBug() : BugType("NSError** null dereference",
-                              "Coding conventions (Apple)") {}
+  NSErrorDerefBug(const CheckerBase *Checker)
+      : BugType(Checker, "NSError** null dereference",
+                "Coding conventions (Apple)") {}
 };
 
 class CFErrorDerefBug : public BugType {
 public:
-  CFErrorDerefBug() : BugType("CFErrorRef* null dereference",
-                              "Coding conventions (Apple)") {}
+  CFErrorDerefBug(const CheckerBase *Checker)
+      : BugType(Checker, "CFErrorRef* null dereference",
+                "Coding conventions (Apple)") {}
 };
 
 }
@@ -264,11 +264,10 @@
 
   BugType *bug = 0;
   if (isNSError)
-    bug = new NSErrorDerefBug();
+    bug = new NSErrorDerefBug(this);
   else
-    bug = new CFErrorDerefBug();
-  BugReport *report = new BugReport(*bug, os.str(),
-                                                    event.SinkNode);
+    bug = new CFErrorDerefBug(this);
+  BugReport *report = new BugReport(*bug, os.str(), event.SinkNode);
   BR.emitReport(report);
 }
 
@@ -305,14 +304,14 @@
 
 void ento::registerNSErrorChecker(CheckerManager &mgr) {
   mgr.registerChecker<NSErrorMethodChecker>();
-  NSOrCFErrorDerefChecker *
-    checker = mgr.registerChecker<NSOrCFErrorDerefChecker>();
+  NSOrCFErrorDerefChecker *checker =
+      mgr.registerChecker<NSOrCFErrorDerefChecker>();
   checker->ShouldCheckNSError = true;
 }
 
 void ento::registerCFErrorChecker(CheckerManager &mgr) {
   mgr.registerChecker<CFErrorFunctionChecker>();
-  NSOrCFErrorDerefChecker *
-    checker = mgr.registerChecker<NSOrCFErrorDerefChecker>();
+  NSOrCFErrorDerefChecker *checker =
+      mgr.registerChecker<NSOrCFErrorDerefChecker>();
   checker->ShouldCheckCFError = true;
 }
diff --git a/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index 0e1064e..0e9cbba 100644
--- a/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -37,11 +37,10 @@
 
 void NoReturnFunctionChecker::checkPostCall(const CallEvent &CE,
                                             CheckerContext &C) const {
-  ProgramStateRef state = C.getState();
   bool BuildSinks = false;
 
   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CE.getDecl()))
-    BuildSinks = FD->getAttr<AnalyzerNoReturnAttr>() || FD->isNoReturn();
+    BuildSinks = FD->hasAttr<AnalyzerNoReturnAttr>() || FD->isNoReturn();
 
   const Expr *Callee = CE.getOriginExpr();
   if (!BuildSinks && Callee)
@@ -151,7 +150,6 @@
   C.generateSink();
 }
 
-
 void ento::registerNoReturnFunctionChecker(CheckerManager &mgr) {
   mgr.registerChecker<NoReturnFunctionChecker>();
 }
diff --git a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 273a7a3..293114f 100644
--- a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -29,8 +29,9 @@
 namespace {
 class NonNullParamChecker
   : public Checker< check::PreCall > {
-  mutable OwningPtr<BugType> BTAttrNonNull;
-  mutable OwningPtr<BugType> BTNullRefArg;
+  mutable std::unique_ptr<BugType> BTAttrNonNull;
+  mutable std::unique_ptr<BugType> BTNullRefArg;
+
 public:
 
   void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
@@ -43,7 +44,7 @@
 } // end anonymous namespace
 
 void NonNullParamChecker::checkPreCall(const CallEvent &Call,
-                                      CheckerContext &C) const {
+                                       CheckerContext &C) const {
   const Decl *FD = Call.getDecl();
   if (!FD)
     return;
@@ -66,6 +67,12 @@
     }
 
     bool haveAttrNonNull = Att && Att->isNonNull(idx);
+    if (!haveAttrNonNull) {
+      // Check if the parameter is also marked 'nonnull'.
+      ArrayRef<ParmVarDecl*> parms = Call.parameters();
+      if (idx < parms.size())
+        haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
+    }
 
     if (!haveRefTypeParam && !haveAttrNonNull)
       continue;
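// A minimal sketch, not part of this patch: the parameter-level 'nonnull'
// spelling this hunk starts honoring, next to the function-level form that was
// already handled.  Function names are made up for illustration.
void takes_ptr(int *p __attribute__((nonnull)));     // attribute on the parameter (new path)
void takes_ptr2(int *p) __attribute__((nonnull(1))); // attribute on the declaration (old path)

void caller() {
  takes_ptr(0); // warn: null passed to a 'nonnull' parameter
}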
@@ -98,7 +105,9 @@
         V = *CSV_I;
         DV = V.getAs<DefinedSVal>();
         assert(++CSV_I == CSV->end());
-        if (!DV)
+        // FIXME: Handle (some_union){ some_other_union_val }, which turns into
+        // a LazyCompoundVal inside a CompoundVal.
+        if (!V.getAs<Loc>())
           continue;
         // Retrieve the corresponding expression.
         if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
@@ -114,7 +123,7 @@
 
     ConstraintManager &CM = C.getConstraintManager();
     ProgramStateRef stateNotNull, stateNull;
-    llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
+    std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
 
     if (stateNull && !stateNotNull) {
       // Generate an error node.  Check for a null node in case
@@ -156,8 +165,7 @@
   // the BugReport is passed to 'EmitWarning'.
   if (!BTAttrNonNull)
     BTAttrNonNull.reset(new BugType(
-                            "Argument with 'nonnull' attribute passed null",
-                            "API"));
+        this, "Argument with 'nonnull' attribute passed null", "API"));
 
   BugReport *R = new BugReport(*BTAttrNonNull,
                   "Null pointer passed as an argument to a 'nonnull' parameter",
@@ -171,7 +179,7 @@
 BugReport *NonNullParamChecker::genReportReferenceToNullPointer(
   const ExplodedNode *ErrorNode, const Expr *ArgE) const {
   if (!BTNullRefArg)
-    BTNullRefArg.reset(new BuiltinBug("Dereference of null pointer"));
+    BTNullRefArg.reset(new BuiltinBug(this, "Dereference of null pointer"));
 
   BugReport *R = new BugReport(*BTNullRefArg,
                                "Forming reference to null pointer",
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 4018a66..fbf2d73 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -26,8 +26,8 @@
 namespace {
 class ObjCAtSyncChecker
     : public Checker< check::PreStmt<ObjCAtSynchronizedStmt> > {
-  mutable OwningPtr<BuiltinBug> BT_null;
-  mutable OwningPtr<BuiltinBug> BT_undef;
+  mutable std::unique_ptr<BuiltinBug> BT_null;
+  mutable std::unique_ptr<BuiltinBug> BT_undef;
 
 public:
   void checkPreStmt(const ObjCAtSynchronizedStmt *S, CheckerContext &C) const;
@@ -45,8 +45,8 @@
   if (V.getAs<UndefinedVal>()) {
     if (ExplodedNode *N = C.generateSink()) {
       if (!BT_undef)
-        BT_undef.reset(new BuiltinBug("Uninitialized value used as mutex "
-                                  "for @synchronized"));
+        BT_undef.reset(new BuiltinBug(this, "Uninitialized value used as mutex "
+                                            "for @synchronized"));
       BugReport *report =
         new BugReport(*BT_undef, BT_undef->getDescription(), N);
       bugreporter::trackNullOrUndefValue(N, Ex, *report);
@@ -60,7 +60,7 @@
 
   // Check for null mutexes.
   ProgramStateRef notNullState, nullState;
-  llvm::tie(notNullState, nullState) = state->assume(V.castAs<DefinedSVal>());
+  std::tie(notNullState, nullState) = state->assume(V.castAs<DefinedSVal>());
 
   if (nullState) {
     if (!notNullState) {
@@ -68,8 +68,9 @@
       // a null mutex just means no synchronization occurs.
       if (ExplodedNode *N = C.addTransition(nullState)) {
         if (!BT_null)
-          BT_null.reset(new BuiltinBug("Nil value used as mutex for @synchronized() "
-                                   "(no synchronization will occur)"));
+          BT_null.reset(new BuiltinBug(
+              this, "Nil value used as mutex for @synchronized() "
+                    "(no synchronization will occur)"));
         BugReport *report =
           new BugReport(*BT_null, BT_null->getDescription(), N);
         bugreporter::trackNullOrUndefValue(N, Ex, *report);
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index 503b1b5..239860d 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -27,6 +27,7 @@
 namespace {
 class WalkAST : public StmtVisitor<WalkAST> {
   BugReporter &BR;
+  const CheckerBase *Checker;
   AnalysisDeclContext* AC;
   ASTContext &ASTC;
   uint64_t PtrWidth;
@@ -71,9 +72,9 @@
   }
 
 public:
-  WalkAST(BugReporter &br, AnalysisDeclContext* ac)
-  : BR(br), AC(ac), ASTC(AC->getASTContext()),
-    PtrWidth(ASTC.getTargetInfo().getPointerWidth(0)) {}
+  WalkAST(BugReporter &br, const CheckerBase *checker, AnalysisDeclContext *ac)
+      : BR(br), Checker(checker), AC(ac), ASTC(AC->getASTContext()),
+        PtrWidth(ASTC.getTargetInfo().getPointerWidth(0)) {}
 
   // Statement visitor methods.
   void VisitChildren(Stmt *S);
@@ -142,9 +143,9 @@
 
     PathDiagnosticLocation CELoc =
         PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
-    BR.EmitBasicReport(AC->getDecl(),
-                       OsName.str(), categories::CoreFoundationObjectiveC,
-                       Os.str(), CELoc, Arg->getSourceRange());
+    BR.EmitBasicReport(AC->getDecl(), Checker, OsName.str(),
+                       categories::CoreFoundationObjectiveC, Os.str(), CELoc,
+                       Arg->getSourceRange());
   }
 
   // Recurse and check children.
@@ -163,7 +164,7 @@
 
   void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
                         BugReporter &BR) const {
-    WalkAST walker(BR, Mgr.getAnalysisDeclContext(D));
+    WalkAST walker(BR, this, Mgr.getAnalysisDeclContext(D));
     walker.Visit(D->getBody());
   }
 };
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index b9e96ee..8e51154 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -30,10 +30,10 @@
 namespace {
 class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
                                              check::PostStmt<CallExpr> > {
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
   inline void initBugType() const {
     if (!BT)
-      BT.reset(new BugType("CFArray API",
+      BT.reset(new BugType(this, "CFArray API",
                            categories::CoreFoundationObjectiveC));
   }
 
diff --git a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 789b9f4..a2cf8e1 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -181,16 +181,12 @@
 
 
   // Iterate over all instance methods.
-  for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
-                                                 E = D->instmeth_end();
-       I != E; ++I) {
-    Selector S = (*I)->getSelector();
+  for (auto *MD : D->instance_methods()) {
+    Selector S = MD->getSelector();
     // Find out whether this is a selector that we want to check.
     if (!SelectorsForClass[SuperclassName].count(S))
       continue;
 
-    ObjCMethodDecl *MD = *I;
-
     // Check if the method calls its superclass implementation.
     if (MD->getBody())
     {
@@ -212,7 +208,7 @@
            << "' instance method in " << SuperclassName.str() << " subclass '"
            << *D << "' is missing a [super " << S.getAsString() << "] call";
 
-        BR.EmitBasicReport(MD, Name, categories::CoreFoundationObjectiveC,
+        BR.EmitBasicReport(MD, this, Name, categories::CoreFoundationObjectiveC,
                            os.str(), DLoc);
       }
     }
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 8506e08..6c33084 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -74,17 +74,17 @@
   void checkPostCall(const CallEvent &CE, CheckerContext &C) const;
 
   void printState(raw_ostream &Out, ProgramStateRef State,
-                  const char *NL, const char *Sep) const;
+                  const char *NL, const char *Sep) const override;
 };
 } // end anonymous namespace
 
 namespace {
 
 class InitSelfBug : public BugType {
-  const std::string desc;
 public:
-  InitSelfBug() : BugType("Missing \"self = [(super or self) init...]\"",
-                          categories::CoreFoundationObjectiveC) {}
+  InitSelfBug(const CheckerBase *Checker)
+      : BugType(Checker, "Missing \"self = [(super or self) init...]\"",
+                categories::CoreFoundationObjectiveC) {}
 };
 
 } // end anonymous namespace
@@ -147,7 +147,8 @@
 }
 
 static void checkForInvalidSelf(const Expr *E, CheckerContext &C,
-                                const char *errorStr) {
+                                const char *errorStr,
+                                const CheckerBase *Checker) {
   if (!E)
     return;
   
@@ -162,8 +163,7 @@
   if (!N)
     return;
 
-  BugReport *report =
-    new BugReport(*new InitSelfBug(), errorStr, N);
+  BugReport *report = new BugReport(*new InitSelfBug(Checker), errorStr, N);
   C.emitReport(report);
 }
 
@@ -205,9 +205,11 @@
                                  C.getCurrentAnalysisDeclContext()->getDecl())))
     return;
 
-  checkForInvalidSelf(E->getBase(), C,
-    "Instance variable used while 'self' is not set to the result of "
-                                                 "'[(super or self) init...]'");
+  checkForInvalidSelf(
+      E->getBase(), C,
+      "Instance variable used while 'self' is not set to the result of "
+      "'[(super or self) init...]'",
+      this);
 }
 
 void ObjCSelfInitChecker::checkPreStmt(const ReturnStmt *S,
@@ -218,8 +220,9 @@
     return;
 
   checkForInvalidSelf(S->getRetValue(), C,
-    "Returning 'self' while it is not set to the result of "
-                                                 "'[(super or self) init...]'");
+                      "Returning 'self' while it is not set to the result of "
+                      "'[(super or self) init...]'",
+                      this);
 }
 
 // When a call receives a reference to 'self', [Pre/Post]Call pass
@@ -347,7 +350,7 @@
   if (FlagMap.isEmpty() && !DidCallInit && !PreCallFlags)
     return;
 
-  Out << Sep << NL << "ObjCSelfInitChecker:" << NL;
+  Out << Sep << NL << *this << " :" << NL;
 
   if (DidCallInit)
     Out << "  An init method has been called." << NL;
diff --git a/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index c66c7d0..d3b1753 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -77,22 +77,17 @@
 
 static void Scan(IvarUsageMap& M, const ObjCContainerDecl *D) {
   // Scan the methods for accesses.
-  for (ObjCContainerDecl::instmeth_iterator I = D->instmeth_begin(),
-       E = D->instmeth_end(); I!=E; ++I)
-    Scan(M, (*I)->getBody());
+  for (const auto *I : D->instance_methods())
+    Scan(M, I->getBody());
 
   if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) {
     // Scan for @synthesized property methods that act as setters/getters
     // to an ivar.
-    for (ObjCImplementationDecl::propimpl_iterator I = ID->propimpl_begin(),
-         E = ID->propimpl_end(); I!=E; ++I)
-      Scan(M, *I);
+    for (const auto *I : ID->property_impls())
+      Scan(M, I);
 
     // Scan the associated categories as well.
-    for (ObjCInterfaceDecl::visible_categories_iterator
-           Cat = ID->getClassInterface()->visible_categories_begin(),
-           CatEnd = ID->getClassInterface()->visible_categories_end();
-         Cat != CatEnd; ++Cat) {
+    for (const auto *Cat : ID->getClassInterface()->visible_categories()) {
       if (const ObjCCategoryImplDecl *CID = Cat->getImplementation())
         Scan(M, CID);
     }
@@ -101,9 +96,8 @@
 
 static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
                  SourceManager &SM) {
-  for (DeclContext::decl_iterator I=C->decls_begin(), E=C->decls_end();
-       I!=E; ++I)
-    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+  for (const auto *I : C->decls())
+    if (const auto *FD = dyn_cast<FunctionDecl>(I)) {
       SourceLocation L = FD->getLocStart();
       if (SM.getFileID(L) == FID)
         Scan(M, FD->getBody());
@@ -111,29 +105,26 @@
 }
 
 static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
-                                BugReporter &BR) {
+                                BugReporter &BR,
+                                const CheckerBase *Checker) {
 
   const ObjCInterfaceDecl *ID = D->getClassInterface();
   IvarUsageMap M;
 
   // Iterate over the ivars.
-  for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(),
-        E=ID->ivar_end(); I!=E; ++I) {
-
-    const ObjCIvarDecl *ID = *I;
-
+  for (const auto *Ivar : ID->ivars()) {
     // Ignore ivars that...
     // (a) aren't private
     // (b) explicitly marked unused
     // (c) are iboutlets
     // (d) are unnamed bitfields
-    if (ID->getAccessControl() != ObjCIvarDecl::Private ||
-        ID->getAttr<UnusedAttr>() || ID->getAttr<IBOutletAttr>() ||
-        ID->getAttr<IBOutletCollectionAttr>() ||
-        ID->isUnnamedBitfield())
+    if (Ivar->getAccessControl() != ObjCIvarDecl::Private ||
+        Ivar->hasAttr<UnusedAttr>() || Ivar->hasAttr<IBOutletAttr>() ||
+        Ivar->hasAttr<IBOutletCollectionAttr>() ||
+        Ivar->isUnnamedBitfield())
       continue;
 
-    M[ID] = Unused;
+    M[Ivar] = Unused;
   }
 
   if (M.empty())
@@ -172,7 +163,7 @@
 
       PathDiagnosticLocation L =
         PathDiagnosticLocation::create(I->first, BR.getSourceManager());
-      BR.EmitBasicReport(D, "Unused instance variable", "Optimization",
+      BR.EmitBasicReport(D, Checker, "Unused instance variable", "Optimization",
                          os.str(), L);
     }
 }
@@ -187,7 +178,7 @@
 public:
   void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
                     BugReporter &BR) const {
-    checkObjCUnusedIvar(D, BR);
+    checkObjCUnusedIvar(D, BR, this);
   }
 };
 }
diff --git a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index bcbfacd..00480e4 100644
--- a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -24,7 +24,7 @@
 namespace {
 class PointerArithChecker 
   : public Checker< check::PreStmt<BinaryOperator> > {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
 
 public:
   void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -53,10 +53,11 @@
 
     if (ExplodedNode *N = C.addTransition()) {
       if (!BT)
-        BT.reset(new BuiltinBug("Dangerous pointer arithmetic",
-                            "Pointer arithmetic done on non-array variables "
-                            "means reliance on memory layout, which is "
-                            "dangerous."));
+        BT.reset(
+            new BuiltinBug(this, "Dangerous pointer arithmetic",
+                           "Pointer arithmetic done on non-array variables "
+                           "means reliance on memory layout, which is "
+                           "dangerous."));
       BugReport *R = new BugReport(*BT, BT->getDescription(), N);
       R->addRange(B->getSourceRange());
       C.emitReport(R);
diff --git a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 07c82d4..fbb2628 100644
--- a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -25,7 +25,7 @@
 namespace {
 class PointerSubChecker 
   : public Checker< check::PreStmt<BinaryOperator> > {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
 
 public:
   void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -62,9 +62,10 @@
 
   if (ExplodedNode *N = C.addTransition()) {
     if (!BT)
-      BT.reset(new BuiltinBug("Pointer subtraction", 
-                          "Subtraction of two pointers that do not point to "
-                          "the same memory chunk may cause incorrect result."));
+      BT.reset(
+          new BuiltinBug(this, "Pointer subtraction",
+                         "Subtraction of two pointers that do not point to "
+                         "the same memory chunk may cause incorrect result."));
     BugReport *R = new BugReport(*BT, BT->getDescription(), N);
     R->addRange(B->getSourceRange());
     C.emitReport(R);
diff --git a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index ffb8cf2..1ede3a2 100644
--- a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -24,9 +24,37 @@
 using namespace ento;
 
 namespace {
+
+struct LockState {
+  enum Kind { Destroyed, Locked, Unlocked } K;
+
+private:
+  LockState(Kind K) : K(K) {}
+
+public:
+  static LockState getLocked(void) { return LockState(Locked); }
+  static LockState getUnlocked(void) { return LockState(Unlocked); }
+  static LockState getDestroyed(void) { return LockState(Destroyed); }
+
+  bool operator==(const LockState &X) const {
+    return K == X.K;
+  }
+
+  bool isLocked() const { return K == Locked; }
+  bool isUnlocked() const { return K == Unlocked; }
+  bool isDestroyed() const { return K == Destroyed; }
+
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddInteger(K);
+  }
+};
+
 class PthreadLockChecker : public Checker< check::PostStmt<CallExpr> > {
-  mutable OwningPtr<BugType> BT_doublelock;
-  mutable OwningPtr<BugType> BT_lor;
+  mutable std::unique_ptr<BugType> BT_doublelock;
+  mutable std::unique_ptr<BugType> BT_doubleunlock;
+  mutable std::unique_ptr<BugType> BT_destroylock;
+  mutable std::unique_ptr<BugType> BT_initlock;
+  mutable std::unique_ptr<BugType> BT_lor;
   enum LockingSemantics {
     NotApplicable = 0,
     PthreadSemantics,
@@ -39,12 +67,16 @@
                    bool isTryLock, enum LockingSemantics semantics) const;
     
   void ReleaseLock(CheckerContext &C, const CallExpr *CE, SVal lock) const;
+  void DestroyLock(CheckerContext &C, const CallExpr *CE, SVal Lock) const;
+  void InitLock(CheckerContext &C, const CallExpr *CE, SVal Lock) const;
+  void reportUseDestroyedBug(CheckerContext &C, const CallExpr *CE) const;
 };
 } // end anonymous namespace
 
 // GDM Entry for tracking lock state.
 REGISTER_LIST_WITH_PROGRAMSTATE(LockSet, const MemRegion *)
 
+REGISTER_MAP_WITH_PROGRAMSTATE(LockMap, const MemRegion *, LockState)
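// A minimal sketch, not part of this patch: the access pattern for the LockMap
// trait registered above (the calls appear verbatim in the handlers below).
//
//   ProgramStateRef State = C.getState();
//   if (const LockState *LS = State->get<LockMap>(LockRegion)) {
//     // LS is the last recorded state of this mutex region.
//   }
//   State = State->set<LockMap>(LockRegion, LockState::getLocked());
//   C.addTransition(State);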
 
 void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
                                        CheckerContext &C) const {
@@ -54,7 +86,7 @@
   if (FName.empty())
     return;
 
-  if (CE->getNumArgs() != 1)
+  if (CE->getNumArgs() != 1 && CE->getNumArgs() != 2)
     return;
 
   if (FName == "pthread_mutex_lock" ||
@@ -69,7 +101,7 @@
                 false, XNUSemantics);
   else if (FName == "pthread_mutex_trylock" ||
            FName == "pthread_rwlock_tryrdlock" ||
-           FName == "pthread_rwlock_tryrwlock")
+           FName == "pthread_rwlock_trywrlock")
     AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
                 true, PthreadSemantics);
   else if (FName == "lck_mtx_try_lock" ||
@@ -82,6 +114,11 @@
            FName == "lck_mtx_unlock" ||
            FName == "lck_rw_done")
     ReleaseLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+  else if (FName == "pthread_mutex_destroy" ||
+           FName == "lck_mtx_destroy")
+    DestroyLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+  else if (FName == "pthread_mutex_init")
+    InitLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
 }
 
 void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
@@ -100,18 +137,24 @@
   
   DefinedSVal retVal = X.castAs<DefinedSVal>();
 
-  if (state->contains<LockSet>(lockR)) {
-    if (!BT_doublelock)
-      BT_doublelock.reset(new BugType("Double locking", "Lock checker"));
-    ExplodedNode *N = C.generateSink();
-    if (!N)
+  if (const LockState *LState = state->get<LockMap>(lockR)) {
+    if (LState->isLocked()) {
+      if (!BT_doublelock)
+        BT_doublelock.reset(new BugType(this, "Double locking",
+                                        "Lock checker"));
+      ExplodedNode *N = C.generateSink();
+      if (!N)
+        return;
+      BugReport *report = new BugReport(*BT_doublelock,
+                                        "This lock has already been acquired",
+                                        N);
+      report->addRange(CE->getArg(0)->getSourceRange());
+      C.emitReport(report);
       return;
-    BugReport *report = new BugReport(*BT_doublelock,
-                                                      "This lock has already "
-                                                      "been acquired", N);
-    report->addRange(CE->getArg(0)->getSourceRange());
-    C.emitReport(report);
-    return;
+    } else if (LState->isDestroyed()) {
+      reportUseDestroyedBug(C, CE);
+      return;
+    }
   }
 
   ProgramStateRef lockSucc = state;
@@ -120,10 +163,10 @@
     ProgramStateRef lockFail;
     switch (semantics) {
     case PthreadSemantics:
-      llvm::tie(lockFail, lockSucc) = state->assume(retVal);    
+      std::tie(lockFail, lockSucc) = state->assume(retVal);
       break;
     case XNUSemantics:
-      llvm::tie(lockSucc, lockFail) = state->assume(retVal);    
+      std::tie(lockSucc, lockFail) = state->assume(retVal);
       break;
     default:
       llvm_unreachable("Unknown tryLock locking semantics");
@@ -144,6 +187,7 @@
   
   // Record that the lock was acquired.  
   lockSucc = lockSucc->add<LockSet>(lockR);
+  lockSucc = lockSucc->set<LockMap>(lockR, LockState::getLocked());
   C.addTransition(lockSucc);
 }
 
@@ -155,35 +199,140 @@
     return;
   
   ProgramStateRef state = C.getState();
+
+  if (const LockState *LState = state->get<LockMap>(lockR)) {
+    if (LState->isUnlocked()) {
+      if (!BT_doubleunlock)
+        BT_doubleunlock.reset(new BugType(this, "Double unlocking",
+                                          "Lock checker"));
+      ExplodedNode *N = C.generateSink();
+      if (!N)
+        return;
+      BugReport *Report = new BugReport(*BT_doubleunlock,
+                                        "This lock has already been unlocked",
+                                        N);
+      Report->addRange(CE->getArg(0)->getSourceRange());
+      C.emitReport(Report);
+      return;
+    } else if (LState->isDestroyed()) {
+      reportUseDestroyedBug(C, CE);
+      return;
+    }
+  }
+
   LockSetTy LS = state->get<LockSet>();
 
   // FIXME: Better analysis requires IPA for wrappers.
-  // FIXME: check for double unlocks
-  if (LS.isEmpty())
-    return;
-  
-  const MemRegion *firstLockR = LS.getHead();
-  if (firstLockR != lockR) {
-    if (!BT_lor)
-      BT_lor.reset(new BugType("Lock order reversal", "Lock checker"));
-    ExplodedNode *N = C.generateSink();
-    if (!N)
+
+  if (!LS.isEmpty()) {
+    const MemRegion *firstLockR = LS.getHead();
+    if (firstLockR != lockR) {
+      if (!BT_lor)
+        BT_lor.reset(new BugType(this, "Lock order reversal", "Lock checker"));
+      ExplodedNode *N = C.generateSink();
+      if (!N)
+        return;
+      BugReport *report = new BugReport(*BT_lor,
+                                        "This was not the most recently "
+                                        "acquired lock. Possible lock order "
+                                        "reversal",
+                                        N);
+      report->addRange(CE->getArg(0)->getSourceRange());
+      C.emitReport(report);
       return;
-    BugReport *report = new BugReport(*BT_lor,
-                                                      "This was not the most "
-                                                      "recently acquired lock. "
-                                                      "Possible lock order "
-                                                      "reversal", N);
-    report->addRange(CE->getArg(0)->getSourceRange());
-    C.emitReport(report);
-    return;
+    }
+    // Record that the lock was released.
+    state = state->set<LockSet>(LS.getTail());
   }
 
-  // Record that the lock was released. 
-  state = state->set<LockSet>(LS.getTail());
+  state = state->set<LockMap>(lockR, LockState::getUnlocked());
   C.addTransition(state);
 }
 
+void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
+                                     SVal Lock) const {
+
+  const MemRegion *LockR = Lock.getAsRegion();
+  if (!LockR)
+    return;
+
+  ProgramStateRef State = C.getState();
+
+  const LockState *LState = State->get<LockMap>(LockR);
+  if (!LState || LState->isUnlocked()) {
+    State = State->set<LockMap>(LockR, LockState::getDestroyed());
+    C.addTransition(State);
+    return;
+  }
+
+  StringRef Message;
+
+  if (LState->isLocked()) {
+    Message = "This lock is still locked";
+  } else {
+    Message = "This lock has already been destroyed";
+  }
+
+  if (!BT_destroylock)
+    BT_destroylock.reset(new BugType(this, "Destroy invalid lock",
+                                     "Lock checker"));
+  ExplodedNode *N = C.generateSink();
+  if (!N)
+    return;
+  BugReport *Report = new BugReport(*BT_destroylock, Message, N);
+  Report->addRange(CE->getArg(0)->getSourceRange());
+  C.emitReport(Report);
+}
+
+void PthreadLockChecker::InitLock(CheckerContext &C, const CallExpr *CE,
+                                  SVal Lock) const {
+
+  const MemRegion *LockR = Lock.getAsRegion();
+  if (!LockR)
+    return;
+
+  ProgramStateRef State = C.getState();
+
+  const struct LockState *LState = State->get<LockMap>(LockR);
+  if (!LState || LState->isDestroyed()) {
+    State = State->set<LockMap>(LockR, LockState::getUnlocked());
+    C.addTransition(State);
+    return;
+  }
+
+  StringRef Message;
+
+  if (LState->isLocked()) {
+    Message = "This lock is still being held";
+  } else {
+    Message = "This lock has already been initialized";
+  }
+
+  if (!BT_initlock)
+    BT_initlock.reset(new BugType(this, "Init invalid lock",
+                                  "Lock checker"));
+  ExplodedNode *N = C.generateSink();
+  if (!N)
+    return;
+  BugReport *Report = new BugReport(*BT_initlock, Message, N);
+  Report->addRange(CE->getArg(0)->getSourceRange());
+  C.emitReport(Report);
+}
+
+void PthreadLockChecker::reportUseDestroyedBug(CheckerContext &C,
+                                               const CallExpr *CE) const {
+  if (!BT_destroylock)
+    BT_destroylock.reset(new BugType(this, "Use destroyed lock",
+                                     "Lock checker"));
+  ExplodedNode *N = C.generateSink();
+  if (!N)
+    return;
+  BugReport *Report = new BugReport(*BT_destroylock,
+                                    "This lock has already been destroyed",
+                                    N);
+  Report->addRange(CE->getArg(0)->getSourceRange());
+  C.emitReport(Report);
+}
 
 void ento::registerPthreadLockChecker(CheckerManager &mgr) {
   mgr.registerChecker<PthreadLockChecker>();
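
The PthreadLockChecker change above effectively gives each mutex region a small state machine (locked / unlocked / destroyed) recorded in the new LockMap, so double locks, double unlocks, and use-after-destroy can be reported. The following is a minimal standalone C++ sketch of that idea only; LockState, LockModel, and the string diagnostics are illustrative names, not part of this patch or of the analyzer API.

#include <cassert>
#include <map>
#include <string>

// Simplified stand-in for the per-mutex state the checker keeps in LockMap.
enum class LockState { Untracked, Locked, Unlocked, Destroyed };

struct LockModel {
  std::map<std::string, LockState> States; // keyed by a mutex "region" name

  // Each operation returns an empty string on success or a diagnostic text.
  std::string acquire(const std::string &M) {
    LockState &S = States[M]; // default-inserts Untracked
    if (S == LockState::Locked)    return "This lock has already been acquired";
    if (S == LockState::Destroyed) return "This lock has already been destroyed";
    S = LockState::Locked;
    return {};
  }

  std::string release(const std::string &M) {
    LockState &S = States[M];
    if (S == LockState::Unlocked)  return "This lock has already been unlocked";
    if (S == LockState::Destroyed) return "This lock has already been destroyed";
    S = LockState::Unlocked;
    return {};
  }

  std::string destroy(const std::string &M) {
    LockState &S = States[M];
    if (S == LockState::Locked)    return "This lock is still locked";
    if (S == LockState::Destroyed) return "This lock has already been destroyed";
    S = LockState::Destroyed;
    return {};
  }
};

int main() {
  LockModel M;
  assert(M.acquire("mu").empty());
  assert(M.acquire("mu") == "This lock has already been acquired"); // double lock
  assert(M.release("mu").empty());
  assert(M.destroy("mu").empty());
  assert(M.acquire("mu") == "This lock has already been destroyed"); // use after destroy
}
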
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index c474e78..9ac9931 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "ClangSACheckers.h"
+#include "AllocationDiagnostics.h"
 #include "clang/AST/Attr.h"
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/DeclObjC.h"
@@ -20,6 +21,7 @@
 #include "clang/Analysis/DomainSpecific/CocoaConventions.h"
 #include "clang/Basic/LangOptions.h"
 #include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
 #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
 #include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
 #include "clang/StaticAnalyzer/Core/Checker.h"
@@ -28,7 +30,6 @@
 #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
 #include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
 #include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/FoldingSet.h"
 #include "llvm/ADT/ImmutableList.h"
@@ -38,8 +39,6 @@
 #include "llvm/ADT/StringExtras.h"
 #include <cstdarg>
 
-#include "AllocationDiagnostics.h"
-
 using namespace clang;
 using namespace ento;
 using namespace objc_retain;
@@ -95,29 +94,70 @@
   };
 
 private:
-  Kind kind;
-  RetEffect::ObjKind okind;
+  /// The number of outstanding retains.
   unsigned Cnt;
+  /// The number of outstanding autoreleases.
   unsigned ACnt;
+  /// The (static) type of the object at the time we started tracking it.
   QualType T;
 
-  RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t)
-  : kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {}
+  /// The current state of the object.
+  ///
+  /// See the RefVal::Kind enum for possible values.
+  unsigned RawKind : 5;
+
+  /// The kind of object being tracked (CF or ObjC), if known.
+  ///
+  /// See the RetEffect::ObjKind enum for possible values.
+  unsigned RawObjectKind : 2;
+
+  /// True if the current state and/or retain count may turn out to not be the
+  /// best possible approximation of the reference counting state.
+  ///
+  /// If true, the checker may decide to throw away ("override") this state
+  /// in favor of something else when it sees the object being used in new ways.
+  ///
+  /// This setting should not be propagated to state derived from this state.
+  /// Once we start deriving new states, it would be inconsistent to override
+  /// them.
+  unsigned IsOverridable : 1;
+
+  RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t,
+         bool Overridable = false)
+    : Cnt(cnt), ACnt(acnt), T(t), RawKind(static_cast<unsigned>(k)),
+      RawObjectKind(static_cast<unsigned>(o)), IsOverridable(Overridable) {
+    assert(getKind() == k && "not enough bits for the kind");
+    assert(getObjKind() == o && "not enough bits for the object kind");
+  }
 
 public:
-  Kind getKind() const { return kind; }
+  Kind getKind() const { return static_cast<Kind>(RawKind); }
 
-  RetEffect::ObjKind getObjKind() const { return okind; }
+  RetEffect::ObjKind getObjKind() const {
+    return static_cast<RetEffect::ObjKind>(RawObjectKind);
+  }
 
   unsigned getCount() const { return Cnt; }
   unsigned getAutoreleaseCount() const { return ACnt; }
   unsigned getCombinedCounts() const { return Cnt + ACnt; }
-  void clearCounts() { Cnt = 0; ACnt = 0; }
-  void setCount(unsigned i) { Cnt = i; }
-  void setAutoreleaseCount(unsigned i) { ACnt = i; }
+  void clearCounts() {
+    Cnt = 0;
+    ACnt = 0;
+    IsOverridable = false;
+  }
+  void setCount(unsigned i) {
+    Cnt = i;
+    IsOverridable = false;
+  }
+  void setAutoreleaseCount(unsigned i) {
+    ACnt = i;
+    IsOverridable = false;
+  }
 
   QualType getType() const { return T; }
 
+  bool isOverridable() const { return IsOverridable; }
+
   bool isOwned() const {
     return getKind() == Owned;
   }
@@ -134,20 +174,31 @@
     return getKind() == ReturnedNotOwned;
   }
 
+  /// Create a state for an object whose lifetime is the responsibility of the
+  /// current function, at least partially.
+  ///
+  /// Most commonly, this is an owned object with a retain count of +1.
   static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
                           unsigned Count = 1) {
     return RefVal(Owned, o, Count, 0, t);
   }
 
+  /// Create a state for an object whose lifetime is not the responsibility of
+  /// the current function.
+  ///
+  /// Most commonly, this is an unowned object with a retain count of +0.
   static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
                              unsigned Count = 0) {
     return RefVal(NotOwned, o, Count, 0, t);
   }
 
-  // Comparison, profiling, and pretty-printing.
-
-  bool operator==(const RefVal& X) const {
-    return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
+  /// Create an "overridable" state for an unowned object at +0.
+  ///
+  /// An overridable state is one that provides a good approximation of the
+  /// reference counting state now, but which may be discarded later if the
+  /// checker sees the object being used in new ways.
+  static RefVal makeOverridableNotOwned(RetEffect::ObjKind o, QualType t) {
+    return RefVal(NotOwned, o, 0, 0, t, /*Overridable=*/true);
   }
 
   RefVal operator-(size_t i) const {
@@ -170,11 +221,24 @@
                   getType());
   }
 
+  // Comparison, profiling, and pretty-printing.
+
+  bool hasSameState(const RefVal &X) const {
+    return getKind() == X.getKind() && Cnt == X.Cnt && ACnt == X.ACnt;
+  }
+
+  bool operator==(const RefVal& X) const {
+    return T == X.T && hasSameState(X) && getObjKind() == X.getObjKind() &&
+           IsOverridable == X.IsOverridable;
+  }
+  
   void Profile(llvm::FoldingSetNodeID& ID) const {
-    ID.AddInteger((unsigned) kind);
+    ID.Add(T);
+    ID.AddInteger(RawKind);
     ID.AddInteger(Cnt);
     ID.AddInteger(ACnt);
-    ID.Add(T);
+    ID.AddInteger(RawObjectKind);
+    ID.AddBoolean(IsOverridable);
   }
 
   void print(raw_ostream &Out) const;
@@ -184,6 +248,9 @@
   if (!T.isNull())
     Out << "Tracked " << T.getAsString() << '/';
 
+  if (isOverridable())
+    Out << "(overridable) ";
+
   switch (getKind()) {
     default: llvm_unreachable("Invalid RefVal kind");
     case Owned: {
@@ -653,11 +720,11 @@
      AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
      ObjCAllocRetE(gcenabled
                     ? RetEffect::MakeGCNotOwned()
-                    : (usesARC ? RetEffect::MakeARCNotOwned()
+                    : (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
                                : RetEffect::MakeOwned(RetEffect::ObjC, true))),
      ObjCInitRetE(gcenabled 
                     ? RetEffect::MakeGCNotOwned()
-                    : (usesARC ? RetEffect::MakeARCNotOwned()
+                    : (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
                                : RetEffect::MakeOwnedWhenTrackedReceiver())) {
     InitializeClassMethodSummaries();
     InitializeMethodSummaries();
@@ -689,7 +756,7 @@
   const RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
     const ObjCInterfaceDecl *ID = MD->getClassInterface();
     Selector S = MD->getSelector();
-    QualType ResultTy = MD->getResultType();
+    QualType ResultTy = MD->getReturnType();
 
     ObjCMethodSummariesTy *CachedSummaries;
     if (MD->isInstanceMethod())
@@ -861,7 +928,7 @@
     // Special cases where the callback argument CANNOT free the return value.
     // This can generally only happen if we know that the callback will only be
     // called when the return value is already being deallocated.
-    if (const FunctionCall *FC = dyn_cast<FunctionCall>(&Call)) {
+    if (const SimpleFunctionCall *FC = dyn_cast<SimpleFunctionCall>(&Call)) {
       if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
         // When the CGBitmapContext is deallocated, the callback here will free
         // the associated data buffer.
@@ -909,7 +976,7 @@
   const RetainSummary *Summ;
   switch (Call.getKind()) {
   case CE_Function:
-    Summ = getFunctionSummary(cast<FunctionCall>(Call).getDecl());
+    Summ = getFunctionSummary(cast<SimpleFunctionCall>(Call).getDecl());
     break;
   case CE_CXXMember:
   case CE_CXXMemberOperator:
@@ -971,7 +1038,7 @@
     FName = FName.substr(FName.find_first_not_of('_'));
 
     // Inspect the result type.
-    QualType RetTy = FT->getResultType();
+    QualType RetTy = FT->getReturnType();
 
     // FIXME: This should all be refactored into a chain of "summary lookup"
     //  filters.
@@ -1102,7 +1169,7 @@
         break;
       }
 
-      if (FD->getAttr<CFAuditedTransferAttr>()) {
+      if (FD->hasAttr<CFAuditedTransferAttr>()) {
         S = getCFCreateGetRuleSummary(FD);
         break;
       }
@@ -1175,7 +1242,7 @@
   // Sanity check that this is *really* a unary function.  This can
   // happen if people do weird things.
   const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
-  if (!FTP || FTP->getNumArgs() != 1)
+  if (!FTP || FTP->getNumParams() != 1)
     return getPersistentStopSummary();
 
   assert (ScratchArgs.isEmpty());
@@ -1214,21 +1281,21 @@
 RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
                                                   const Decl *D) {
   if (cocoa::isCocoaObjectRef(RetTy)) {
-    if (D->getAttr<NSReturnsRetainedAttr>())
+    if (D->hasAttr<NSReturnsRetainedAttr>())
       return ObjCAllocRetE;
 
-    if (D->getAttr<NSReturnsNotRetainedAttr>() ||
-        D->getAttr<NSReturnsAutoreleasedAttr>())
+    if (D->hasAttr<NSReturnsNotRetainedAttr>() ||
+        D->hasAttr<NSReturnsAutoreleasedAttr>())
       return RetEffect::MakeNotOwned(RetEffect::ObjC);
 
   } else if (!RetTy->isPointerType()) {
     return None;
   }
 
-  if (D->getAttr<CFReturnsRetainedAttr>())
+  if (D->hasAttr<CFReturnsRetainedAttr>())
     return RetEffect::MakeOwned(RetEffect::CF, true);
 
-  if (D->getAttr<CFReturnsNotRetainedAttr>())
+  if (D->hasAttr<CFReturnsNotRetainedAttr>())
     return RetEffect::MakeNotOwned(RetEffect::CF);
 
   return None;
@@ -1248,13 +1315,13 @@
   for (FunctionDecl::param_const_iterator pi = FD->param_begin(), 
          pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
     const ParmVarDecl *pd = *pi;
-    if (pd->getAttr<NSConsumedAttr>())
+    if (pd->hasAttr<NSConsumedAttr>())
       Template->addArg(AF, parm_idx, DecRefMsg);
-    else if (pd->getAttr<CFConsumedAttr>())
+    else if (pd->hasAttr<CFConsumedAttr>())
       Template->addArg(AF, parm_idx, DecRef);      
   }
-  
-  QualType RetTy = FD->getResultType();
+
+  QualType RetTy = FD->getReturnType();
   if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
     Template->setRetEffect(*RetE);
 }
@@ -1269,7 +1336,7 @@
   RetainSummaryTemplate Template(Summ, *this);
 
   // Effects on the receiver.
-  if (MD->getAttr<NSConsumesSelfAttr>())
+  if (MD->hasAttr<NSConsumesSelfAttr>())
     Template->setReceiverEffect(DecRefMsg);      
   
   // Effects on the parameters.
@@ -1278,14 +1345,14 @@
          pi=MD->param_begin(), pe=MD->param_end();
        pi != pe; ++pi, ++parm_idx) {
     const ParmVarDecl *pd = *pi;
-    if (pd->getAttr<NSConsumedAttr>())
+    if (pd->hasAttr<NSConsumedAttr>())
       Template->addArg(AF, parm_idx, DecRefMsg);      
-    else if (pd->getAttr<CFConsumedAttr>()) {
+    else if (pd->hasAttr<CFConsumedAttr>()) {
       Template->addArg(AF, parm_idx, DecRef);      
     }   
   }
-  
-  QualType RetTy = MD->getResultType();
+
+  QualType RetTy = MD->getReturnType();
   if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
     Template->setRetEffect(*RetE);
 }
@@ -1507,6 +1574,11 @@
   //   as for NSWindow objects.
   addClassMethSummary("NSPanel", "alloc", NoTrackYet);
 
+  // For NSNull, objects returned by +null are singletons that ignore
+  // retain/release semantics.  Just don't track them.
+  // <rdar://problem/12858915>
+  addClassMethSummary("NSNull", "null", NoTrackYet);
+
   // Don't track allocated autorelease pools, as it is okay to prematurely
   // exit a method.
   addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
@@ -1543,8 +1615,9 @@
 
   class CFRefBug : public BugType {
   protected:
-    CFRefBug(StringRef name)
-    : BugType(name, categories::MemoryCoreFoundationObjectiveC) {}
+    CFRefBug(const CheckerBase *checker, StringRef name)
+        : BugType(checker, name, categories::MemoryCoreFoundationObjectiveC) {}
+
   public:
 
     // FIXME: Eventually remove.
@@ -1555,18 +1628,19 @@
 
   class UseAfterRelease : public CFRefBug {
   public:
-    UseAfterRelease() : CFRefBug("Use-after-release") {}
+    UseAfterRelease(const CheckerBase *checker)
+        : CFRefBug(checker, "Use-after-release") {}
 
-    const char *getDescription() const {
+    const char *getDescription() const override {
       return "Reference-counted object is used after it is released";
     }
   };
 
   class BadRelease : public CFRefBug {
   public:
-    BadRelease() : CFRefBug("Bad release") {}
+    BadRelease(const CheckerBase *checker) : CFRefBug(checker, "Bad release") {}
 
-    const char *getDescription() const {
+    const char *getDescription() const override {
       return "Incorrect decrement of the reference count of an object that is "
              "not owned at this point by the caller";
     }
@@ -1574,40 +1648,40 @@
 
   class DeallocGC : public CFRefBug {
   public:
-    DeallocGC()
-    : CFRefBug("-dealloc called while using garbage collection") {}
+    DeallocGC(const CheckerBase *checker)
+        : CFRefBug(checker, "-dealloc called while using garbage collection") {}
 
-    const char *getDescription() const {
+    const char *getDescription() const override {
       return "-dealloc called while using garbage collection";
     }
   };
 
   class DeallocNotOwned : public CFRefBug {
   public:
-    DeallocNotOwned()
-    : CFRefBug("-dealloc sent to non-exclusively owned object") {}
+    DeallocNotOwned(const CheckerBase *checker)
+        : CFRefBug(checker, "-dealloc sent to non-exclusively owned object") {}
 
-    const char *getDescription() const {
+    const char *getDescription() const override {
       return "-dealloc sent to object that may be referenced elsewhere";
     }
   };
 
   class OverAutorelease : public CFRefBug {
   public:
-    OverAutorelease()
-    : CFRefBug("Object autoreleased too many times") {}
+    OverAutorelease(const CheckerBase *checker)
+        : CFRefBug(checker, "Object autoreleased too many times") {}
 
-    const char *getDescription() const {
+    const char *getDescription() const override {
       return "Object autoreleased too many times";
     }
   };
 
   class ReturnedNotOwnedForOwned : public CFRefBug {
   public:
-    ReturnedNotOwnedForOwned()
-    : CFRefBug("Method should return an owned object") {}
+    ReturnedNotOwnedForOwned(const CheckerBase *checker)
+        : CFRefBug(checker, "Method should return an owned object") {}
 
-    const char *getDescription() const {
+    const char *getDescription() const override {
       return "Object with a +0 retain count returned to caller where a +1 "
              "(owning) retain count is expected";
     }
@@ -1615,15 +1689,14 @@
 
   class Leak : public CFRefBug {
   public:
-    Leak(StringRef name)
-    : CFRefBug(name) {
+    Leak(const CheckerBase *checker, StringRef name) : CFRefBug(checker, name) {
       // Leaks should not be reported if they are post-dominated by a sink.
       setSuppressOnSink(true);
     }
 
-    const char *getDescription() const { return ""; }
+    const char *getDescription() const override { return ""; }
 
-    bool isLeak() const { return true; }
+    bool isLeak() const override { return true; }
   };
 
   //===---------===//
@@ -1640,20 +1713,20 @@
     CFRefReportVisitor(SymbolRef sym, bool gcEnabled, const SummaryLogTy &log)
        : Sym(sym), SummaryLog(log), GCEnabled(gcEnabled) {}
 
-    virtual void Profile(llvm::FoldingSetNodeID &ID) const {
+    void Profile(llvm::FoldingSetNodeID &ID) const override {
       static int x = 0;
       ID.AddPointer(&x);
       ID.AddPointer(Sym);
     }
 
-    virtual PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
-                                           const ExplodedNode *PrevN,
-                                           BugReporterContext &BRC,
-                                           BugReport &BR);
+    PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+                                   const ExplodedNode *PrevN,
+                                   BugReporterContext &BRC,
+                                   BugReport &BR) override;
 
-    virtual PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
-                                            const ExplodedNode *N,
-                                            BugReport &BR);
+    PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
+                                    const ExplodedNode *N,
+                                    BugReport &BR) override;
   };
 
   class CFRefLeakReportVisitor : public CFRefReportVisitor {
@@ -1664,9 +1737,9 @@
 
     PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
                                     const ExplodedNode *N,
-                                    BugReport &BR);
+                                    BugReport &BR) override;
 
-    virtual BugReporterVisitor *clone() const {
+    BugReporterVisitor *clone() const override {
       // The curiously-recurring template pattern only works for one level of
       // subclassing. Rather than make a new template base for
       // CFRefReportVisitor, we simply override clone() to do the right thing.
@@ -1697,7 +1770,7 @@
       addGCModeDescription(LOpts, GCEnabled);
     }
 
-    virtual std::pair<ranges_iterator, ranges_iterator> getRanges() {
+    std::pair<ranges_iterator, ranges_iterator> getRanges() override {
       const CFRefBug& BugTy = static_cast<CFRefBug&>(getBugType());
       if (!BugTy.isLeak())
         return BugReport::getRanges();
@@ -1714,7 +1787,7 @@
                     CheckerContext &Ctx,
                     bool IncludeAllocationLine);
 
-    PathDiagnosticLocation getLocation(const SourceManager &SM) const {
+    PathDiagnosticLocation getLocation(const SourceManager &SM) const override {
       assert(Location.isValid());
       return Location;
     }
@@ -1918,7 +1991,7 @@
     if (!GCEnabled && std::find(AEffects.begin(), AEffects.end(), Dealloc) !=
                           AEffects.end()) {
       // Determine if the object's reference count was pushed to zero.
-      assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+      assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
       // We may not have transitioned to 'release' if we hit an error.
       // This case is handled elsewhere.
       if (CurrV.getKind() == RefVal::Released) {
@@ -1939,7 +2012,7 @@
 
       if (GCEnabled) {
         // Determine if the object's reference count was pushed to zero.
-        assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+        assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
 
         os << "In GC mode a call to '" << *FD
         <<  "' decrements an object's retain count and registers the "
@@ -1964,7 +2037,7 @@
     }
 
     // Determine if the typestate has changed.
-    if (!(PrevV == CurrV))
+    if (!PrevV.hasSameState(CurrV))
       switch (CurrV.getKind()) {
         case RefVal::Owned:
         case RefVal::NotOwned:
@@ -2212,9 +2285,9 @@
     os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
                                   : " is returned from a function ");
     
-    if (D->getAttr<CFReturnsNotRetainedAttr>())
+    if (D->hasAttr<CFReturnsNotRetainedAttr>())
       os << "that is annotated as CF_RETURNS_NOT_RETAINED";
-    else if (D->getAttr<NSReturnsNotRetainedAttr>())
+    else if (D->hasAttr<NSReturnsNotRetainedAttr>())
       os << "that is annotated as NS_RETURNS_NOT_RETAINED";
     else {
       if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
@@ -2328,24 +2401,25 @@
                     check::PostStmt<ObjCArrayLiteral>,
                     check::PostStmt<ObjCDictionaryLiteral>,
                     check::PostStmt<ObjCBoxedExpr>,
+                    check::PostStmt<ObjCIvarRefExpr>,
                     check::PostCall,
                     check::PreStmt<ReturnStmt>,
                     check::RegionChanges,
                     eval::Assume,
                     eval::Call > {
-  mutable OwningPtr<CFRefBug> useAfterRelease, releaseNotOwned;
-  mutable OwningPtr<CFRefBug> deallocGC, deallocNotOwned;
-  mutable OwningPtr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
-  mutable OwningPtr<CFRefBug> leakWithinFunction, leakAtReturn;
-  mutable OwningPtr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
+  mutable std::unique_ptr<CFRefBug> useAfterRelease, releaseNotOwned;
+  mutable std::unique_ptr<CFRefBug> deallocGC, deallocNotOwned;
+  mutable std::unique_ptr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
+  mutable std::unique_ptr<CFRefBug> leakWithinFunction, leakAtReturn;
+  mutable std::unique_ptr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
 
-  typedef llvm::DenseMap<SymbolRef, const SimpleProgramPointTag *> SymbolTagMap;
+  typedef llvm::DenseMap<SymbolRef, const CheckerProgramPointTag *> SymbolTagMap;
 
   // This map is only used to ensure proper deletion of any allocated tags.
   mutable SymbolTagMap DeadSymbolTags;
 
-  mutable OwningPtr<RetainSummaryManager> Summaries;
-  mutable OwningPtr<RetainSummaryManager> SummariesGC;
+  mutable std::unique_ptr<RetainSummaryManager> Summaries;
+  mutable std::unique_ptr<RetainSummaryManager> SummariesGC;
   mutable SummaryLogTy SummaryLog;
   mutable bool ShouldResetSummaryLog;
 
@@ -2402,17 +2476,18 @@
                                      bool GCEnabled) const {
     if (GCEnabled) {
       if (!leakWithinFunctionGC)
-        leakWithinFunctionGC.reset(new Leak("Leak of object when using "
-                                             "garbage collection"));
+        leakWithinFunctionGC.reset(new Leak(this, "Leak of object when using "
+                                                  "garbage collection"));
       return leakWithinFunctionGC.get();
     } else {
       if (!leakWithinFunction) {
         if (LOpts.getGC() == LangOptions::HybridGC) {
-          leakWithinFunction.reset(new Leak("Leak of object when not using "
+          leakWithinFunction.reset(new Leak(this,
+                                            "Leak of object when not using "
                                             "garbage collection (GC) in "
                                             "dual GC/non-GC code"));
         } else {
-          leakWithinFunction.reset(new Leak("Leak"));
+          leakWithinFunction.reset(new Leak(this, "Leak"));
         }
       }
       return leakWithinFunction.get();
@@ -2422,17 +2497,19 @@
   CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts, bool GCEnabled) const {
     if (GCEnabled) {
       if (!leakAtReturnGC)
-        leakAtReturnGC.reset(new Leak("Leak of returned object when using "
+        leakAtReturnGC.reset(new Leak(this,
+                                      "Leak of returned object when using "
                                       "garbage collection"));
       return leakAtReturnGC.get();
     } else {
       if (!leakAtReturn) {
         if (LOpts.getGC() == LangOptions::HybridGC) {
-          leakAtReturn.reset(new Leak("Leak of returned object when not using "
+          leakAtReturn.reset(new Leak(this,
+                                      "Leak of returned object when not using "
                                       "garbage collection (GC) in dual "
                                       "GC/non-GC code"));
         } else {
-          leakAtReturn.reset(new Leak("Leak of returned object"));
+          leakAtReturn.reset(new Leak(this, "Leak of returned object"));
         }
       }
       return leakAtReturn.get();
@@ -2464,7 +2541,7 @@
   }
 
   void printState(raw_ostream &Out, ProgramStateRef State,
-                  const char *NL, const char *Sep) const;
+                  const char *NL, const char *Sep) const override;
 
   void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
   void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
@@ -2474,6 +2551,8 @@
   void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
   void checkPostStmt(const ObjCBoxedExpr *BE, CheckerContext &C) const;
 
+  void checkPostStmt(const ObjCIvarRefExpr *IRE, CheckerContext &C) const;
+
   void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
                       
   void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
@@ -2542,7 +2621,7 @@
   StopTrackingCallback(ProgramStateRef st) : state(st) {}
   ProgramStateRef getState() const { return state; }
 
-  bool VisitSymbol(SymbolRef sym) {
+  bool VisitSymbol(SymbolRef sym) override {
     state = state->remove<RefBindings>(sym);
     return true;
   }
@@ -2691,6 +2770,20 @@
   C.addTransition(State);
 }
 
+void RetainCountChecker::checkPostStmt(const ObjCIvarRefExpr *IRE,
+                                       CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  // If an instance variable was previously accessed through a property,
+  // it may have a synthesized refcount of +0. Override right now that we're
+  // doing direct access.
+  if (Optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>())
+    if (SymbolRef Sym = State->getSVal(*IVarLoc).getAsSymbol())
+      if (const RefVal *RV = getRefBinding(State, Sym))
+        if (RV->isOverridable())
+          State = removeRefBinding(State, Sym);
+  C.addTransition(State);
+}
+
 void RetainCountChecker::checkPostCall(const CallEvent &Call,
                                        CheckerContext &C) const {
   RetainSummaryManager &Summaries = getSummaryManager(C);
@@ -2731,6 +2824,16 @@
   return RetTy;
 }
 
+static bool wasSynthesizedProperty(const ObjCMethodCall *Call,
+                                   ExplodedNode *N) {
+  if (!Call || !Call->getDecl()->isPropertyAccessor())
+    return false;
+
+  CallExitEnd PP = N->getLocation().castAs<CallExitEnd>();
+  const StackFrameContext *Frame = PP.getCalleeContext();
+  return Frame->getAnalysisDeclContext()->isBodyAutosynthesized();
+}
+
 // We don't always get the exact modeling of the function with regards to the
 // retain count checker even when the function is inlined. For example, we need
 // to stop tracking the symbols which were marked with StopTrackingHard.
@@ -2765,6 +2868,19 @@
     SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
     if (Sym)
       state = removeRefBinding(state, Sym);
+  } else if (RE.getKind() == RetEffect::NotOwnedSymbol) {
+    if (wasSynthesizedProperty(MsgInvocation, C.getPredecessor())) {
+      // Believe the summary if we synthesized the body of a property getter
+      // and the return value is currently untracked. If the corresponding
+      // instance variable is later accessed directly, however, we're going to
+      // want to override this state, so that the owning object can perform
+      // reference counting operations on its own ivars.
+      SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
+      if (Sym && !getRefBinding(state, Sym))
+        state = setRefBinding(state, Sym,
+                              RefVal::makeOverridableNotOwned(RE.getObjKind(),
+                                                              Sym->getType()));
+    }
   }
   
   C.addTransition(state);
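
The hunk above records the result of a synthesized property getter as an "overridable" +0 binding, which checkPostStmt(ObjCIvarRefExpr) earlier in this file's diff then discards when the ivar is accessed directly. Below is a standalone sketch of that provisional-binding idea in plain C++; Binding, Model, and the method names are illustrative only and are not the analyzer's RefVal or program-state API.

#include <cassert>
#include <map>
#include <string>

struct Binding {
  int RetainCount;
  bool Overridable; // not propagated to states derived from this one
};

class Model {
  std::map<std::string, Binding> Bindings;

public:
  void recordSynthesizedGetterResult(const std::string &Sym) {
    // Believe the +0 summary for now, but allow it to be overridden later.
    if (!Bindings.count(Sym))
      Bindings[Sym] = {0, /*Overridable=*/true};
  }

  void recordDirectIvarAccess(const std::string &Sym) {
    // Direct access invalidates the provisional assumption: forget it so the
    // owning object may retain/release its own ivar without complaints.
    auto It = Bindings.find(Sym);
    if (It != Bindings.end() && It->second.Overridable)
      Bindings.erase(It);
  }

  bool isTracked(const std::string &Sym) const { return Bindings.count(Sym); }
};

int main() {
  Model M;
  M.recordSynthesizedGetterResult("ivarValue");
  assert(M.isTracked("ivarValue"));
  M.recordDirectIvarAccess("ivarValue");
  assert(!M.isTracked("ivarValue")); // provisional state was overridden
}
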
@@ -2857,7 +2973,6 @@
     }
 
     case RetEffect::GCNotOwnedSymbol:
-    case RetEffect::ARCNotOwnedSymbol:
     case RetEffect::NotOwnedSymbol: {
       const Expr *Ex = CallOrMsg.getOriginExpr();
       SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
@@ -3053,22 +3168,22 @@
       llvm_unreachable("Unhandled error.");
     case RefVal::ErrorUseAfterRelease:
       if (!useAfterRelease)
-        useAfterRelease.reset(new UseAfterRelease());
+        useAfterRelease.reset(new UseAfterRelease(this));
       BT = &*useAfterRelease;
       break;
     case RefVal::ErrorReleaseNotOwned:
       if (!releaseNotOwned)
-        releaseNotOwned.reset(new BadRelease());
+        releaseNotOwned.reset(new BadRelease(this));
       BT = &*releaseNotOwned;
       break;
     case RefVal::ErrorDeallocGC:
       if (!deallocGC)
-        deallocGC.reset(new DeallocGC());
+        deallocGC.reset(new DeallocGC(this));
       BT = &*deallocGC;
       break;
     case RefVal::ErrorDeallocNotOwned:
       if (!deallocNotOwned)
-        deallocNotOwned.reset(new DeallocNotOwned());
+        deallocNotOwned.reset(new DeallocNotOwned(this));
       BT = &*deallocNotOwned;
       break;
   }
@@ -3232,8 +3347,7 @@
     return;
 
   // Update the autorelease counts.
-  static SimpleProgramPointTag
-         AutoreleaseTag("RetainCountChecker : Autorelease");
+  static CheckerProgramPointTag AutoreleaseTag(this, "Autorelease");
   state = handleAutoreleaseCounts(state, Pred, &AutoreleaseTag, C, Sym, X);
 
   // Did we cache out?
@@ -3294,8 +3408,7 @@
         // Generate an error node.
         state = setRefBinding(state, Sym, X);
 
-        static SimpleProgramPointTag
-               ReturnOwnLeakTag("RetainCountChecker : ReturnsOwnLeak");
+        static CheckerProgramPointTag ReturnOwnLeakTag(this, "ReturnsOwnLeak");
         ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
         if (N) {
           const LangOptions &LOpts = C.getASTContext().getLangOpts();
@@ -3315,12 +3428,12 @@
       // owned object.
       state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned);
 
-      static SimpleProgramPointTag
-             ReturnNotOwnedTag("RetainCountChecker : ReturnNotOwnedForOwned");
+      static CheckerProgramPointTag ReturnNotOwnedTag(this, 
+                                                      "ReturnNotOwnedForOwned");
       ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
       if (N) {
         if (!returnNotOwnedForOwned)
-          returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned());
+          returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned(this));
 
         CFRefReport *report =
             new CFRefReport(*returnNotOwnedForOwned,
@@ -3375,7 +3488,7 @@
   // false positives.
   if (const VarRegion *LVR = dyn_cast_or_null<VarRegion>(loc.getAsRegion())) {
     const VarDecl *VD = LVR->getDecl();
-    if (VD->getAttr<CleanupAttr>()) {
+    if (VD->hasAttr<CleanupAttr>()) {
       escapes = true;
     }
   }
@@ -3508,7 +3621,7 @@
     os << "has a +" << V.getCount() << " retain count";
 
     if (!overAutorelease)
-      overAutorelease.reset(new OverAutorelease());
+      overAutorelease.reset(new OverAutorelease(this));
 
     const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
     CFRefReport *report =
@@ -3602,13 +3715,13 @@
 
 const ProgramPointTag *
 RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
-  const SimpleProgramPointTag *&tag = DeadSymbolTags[sym];
+  const CheckerProgramPointTag *&tag = DeadSymbolTags[sym];
   if (!tag) {
     SmallString<64> buf;
     llvm::raw_svector_ostream out(buf);
-    out << "RetainCountChecker : Dead Symbol : ";
+    out << "Dead Symbol : ";
     sym->dumpToStream(out);
-    tag = new SimpleProgramPointTag(out.str());
+    tag = new CheckerProgramPointTag(this, out.str());
   }
   return tag;  
 }
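
The RefVal changes above pack the state kind and object kind into narrow bit-fields and assert in the constructor that the values round-trip, so an enumerator that no longer fits is caught immediately. A standalone sketch of that idiom follows; the Kind/ObjKind enums and PackedVal class are made-up illustrations, not the analyzer's types.

#include <cassert>

enum Kind { Owned, NotOwned, Released, ReturnedOwned, ReturnedNotOwned };
enum ObjKind { CF, ObjC, AnyObj };

class PackedVal {
  unsigned Count;
  unsigned RawKind : 5;       // holds a Kind
  unsigned RawObjectKind : 2; // holds an ObjKind
  unsigned IsOverridable : 1; // a flag that must not survive derived states

public:
  PackedVal(Kind K, ObjKind O, unsigned Cnt, bool Overridable = false)
      : Count(Cnt), RawKind(static_cast<unsigned>(K)),
        RawObjectKind(static_cast<unsigned>(O)), IsOverridable(Overridable) {
    assert(getKind() == K && "not enough bits for the kind");
    assert(getObjKind() == O && "not enough bits for the object kind");
  }

  Kind getKind() const { return static_cast<Kind>(RawKind); }
  ObjKind getObjKind() const { return static_cast<ObjKind>(RawObjectKind); }
  bool isOverridable() const { return IsOverridable; }

  // Any mutation yields a "derived" value, so the flag is dropped here,
  // mirroring how clearCounts()/setCount() behave in the hunks above.
  void setCount(unsigned C) {
    Count = C;
    IsOverridable = false;
  }
};

int main() {
  PackedVal V(NotOwned, ObjC, 0, /*Overridable=*/true);
  assert(V.getKind() == NotOwned && V.getObjKind() == ObjC);
  V.setCount(1);
  assert(!V.isOverridable());
}
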
diff --git a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index fe253b7..b1cde6b 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -25,7 +25,8 @@
 namespace {
 class ReturnPointerRangeChecker : 
     public Checker< check::PreStmt<ReturnStmt> > {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
+
 public:
     void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
 };
@@ -69,9 +70,10 @@
     // FIXME: This bug correspond to CWE-466.  Eventually we should have bug
     // types explicitly reference such exploit categories (when applicable).
     if (!BT)
-      BT.reset(new BuiltinBug("Return of pointer value outside of expected range",
-           "Returned pointer value points outside the original object "
-           "(potential buffer overflow)"));
+      BT.reset(new BuiltinBug(
+          this, "Return of pointer value outside of expected range",
+          "Returned pointer value points outside the original object "
+          "(potential buffer overflow)"));
 
     // FIXME: It would be nice to eventually make this diagnostic more clear,
     // e.g., by referencing the original declaration or by saying *why* this
diff --git a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index ed96c40..b4d92d6 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -25,8 +25,8 @@
 
 namespace {
 class ReturnUndefChecker : public Checker< check::PreStmt<ReturnStmt> > {
-  mutable OwningPtr<BuiltinBug> BT_Undef;
-  mutable OwningPtr<BuiltinBug> BT_NullReference;
+  mutable std::unique_ptr<BuiltinBug> BT_Undef;
+  mutable std::unique_ptr<BuiltinBug> BT_NullReference;
 
   void emitUndef(CheckerContext &C, const Expr *RetE) const;
   void checkReference(CheckerContext &C, const Expr *RetE,
@@ -94,16 +94,16 @@
 
 void ReturnUndefChecker::emitUndef(CheckerContext &C, const Expr *RetE) const {
   if (!BT_Undef)
-    BT_Undef.reset(new BuiltinBug("Garbage return value",
-                                  "Undefined or garbage value "
-                                    "returned to caller"));
+    BT_Undef.reset(
+        new BuiltinBug(this, "Garbage return value",
+                       "Undefined or garbage value returned to caller"));
   emitBug(C, *BT_Undef, RetE);
 }
 
 void ReturnUndefChecker::checkReference(CheckerContext &C, const Expr *RetE,
                                         DefinedOrUnknownSVal RetVal) const {
   ProgramStateRef StNonNull, StNull;
-  llvm::tie(StNonNull, StNull) = C.getState()->assume(RetVal);
+  std::tie(StNonNull, StNull) = C.getState()->assume(RetVal);
 
   if (StNonNull) {
     // Going forward, assume the location is non-null.
@@ -113,7 +113,7 @@
 
   // The return value is known to be null. Emit a bug report.
   if (!BT_NullReference)
-    BT_NullReference.reset(new BuiltinBug("Returning null reference"));
+    BT_NullReference.reset(new BuiltinBug(this, "Returning null reference"));
 
   emitBug(C, *BT_NullReference, RetE, bugreporter::getDerefExpr(RetE));
 }
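
The checkers in these files now use std::tie in place of llvm::tie to unpack the pair of program states produced by assume()/assumeDual(), where one half may be infeasible. A minimal standalone illustration of that unpacking pattern is below; State and assumeNonZero() are stand-ins chosen for this sketch, not analyzer APIs.

#include <cassert>
#include <string>
#include <tuple>
#include <utility>

using State = std::string; // an empty string plays the role of an infeasible state

// Returns (state where the value is non-zero, state where it is zero).
static std::pair<State, State> assumeNonZero(int V) {
  return {V != 0 ? State("non-zero") : State(), V == 0 ? State("zero") : State()};
}

int main() {
  State NonZero, Zero;

  std::tie(NonZero, Zero) = assumeNonZero(42);
  assert(!NonZero.empty() && Zero.empty()); // only the non-zero branch is feasible

  std::tie(NonZero, Zero) = assumeNonZero(0);
  assert(NonZero.empty() && !Zero.empty()); // only the zero branch is feasible
}
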
diff --git a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 9ca0ab5..83b15ec 100644
--- a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -54,8 +54,8 @@
 
   mutable IdentifierInfo *IIfopen, *IIfclose;
 
-  OwningPtr<BugType> DoubleCloseBugType;
-  OwningPtr<BugType> LeakBugType;
+  std::unique_ptr<BugType> DoubleCloseBugType;
+  std::unique_ptr<BugType> LeakBugType;
 
   void initIdentifierInfo(ASTContext &Ctx) const;
 
@@ -99,7 +99,7 @@
   StopTrackingCallback(ProgramStateRef st) : state(st) {}
   ProgramStateRef getState() const { return state; }
 
-  bool VisitSymbol(SymbolRef sym) {
+  bool VisitSymbol(SymbolRef sym) override {
     state = state->remove<StreamMap>(sym);
     return true;
   }
@@ -109,11 +109,11 @@
 
 SimpleStreamChecker::SimpleStreamChecker() : IIfopen(0), IIfclose(0) {
   // Initialize the bug types.
-  DoubleCloseBugType.reset(new BugType("Double fclose",
-                                       "Unix Stream API Error"));
+  DoubleCloseBugType.reset(
+      new BugType(this, "Double fclose", "Unix Stream API Error"));
 
-  LeakBugType.reset(new BugType("Resource Leak",
-                                "Unix Stream API Error"));
+  LeakBugType.reset(
+      new BugType(this, "Resource Leak", "Unix Stream API Error"));
   // Sinks are higher importance bugs as well as calls to assert() or exit(0).
   LeakBugType->setSuppressOnSink(true);
 }
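
Throughout these files the bug-type members move from OwningPtr to std::unique_ptr and take the owning checker in their constructor, either built eagerly in the checker's constructor (as in SimpleStreamChecker above) or lazily at the first report. A standalone sketch of the lazy variant follows, using made-up FakeBugType/FakeChecker types rather than the analyzer classes.

#include <cassert>
#include <memory>
#include <string>

struct FakeBugType {
  const void *Checker; // the new constructors record the owning checker
  std::string Name;
  FakeBugType(const void *C, std::string N) : Checker(C), Name(std::move(N)) {}
};

class FakeChecker {
  // mutable: the bug type is created on first use from a const callback.
  mutable std::unique_ptr<FakeBugType> BT;

public:
  const FakeBugType &getBugType() const {
    if (!BT)
      BT.reset(new FakeBugType(this, "Double fclose"));
    return *BT;
  }
};

int main() {
  FakeChecker C;
  const FakeBugType &First = C.getBugType();
  assert(&First == &C.getBugType()); // constructed once, then reused
  assert(First.Checker == &C);
}
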
diff --git a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 4fd778e..327a9e0 100644
--- a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -28,8 +28,8 @@
 namespace {
 class StackAddrEscapeChecker : public Checker< check::PreStmt<ReturnStmt>,
                                                check::EndFunction > {
-  mutable OwningPtr<BuiltinBug> BT_stackleak;
-  mutable OwningPtr<BuiltinBug> BT_returnstack;
+  mutable std::unique_ptr<BuiltinBug> BT_stackleak;
+  mutable std::unique_ptr<BuiltinBug> BT_returnstack;
 
 public:
   void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
@@ -100,8 +100,8 @@
     return;
 
   if (!BT_returnstack)
-   BT_returnstack.reset(
-                 new BuiltinBug("Return of address to stack-allocated memory"));
+    BT_returnstack.reset(
+        new BuiltinBug(this, "Return of address to stack-allocated memory"));
 
   // Generate a report for this bug.
   SmallString<512> buf;
@@ -177,8 +177,8 @@
     {}
     
     bool HandleBinding(StoreManager &SMgr, Store store,
-                       const MemRegion *region, SVal val) {
-      
+                       const MemRegion *region, SVal val) override {
+
       if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
         return true;
       
@@ -217,11 +217,11 @@
 
   if (!BT_stackleak)
     BT_stackleak.reset(
-      new BuiltinBug("Stack address stored into global variable",
-                     "Stack address was saved into a global variable. "
-                     "This is dangerous because the address will become "
-                     "invalid after returning from the function"));
-  
+        new BuiltinBug(this, "Stack address stored into global variable",
+                       "Stack address was saved into a global variable. "
+                       "This is dangerous because the address will become "
+                       "invalid after returning from the function"));
+
   for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
     // Generate a report for this bug.
     SmallString<512> buf;
diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index ffdf2d5..6000942 100644
--- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -62,8 +62,8 @@
                  *II_fwrite, 
                  *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,  
                  *II_clearerr, *II_feof, *II_ferror, *II_fileno;
-  mutable OwningPtr<BuiltinBug> BT_nullfp, BT_illegalwhence,
-                                      BT_doubleclose, BT_ResourceLeak;
+  mutable std::unique_ptr<BuiltinBug> BT_nullfp, BT_illegalwhence,
+      BT_doubleclose, BT_ResourceLeak;
 
 public:
   StreamChecker() 
@@ -218,7 +218,7 @@
   // Bifurcate the state into two: one with a valid FILE* pointer, the other
   // with a NULL.
   ProgramStateRef stateNotNull, stateNull;
-  llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
+  std::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
   
   if (SymbolRef Sym = RetVal.getAsSymbol()) {
     // if RetVal is not NULL, set the symbol's state to Opened.
@@ -270,9 +270,10 @@
 
   if (ExplodedNode *N = C.addTransition(state)) {
     if (!BT_illegalwhence)
-      BT_illegalwhence.reset(new BuiltinBug("Illegal whence argument",
-					"The whence argument to fseek() should be "
-					"SEEK_SET, SEEK_END, or SEEK_CUR."));
+      BT_illegalwhence.reset(
+          new BuiltinBug(this, "Illegal whence argument",
+                         "The whence argument to fseek() should be "
+                         "SEEK_SET, SEEK_END, or SEEK_CUR."));
     BugReport *R = new BugReport(*BT_illegalwhence, 
 				 BT_illegalwhence->getDescription(), N);
     C.emitReport(R);
@@ -343,13 +344,13 @@
 
   ConstraintManager &CM = C.getConstraintManager();
   ProgramStateRef stateNotNull, stateNull;
-  llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
+  std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
 
   if (!stateNotNull && stateNull) {
     if (ExplodedNode *N = C.generateSink(stateNull)) {
       if (!BT_nullfp)
-        BT_nullfp.reset(new BuiltinBug("NULL stream pointer",
-                                     "Stream pointer might be NULL."));
+        BT_nullfp.reset(new BuiltinBug(this, "NULL stream pointer",
+                                       "Stream pointer might be NULL."));
       BugReport *R =new BugReport(*BT_nullfp, BT_nullfp->getDescription(), N);
       C.emitReport(R);
     }
@@ -378,9 +379,9 @@
     ExplodedNode *N = C.generateSink();
     if (N) {
       if (!BT_doubleclose)
-        BT_doubleclose.reset(new BuiltinBug("Double fclose",
-                                        "Try to close a file Descriptor already"
-                                        " closed. Cause undefined behaviour."));
+        BT_doubleclose.reset(new BuiltinBug(
+            this, "Double fclose", "Try to close a file Descriptor already"
+                                   " closed. Cause undefined behaviour."));
       BugReport *R = new BugReport(*BT_doubleclose,
                                    BT_doubleclose->getDescription(), N);
       C.emitReport(R);
@@ -407,8 +408,9 @@
       ExplodedNode *N = C.generateSink();
       if (N) {
         if (!BT_ResourceLeak)
-          BT_ResourceLeak.reset(new BuiltinBug("Resource Leak", 
-                         "Opened File never closed. Potential Resource leak."));
+          BT_ResourceLeak.reset(new BuiltinBug(
+              this, "Resource Leak",
+              "Opened File never closed. Potential Resource leak."));
         BugReport *R = new BugReport(*BT_ResourceLeak, 
                                      BT_ResourceLeak->getDescription(), N);
         C.emitReport(R);
diff --git a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index 264f7f9..d33c977 100644
--- a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -22,7 +22,7 @@
 namespace {
 class TaintTesterChecker : public Checker< check::PostStmt<Expr> > {
 
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
   void initBugType() const;
 
   /// Given a pointer argument, get the symbol of the value it contains
@@ -38,7 +38,7 @@
 
 inline void TaintTesterChecker::initBugType() const {
   if (!BT)
-    BT.reset(new BugType("Tainted data", "General"));
+    BT.reset(new BugType(this, "Tainted data", "General"));
 }
 
 void TaintTesterChecker::checkPostStmt(const Expr *E,
diff --git a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 8235e68..22e2155 100644
--- a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -24,7 +24,7 @@
 namespace {
 
 class UndefBranchChecker : public Checker<check::BranchCondition> {
-  mutable OwningPtr<BuiltinBug> BT;
+  mutable std::unique_ptr<BuiltinBug> BT;
 
   struct FindUndefExpr {
     ProgramStateRef St;
@@ -67,8 +67,8 @@
     ExplodedNode *N = Ctx.generateSink();
     if (N) {
       if (!BT)
-        BT.reset(
-               new BuiltinBug("Branch condition evaluates to a garbage value"));
+        BT.reset(new BuiltinBug(
+            this, "Branch condition evaluates to a garbage value"));
 
       // What's going on here: we want to highlight the subexpression of the
       // condition that is the most likely source of the "uninitialized
diff --git a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 93812f7..93fe7d4 100644
--- a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -27,7 +27,7 @@
 namespace {
 class UndefCapturedBlockVarChecker
   : public Checker< check::PostStmt<BlockExpr> > {
- mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
 
 public:
   void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
@@ -71,7 +71,7 @@
     const VarRegion *VR = I.getCapturedRegion();
     const VarDecl *VD = VR->getDecl();
 
-    if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage())
+    if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage())
       continue;
 
     // Get the VarRegion associated with VD in the local stack frame.
@@ -79,7 +79,8 @@
           state->getSVal(I.getOriginalRegion()).getAs<UndefinedVal>()) {
       if (ExplodedNode *N = C.generateSink()) {
         if (!BT)
-          BT.reset(new BuiltinBug("uninitialized variable captured by block"));
+          BT.reset(
+              new BuiltinBug(this, "uninitialized variable captured by block"));
 
         // Generate a bug report.
         SmallString<128> buf;
diff --git a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 3f6549d..00fd971 100644
--- a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -28,8 +28,8 @@
 class UndefResultChecker 
   : public Checker< check::PostStmt<BinaryOperator> > {
 
-  mutable OwningPtr<BugType> BT;
-  
+  mutable std::unique_ptr<BugType> BT;
+
 public:
   void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
 };
@@ -55,7 +55,8 @@
       return;
     
     if (!BT)
-      BT.reset(new BuiltinBug("Result of operation is garbage or undefined"));
+      BT.reset(
+          new BuiltinBug(this, "Result of operation is garbage or undefined"));
 
     SmallString<256> sbuf;
     llvm::raw_svector_ostream OS(sbuf);
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index 5df8846..e952671 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -25,7 +25,7 @@
 namespace {
 class UndefinedArraySubscriptChecker
   : public Checker< check::PreStmt<ArraySubscriptExpr> > {
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
 
 public:
   void checkPreStmt(const ArraySubscriptExpr *A, CheckerContext &C) const;
@@ -50,7 +50,7 @@
   if (!N)
     return;
   if (!BT)
-    BT.reset(new BuiltinBug("Array subscript is undefined"));
+    BT.reset(new BuiltinBug(this, "Array subscript is undefined"));
 
   // Generate a report for this bug.
   BugReport *R = new BugReport(*BT, BT->getName(), N);
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 016e3c8..30775d5 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -24,7 +24,7 @@
 namespace {
 class UndefinedAssignmentChecker
   : public Checker<check::Bind> {
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
 
 public:
   void checkBind(SVal location, SVal val, const Stmt *S,
@@ -54,7 +54,7 @@
   const char *str = "Assigned value is garbage or undefined";
 
   if (!BT)
-    BT.reset(new BuiltinBug(str));
+    BT.reset(new BuiltinBug(this, str));
 
   // Generate a report for this bug.
   const Expr *ex = 0;
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 4ea07e2..aa7e25bc 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -30,7 +30,7 @@
 
 namespace {
 class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
-  mutable OwningPtr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
+  mutable std::unique_ptr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
   mutable Optional<uint64_t> Val_O_CREAT;
 
 public:
@@ -57,21 +57,15 @@
                             const unsigned numArgs,
                             const unsigned sizeArg,
                             const char *fn) const;
+  void LazyInitialize(std::unique_ptr<BugType> &BT, const char *name) const {
+    if (BT)
+      return;
+    BT.reset(new BugType(this, name, categories::UnixAPI));
+  }
 };
 } //end anonymous namespace
 
 //===----------------------------------------------------------------------===//
-// Utility functions.
-//===----------------------------------------------------------------------===//
-
-static inline void LazyInitialize(OwningPtr<BugType> &BT,
-                                  const char *name) {
-  if (BT)
-    return;
-  BT.reset(new BugType(name, categories::UnixAPI));
-}
-
-//===----------------------------------------------------------------------===//
 // "open" (man 2 open)
 //===----------------------------------------------------------------------===//
 
@@ -86,6 +80,7 @@
       // FIXME: We need a more general way of getting the O_CREAT value.
       // We could possibly grovel through the preprocessor state, but
       // that would require passing the Preprocessor object to the ExprEngine.
+      // See also: MallocChecker.cpp / M_ZERO.
       return;
     }
   }
@@ -119,7 +114,7 @@
 
   // Check if maskedFlags is non-zero.
   ProgramStateRef trueState, falseState;
-  llvm::tie(trueState, falseState) = state->assume(maskedFlags);
+  std::tie(trueState, falseState) = state->assume(maskedFlags);
 
   // Only emit an error if the value of 'maskedFlags' is properly
   // constrained;
@@ -199,7 +194,7 @@
                                 const SVal argVal,
                                 ProgramStateRef *trueState,
                                 ProgramStateRef *falseState) {
-  llvm::tie(*trueState, *falseState) =
+  std::tie(*trueState, *falseState) =
     state->assume(argVal.castAs<DefinedSVal>());
   
   return (*falseState && !*trueState);
@@ -207,7 +202,7 @@
 
 // Generates an error report, indicating that the function whose name is given
 // will perform a zero byte allocation.
-// Returns false if an error occured, true otherwise.
+// Returns false if an error occurred, true otherwise.
 bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
                                               ProgramStateRef falseState,
                                               const Expr *arg,
@@ -217,7 +212,7 @@
     return false;
 
   LazyInitialize(BT_mallocZero,
-    "Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
+                 "Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
 
   SmallString<256> S;
   llvm::raw_svector_ostream os(S);    
diff --git a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index a40b5a3..66c1acd 100644
--- a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -136,7 +136,7 @@
            ci != ce; ++ci) {
         if (Optional<CFGStmt> S = (*ci).getAs<CFGStmt>())
           if (const CallExpr *CE = dyn_cast<CallExpr>(S->getStmt())) {
-            if (CE->isBuiltinCall() == Builtin::BI__builtin_unreachable) {
+            if (CE->getBuiltinCallee() == Builtin::BI__builtin_unreachable) {
               foundUnreachable = true;
               break;
             }
@@ -165,7 +165,7 @@
     if (SM.isInSystemHeader(SL) || SM.isInExternCSystemHeader(SL))
       continue;
 
-    B.EmitBasicReport(D, "Unreachable code", "Dead code",
+    B.EmitBasicReport(D, this, "Unreachable code", "Dead code",
                       "This statement is never executed", DL, SR);
   }
 }
@@ -178,6 +178,9 @@
 
   for (CFGBlock::const_pred_iterator I = CB->pred_begin(), E = CB->pred_end();
       I != E; ++I) {
+    if (!*I)
+      continue;
+
     if (!reachable.count((*I)->getBlockID())) {
       // If we find an unreachable predecessor, mark this block as reachable so
       // we don't report this block
@@ -219,6 +222,8 @@
     return false;
 
   const CFGBlock *pred = *CB->pred_begin();
+  if (!pred)
+    return false;
 
-  // Get the predecessor block's terminator conditon
+  // Get the predecessor block's terminator condition
   const Stmt *cond = pred->getTerminatorCondition();
@@ -242,7 +247,7 @@
 bool UnreachableCodeChecker::isEmptyCFGBlock(const CFGBlock *CB) {
   return CB->getLabel() == 0       // No labels
       && CB->size() == 0           // No statements
-      && CB->getTerminator() == 0; // No terminator
+      && !CB->getTerminator();     // No terminator
 }
 
 void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
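
Besides the getBuiltinCallee() rename, this file gains two null checks because a CFGBlock's predecessor slots may be null once unreachable edges have been optimized away. The same defensive skip-null-entries walk, reduced to a toy graph node (Block here is illustrative, not the Clang CFGBlock):

#include <iostream>
#include <vector>

struct Block {
  int ID;
  std::vector<const Block *> Preds;   // entries may be null
};

// Count real predecessors, skipping null slots instead of dereferencing them.
static int countRealPreds(const Block &B) {
  int N = 0;
  for (const Block *P : B.Preds) {
    if (!P)
      continue;      // a null predecessor slot: nothing to inspect
    ++N;             // safe to look at *P from here on
  }
  return N;
}

int main() {
  Block A{1, {}};
  Block B{2, {&A, nullptr}};
  std::cout << countRealPreds(B) << "\n";   // prints 1
}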
diff --git a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 30aef06..c7b2024 100644
--- a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -29,7 +29,7 @@
 
 namespace {
 class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
-  mutable OwningPtr<BugType> BT;
+  mutable std::unique_ptr<BugType> BT;
   enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted };
 
   void reportBug(VLASize_Kind Kind,
@@ -51,7 +51,8 @@
     return;
 
   if (!BT)
-    BT.reset(new BuiltinBug("Dangerous variable-length array (VLA) declaration"));
+    BT.reset(new BuiltinBug(
+        this, "Dangerous variable-length array (VLA) declaration"));
 
   SmallString<256> buf;
   llvm::raw_svector_ostream os(buf);
@@ -113,7 +114,7 @@
   DefinedSVal sizeD = sizeV.castAs<DefinedSVal>();
 
   ProgramStateRef stateNotZero, stateZero;
-  llvm::tie(stateNotZero, stateZero) = state->assume(sizeD);
+  std::tie(stateNotZero, stateZero) = state->assume(sizeD);
 
   if (stateZero && !stateNotZero) {
     reportBug(VLA_Zero, SE, stateZero, C);
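
state->assume() hands back a pair of program states, the one where the size is non-zero and the one where it is zero, and the patch now unpacks that pair with std::tie instead of llvm::tie. A standalone sketch of the idiom (splitOn is a made-up stand-in for assume()):

#include <iostream>
#include <tuple>
#include <utility>

// Hypothetical stand-in for ProgramState::assume(): returns whether the
// "assumed true" and "assumed false" outcomes are feasible.
static std::pair<bool, bool> splitOn(int value) {
  return std::make_pair(value != 0, value == 0);
}

int main() {
  bool canBeNonZero, canBeZero;
  std::tie(canBeNonZero, canBeZero) = splitOn(0);

  if (canBeZero && !canBeNonZero)
    std::cout << "definitely zero\n";      // analogous to reporting VLA_Zero
  else if (canBeNonZero && !canBeZero)
    std::cout << "definitely non-zero\n";
  else
    std::cout << "unconstrained\n";
}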
diff --git a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index 7b6adbf..9b5c852 100644
--- a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -28,6 +28,7 @@
 namespace {
 
 class WalkAST : public StmtVisitor<WalkAST> {
+  const CheckerBase *Checker;
   BugReporter &BR;
   AnalysisDeclContext *AC;
 
@@ -58,11 +59,10 @@
   const CallExpr *visitingCallExpr;
   
 public:
-  WalkAST(BugReporter &br, AnalysisDeclContext *ac)
-    : BR(br),
-      AC(ac),
-      visitingCallExpr(0) {}
-  
+  WalkAST(const CheckerBase *checker, BugReporter &br,
+          AnalysisDeclContext *ac)
+      : Checker(checker), BR(br), AC(ac), visitingCallExpr(0) {}
+
   bool hasWork() const { return !WList.empty(); }
 
   /// This method adds a CallExpr to the worklist and marks the callee as
@@ -187,21 +187,19 @@
   if (isPure) {
     os << "\n" <<  "Call pure virtual functions during construction or "
        << "destruction may leads undefined behaviour";
-    BR.EmitBasicReport(AC->getDecl(),
+    BR.EmitBasicReport(AC->getDecl(), Checker,
                        "Call pure virtual function during construction or "
                        "Destruction",
-                       "Cplusplus",
-                       os.str(), CELoc, R);
+                       "Cplusplus", os.str(), CELoc, R);
     return;
   }
   else {
     os << "\n" << "Call virtual functions during construction or "
        << "destruction will never go to a more derived class";
-    BR.EmitBasicReport(AC->getDecl(),
+    BR.EmitBasicReport(AC->getDecl(), Checker,
                        "Call virtual function during construction or "
                        "Destruction",
-                       "Cplusplus",
-                       os.str(), CELoc, R);
+                       "Cplusplus", os.str(), CELoc, R);
     return;
   }
 }
@@ -215,11 +213,10 @@
 public:
   void checkASTDecl(const CXXRecordDecl *RD, AnalysisManager& mgr,
                     BugReporter &BR) const {
-    WalkAST walker(BR, mgr.getAnalysisDeclContext(RD));
+    WalkAST walker(this, BR, mgr.getAnalysisDeclContext(RD));
 
     // Check the constructors.
-    for (CXXRecordDecl::ctor_iterator I = RD->ctor_begin(), E = RD->ctor_end();
-         I != E; ++I) {
+    for (const auto *I : RD->ctors()) {
       if (!I->isCopyOrMoveConstructor())
         if (Stmt *Body = I->getBody()) {
           walker.Visit(Body);
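
WalkAST now remembers the CheckerBase* that created it so every EmitBasicReport call can be attributed to a named check. The shape is simply a visitor holding a non-owning back-pointer to its creator; a minimal sketch with invented names:

#include <iostream>
#include <string>

struct CheckerLike {
  std::string Name;
  explicit CheckerLike(std::string N) : Name(std::move(N)) {}
};

// A small "walker" that reports findings on behalf of the checker that
// created it; it does not own the checker, it only refers back to it.
class Walker {
  const CheckerLike *Checker;   // non-owning back-pointer
public:
  explicit Walker(const CheckerLike *C) : Checker(C) {}

  void report(const std::string &What) const {
    std::cout << "[" << Checker->Name << "] " << What << "\n";
  }
};

int main() {
  CheckerLike VC("cplusplus.VirtualCall");
  Walker W(&VC);                 // analogous to WalkAST walker(this, BR, ...)
  W.report("virtual call during construction");
}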
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 9dcf58b..1bea96e 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -134,6 +134,12 @@
                           /*Default=*/true);
 }
 
+bool AnalyzerOptions::mayInlineCXXAllocator() {
+  return getBooleanOption(InlineCXXAllocator,
+                          "c++-allocator-inlining",
+                          /*Default=*/false);
+}
+
 bool AnalyzerOptions::mayInlineCXXContainerCtorsAndDtors() {
   return getBooleanOption(InlineCXXContainerCtorsAndDtors,
                           "c++-container-inlining",
diff --git a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index a6c400f..8f93cee 100644
--- a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -177,7 +177,7 @@
 
       uint64_t Amt = V2.getZExtValue();
 
-      if (Amt > V1.getBitWidth())
+      if (Amt >= V1.getBitWidth())
         return NULL;
 
       return &getValue( V1.operator<<( (unsigned) Amt ));
@@ -195,7 +195,7 @@
 
       uint64_t Amt = V2.getZExtValue();
 
-      if (Amt > V1.getBitWidth())
+      if (Amt >= V1.getBitWidth())
         return NULL;
 
       return &getValue( V1.operator>>( (unsigned) Amt ));
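
Tightening the guard from > to >= matters because shifting an N-bit integer by N or more positions is undefined behaviour in C and C++; the old check still let a shift by exactly the bit width through. A small standalone guard with the corrected bound (evalShl is a made-up helper, not the BasicValueFactory interface):

#include <cstdint>
#include <iostream>

// Returns true and writes the result only when the shift is well defined:
// the amount must be strictly less than the operand's bit width.
static bool evalShl(uint32_t value, uint64_t amount, uint32_t &result) {
  const unsigned BitWidth = 32;
  if (amount >= BitWidth)        // '>' here would wrongly allow a shift by 32
    return false;
  result = value << static_cast<unsigned>(amount);
  return true;
}

int main() {
  uint32_t r;
  std::cout << evalShl(1u, 31, r) << "\n";   // 1: defined, r == 0x80000000
  std::cout << evalShl(1u, 32, r) << "\n";   // 0: rejected, would be UB
}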
diff --git a/lib/StaticAnalyzer/Core/BlockCounter.cpp b/lib/StaticAnalyzer/Core/BlockCounter.cpp
index 74d761e..c1ac03d 100644
--- a/lib/StaticAnalyzer/Core/BlockCounter.cpp
+++ b/lib/StaticAnalyzer/Core/BlockCounter.cpp
@@ -34,8 +34,7 @@
   }
 
   bool operator<(const CountKey &RHS) const {
-    return (CallSite == RHS.CallSite) ? (BlockID < RHS.BlockID) 
-                                      : (CallSite < RHS.CallSite);
+    return std::tie(CallSite, BlockID) < std::tie(RHS.CallSite, RHS.BlockID);
   }
 
   void Profile(llvm::FoldingSetNodeID &ID) const {
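
Rewriting operator< with std::tie is the standard one-liner for a lexicographic ordering over several members, and it is harder to get subtly wrong than the hand-rolled ternary it replaces. A self-contained version with illustrative fields standing in for CountKey's:

#include <iostream>
#include <set>
#include <tuple>

struct Key {
  unsigned CallSiteID;
  unsigned BlockID;

  bool operator<(const Key &RHS) const {
    // Compares CallSiteID first and falls back to BlockID on ties.
    return std::tie(CallSiteID, BlockID) < std::tie(RHS.CallSiteID, RHS.BlockID);
  }
};

int main() {
  std::set<Key> S;
  S.insert(Key{0, 2});
  S.insert(Key{0, 1});
  S.insert(Key{1, 0});
  std::cout << S.size() << "\n";   // 3: all keys compare as distinct
}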
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index 1940fa7..a08a226 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -20,8 +20,8 @@
 #include "clang/AST/Expr.h"
 #include "clang/AST/ExprCXX.h"
 #include "clang/AST/ParentMap.h"
-#include "clang/AST/StmtObjC.h"
 #include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
 #include "clang/Analysis/CFG.h"
 #include "clang/Analysis/ProgramPoint.h"
 #include "clang/Basic/SourceManager.h"
@@ -30,11 +30,11 @@
 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Support/raw_ostream.h"
+#include <memory>
 #include <queue>
 
 using namespace clang;
@@ -265,7 +265,7 @@
         I = Pieces.erase(I);
         continue;
       } else if (End && isa<CXXDefaultInitExpr>(End)) {
-        PathPieces::iterator Next = llvm::next(I);
+        PathPieces::iterator Next = std::next(I);
         if (Next != E) {
           if (PathDiagnosticControlFlowPiece *NextCF =
                 dyn_cast<PathDiagnosticControlFlowPiece>(*Next)) {
@@ -311,7 +311,7 @@
 public:
   NodeMapClosure(InterExplodedGraphMap &m) : M(m) {}
 
-  const ExplodedNode *getOriginalNode(const ExplodedNode *N) {
+  const ExplodedNode *getOriginalNode(const ExplodedNode *N) override {
     return M.lookup(N);
   }
 };
@@ -345,7 +345,7 @@
     return getParentMap().getParent(S);
   }
 
-  virtual NodeMapClosure& getNodeResolver() { return NMC; }
+  NodeMapClosure& getNodeResolver() override { return NMC; }
 
   PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
 
@@ -1263,8 +1263,8 @@
           SVal ChildV = State->getSVal(child, LCtx);
           R.markInteresting(ChildV);
         }
-        break;
       }
+      break;
     }
   }
   
@@ -2742,12 +2742,10 @@
     assert(!Location.isValid() &&
      "Either Location or ErrorNode should be specified but not both.");
     return PathDiagnosticLocation::createEndOfPath(ErrorNode, SM);
-  } else {
-    assert(Location.isValid());
-    return Location;
   }
 
-  return PathDiagnosticLocation();
+  assert(Location.isValid());
+  return Location;
 }
 
 //===----------------------------------------------------------------------===//
@@ -2802,9 +2800,7 @@
   // EmitBasicReport.
   // FIXME: There are leaks from checkers that assume that the BugTypes they
   // create will be destroyed by the BugReporter.
-  for (llvm::StringMap<BugType*>::iterator
-         I = StrBugTypes.begin(), E = StrBugTypes.end(); I != E; ++I)
-    delete I->second;
+  llvm::DeleteContainerSeconds(StrBugTypes);
 
   // Remove all references to the BugType objects.
   BugTypes = F.getEmptySet();
@@ -2820,7 +2816,7 @@
 class ReportGraph {
 public:
   InterExplodedGraphMap BackMap;
-  OwningPtr<ExplodedGraph> Graph;
+  std::unique_ptr<ExplodedGraph> Graph;
   const ExplodedNode *ErrorNode;
   size_t Index;
 };
@@ -2835,7 +2831,7 @@
   typedef std::pair<const ExplodedNode *, size_t> NodeIndexPair;
   SmallVector<NodeIndexPair, 32> ReportNodes;
 
-  OwningPtr<ExplodedGraph> G;
+  std::unique_ptr<ExplodedGraph> G;
 
   /// A helper class for sorting ExplodedNodes by priority.
   template <bool Descending>
@@ -2906,7 +2902,7 @@
 
     PriorityMapTy::iterator PriorityEntry;
     bool IsNew;
-    llvm::tie(PriorityEntry, IsNew) =
+    std::tie(PriorityEntry, IsNew) =
       PriorityMap.insert(std::make_pair(Node, Priority));
     ++Priority;
 
@@ -2935,7 +2931,7 @@
     return false;
 
   const ExplodedNode *OrigN;
-  llvm::tie(OrigN, GraphWrapper.Index) = ReportNodes.pop_back_val();
+  std::tie(OrigN, GraphWrapper.Index) = ReportNodes.pop_back_val();
   assert(PriorityMap.find(OrigN) != PriorityMap.end() &&
          "error node not accessible from root");
 
@@ -3247,6 +3243,9 @@
 }
 
 void BugReporter::emitReport(BugReport* R) {
+  // To guarantee memory release.
+  std::unique_ptr<BugReport> UniqueR(R);
+
   // Defensive checking: throw the bug away if it comes from a BodyFarm-
   // generated body. We do this very early because report processing relies
   // on the report's location being valid.
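
Wrapping the incoming raw pointer in a std::unique_ptr at the top of emitReport guarantees the report is destroyed on every early return; release() is called only where another object genuinely takes ownership. The "own immediately, release on hand-off" shape in isolation (Report/Sink are invented types):

#include <iostream>
#include <memory>
#include <vector>

struct Report {
  bool Valid;
  ~Report() { std::cout << "report destroyed\n"; }
};

// Takes ownership of raw Report* pointers once accepted.
struct Sink {
  std::vector<Report *> Owned;
  ~Sink() { for (Report *R : Owned) delete R; }
};

void emit(Sink &S, Report *R) {
  std::unique_ptr<Report> UniqueR(R);   // own it immediately
  if (!UniqueR->Valid)
    return;                             // early return: UniqueR frees it
  S.Owned.push_back(UniqueR.release()); // hand-off: Sink now owns it
}

int main() {
  Sink S;
  emit(S, new Report{false});   // rejected and freed right away
  emit(S, new Report{true});    // accepted; freed when S is destroyed
}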
@@ -3277,12 +3276,12 @@
   BugReportEquivClass* EQ = EQClasses.FindNodeOrInsertPos(ID, InsertPos);
 
   if (!EQ) {
-    EQ = new BugReportEquivClass(R);
+    EQ = new BugReportEquivClass(UniqueR.release());
     EQClasses.InsertNode(EQ, InsertPos);
     EQClassesVector.push_back(EQ);
   }
   else
-    EQ->AddReport(R);
+    EQ->AddReport(UniqueR.release());
 }
 
 
@@ -3403,10 +3402,8 @@
   SmallVector<BugReport*, 10> bugReports;
   BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports);
   if (exampleReport) {
-    const PathDiagnosticConsumers &C = getPathDiagnosticConsumers();
-    for (PathDiagnosticConsumers::const_iterator I=C.begin(),
-                                                 E=C.end(); I != E; ++I) {
-      FlushReport(exampleReport, **I, bugReports);
+    for (PathDiagnosticConsumer *PDC : getPathDiagnosticConsumers()) {
+      FlushReport(exampleReport, *PDC, bugReports);
     }
   }
 }
@@ -3419,14 +3416,13 @@
   // Probably doesn't make a difference in practice.
   BugType& BT = exampleReport->getBugType();
 
-  OwningPtr<PathDiagnostic>
-    D(new PathDiagnostic(exampleReport->getDeclWithIssue(),
-                         exampleReport->getBugType().getName(),
-                         exampleReport->getDescription(),
-                         exampleReport->getShortDescription(/*Fallback=*/false),
-                         BT.getCategory(),
-                         exampleReport->getUniqueingLocation(),
-                         exampleReport->getUniqueingDecl()));
+  std::unique_ptr<PathDiagnostic> D(new PathDiagnostic(
+      exampleReport->getBugType().getCheckName(),
+      exampleReport->getDeclWithIssue(), exampleReport->getBugType().getName(),
+      exampleReport->getDescription(),
+      exampleReport->getShortDescription(/*Fallback=*/false), BT.getCategory(),
+      exampleReport->getUniqueingLocation(),
+      exampleReport->getUniqueingDecl()));
 
   MaxBugClassSize = std::max(bugReports.size(),
                              static_cast<size_t>(MaxBugClassSize));
@@ -3455,7 +3451,7 @@
     PathDiagnosticPiece *piece =
       new PathDiagnosticEventPiece(L, exampleReport->getDescription());
     BugReport::ranges_iterator Beg, End;
-    llvm::tie(Beg, End) = exampleReport->getRanges();
+    std::tie(Beg, End) = exampleReport->getRanges();
     for ( ; Beg != End; ++Beg)
       piece->addRange(*Beg);
     D->setEndOfPath(piece);
@@ -3468,17 +3464,25 @@
     D->addMeta(*i);
   }
 
-  PD.HandlePathDiagnostic(D.take());
+  PD.HandlePathDiagnostic(D.release());
 }
 
 void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
-                                  StringRef name,
-                                  StringRef category,
+                                  const CheckerBase *Checker,
+                                  StringRef Name, StringRef Category,
+                                  StringRef Str, PathDiagnosticLocation Loc,
+                                  ArrayRef<SourceRange> Ranges) {
+  EmitBasicReport(DeclWithIssue, Checker->getCheckName(), Name, Category, Str,
+                  Loc, Ranges);
+}
+void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
+                                  CheckName CheckName,
+                                  StringRef name, StringRef category,
                                   StringRef str, PathDiagnosticLocation Loc,
                                   ArrayRef<SourceRange> Ranges) {
 
   // 'BT' is owned by BugReporter.
-  BugType *BT = getBugTypeForName(name, category);
+  BugType *BT = getBugTypeForName(CheckName, name, category);
   BugReport *R = new BugReport(*BT, str, Loc);
   R->setDeclWithIssue(DeclWithIssue);
   for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
@@ -3487,22 +3491,22 @@
   emitReport(R);
 }
 
-BugType *BugReporter::getBugTypeForName(StringRef name,
+BugType *BugReporter::getBugTypeForName(CheckName CheckName, StringRef name,
                                         StringRef category) {
   SmallString<136> fullDesc;
-  llvm::raw_svector_ostream(fullDesc) << name << ":" << category;
+  llvm::raw_svector_ostream(fullDesc) << CheckName.getName() << ":" << name
+                                      << ":" << category;
   llvm::StringMapEntry<BugType *> &
       entry = StrBugTypes.GetOrCreateValue(fullDesc);
   BugType *BT = entry.getValue();
   if (!BT) {
-    BT = new BugType(name, category);
+    BT = new BugType(CheckName, name, category);
     entry.setValue(BT);
   }
   return BT;
 }
 
-
-void PathPieces::dump() const {
+LLVM_DUMP_METHOD void PathPieces::dump() const {
   unsigned index = 0;
   for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) {
     llvm::errs() << "[" << index++ << "]  ";
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index e1a92b3..ffc6ee5 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -115,7 +115,7 @@
     PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager());
 
   BugReport::ranges_iterator Beg, End;
-  llvm::tie(Beg, End) = BR.getRanges();
+  std::tie(Beg, End) = BR.getRanges();
 
   // Only add the statement itself as a range if we didn't specify any
   // special ranges for this report.
@@ -156,7 +156,7 @@
     return static_cast<void *>(&Tag);
   }
 
-  virtual void Profile(llvm::FoldingSetNodeID &ID) const {
+  void Profile(llvm::FoldingSetNodeID &ID) const override {
     ID.AddPointer(ReturnVisitor::getTag());
     ID.AddPointer(StackFrame);
     ID.AddBoolean(EnableNullFPSuppression);
@@ -386,7 +386,7 @@
   PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
                                  const ExplodedNode *PrevN,
                                  BugReporterContext &BRC,
-                                 BugReport &BR) {
+                                 BugReport &BR) override {
     switch (Mode) {
     case Initial:
       return visitNodeInitial(N, PrevN, BRC, BR);
@@ -401,7 +401,7 @@
 
   PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
                                   const ExplodedNode *N,
-                                  BugReport &BR) {
+                                  BugReport &BR) override {
     if (EnableNullFPSuppression)
       BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
     return 0;
@@ -869,7 +869,7 @@
 
   // Peel off the ternary operator.
   if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(Ex)) {
-    // Find a node where the branching occured and find out which branch
+    // Find a node where the branching occurred and find out which branch
     // we took (true/false) by looking at the ExplodedGraph.
     const ExplodedNode *NI = N;
     do {
@@ -1087,7 +1087,9 @@
   llvm::raw_svector_ostream OS(Buf);
 
   if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
-    OS << "'" << ME->getSelector().getAsString() << "' not called";
+    OS << "'";
+    ME->getSelector().print(OS);
+    OS << "' not called";
   }
   else {
     OS << "No method is called";
@@ -1556,25 +1558,18 @@
       //   std::u16string s; s += u'a';
       // because we cannot reason about the internal invariants of the
       // datastructure.
-      const LocationContext *LCtx = N->getLocationContext();
-      do {
+      for (const LocationContext *LCtx = N->getLocationContext(); LCtx;
+           LCtx = LCtx->getParent()) {
         const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LCtx->getDecl());
         if (!MD)
-          break;
+          continue;
 
         const CXXRecordDecl *CD = MD->getParent();
         if (CD->getName() == "basic_string") {
           BR.markInvalid(getTag(), 0);
           return 0;
-        } else if (CD->getName().find("allocator") == StringRef::npos) {
-          // Only keep searching if the current method is in a class with the
-          // word "allocator" in its name, e.g. std::allocator or
-          // allocator_traits.
-          break;
         }
-
-        LCtx = LCtx->getParent();
-      } while (LCtx);
+      }
     }
   }
 
@@ -1611,8 +1606,10 @@
   CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
   CallEventRef<> Call = CEMgr.getCaller(CEnter->getCalleeContext(), State);
   unsigned Idx = 0;
-  for (CallEvent::param_iterator I = Call->param_begin(),
-                                 E = Call->param_end(); I != E; ++I, ++Idx) {
+  ArrayRef<ParmVarDecl*> parms = Call->parameters();
+
+  for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
+                              I != E; ++I, ++Idx) {
     const MemRegion *ArgReg = Call->getArgSVal(Idx).getAsRegion();
 
     // Are we tracking the argument or its subregion?
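
The libc++ std::basic_string suppression now walks every enclosing LocationContext with a plain for loop rather than a do/while that bailed out unless the current frame sat in an allocator-like class. Stripped down, it is just a parent-chain walk (Ctx is a made-up node type):

#include <iostream>
#include <string>

struct Ctx {
  std::string Name;
  const Ctx *Parent;
};

// Returns true if any enclosing context is named "basic_string".
static bool inBasicString(const Ctx *Leaf) {
  for (const Ctx *C = Leaf; C; C = C->Parent) {
    if (C->Name == "basic_string")
      return true;   // found it somewhere up the chain
    // no early break: keep climbing until the chain runs out
  }
  return false;
}

int main() {
  Ctx Top{"basic_string", nullptr};
  Ctx Mid{"allocator", &Top};
  Ctx Leaf{"operator+=", &Mid};
  std::cout << inBasicString(&Leaf) << "\n";   // 1
}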
diff --git a/lib/StaticAnalyzer/Core/CMakeLists.txt b/lib/StaticAnalyzer/Core/CMakeLists.txt
index 013f8a5..3a00131 100644
--- a/lib/StaticAnalyzer/Core/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Core/CMakeLists.txt
@@ -39,21 +39,11 @@
   Store.cpp
   SubEngine.cpp
   SymbolManager.cpp
-  )
 
-add_dependencies(clangStaticAnalyzerCore
-  ClangAttrClasses
-  ClangAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangStaticAnalyzerCore
+  LINK_LIBS
+  clangAST
+  clangAnalysis
   clangBasic
   clangLex
-  clangAST
-  clangFrontend
   clangRewriteCore
   )
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index a3b34f4..84a769f 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -68,8 +68,7 @@
 
   if (const RecordType *RT = T->getAsStructureType()) {
     const RecordDecl *RD = RT->getDecl();
-    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
-         I != E; ++I) {
+    for (const auto *I : RD->fields()) {
       QualType FieldT = I->getType();
       if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
         return true;
@@ -209,9 +208,7 @@
   return getSVal(E);
 }
 
-void CallEvent::dump() const {
-  dump(llvm::errs());
-}
+LLVM_DUMP_METHOD void CallEvent::dump() const { dump(llvm::errs()); }
 
 void CallEvent::dump(raw_ostream &Out) const {
   ASTContext &Ctx = getState()->getStateManager().getContext();
@@ -241,9 +238,9 @@
 QualType CallEvent::getDeclaredResultType(const Decl *D) {
   assert(D);
   if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D))
-    return FD->getResultType();
+    return FD->getReturnType();
   if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(D))
-    return MD->getResultType();
+    return MD->getReturnType();
   if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
     // Blocks are difficult because the return type may not be stored in the
     // BlockDecl itself. The AST should probably be enhanced, but for now we
@@ -256,7 +253,7 @@
     if (const TypeSourceInfo *TSI = BD->getSignatureAsWritten()) {
       QualType Ty = TSI->getType();
       if (const FunctionType *FT = Ty->getAs<FunctionType>())
-        Ty = FT->getResultType();
+        Ty = FT->getReturnType();
       if (!Ty->isDependentType())
         return Ty;
     }
@@ -284,14 +281,14 @@
                                          CallEvent::BindingsTy &Bindings,
                                          SValBuilder &SVB,
                                          const CallEvent &Call,
-                                         CallEvent::param_iterator I,
-                                         CallEvent::param_iterator E) {
+                                         ArrayRef<ParmVarDecl*> parameters) {
   MemRegionManager &MRMgr = SVB.getRegionManager();
 
   // If the function has fewer parameters than the call has arguments, we simply
   // do not bind any values to them.
   unsigned NumArgs = Call.getNumArgs();
   unsigned Idx = 0;
+  ArrayRef<ParmVarDecl*>::iterator I = parameters.begin(), E = parameters.end();
   for (; I != E && Idx < NumArgs; ++I, ++Idx) {
     const ParmVarDecl *ParamDecl = *I;
     assert(ParamDecl && "Formal parameter has no decl?");
@@ -306,21 +303,11 @@
   // FIXME: Variadic arguments are not handled at all right now.
 }
 
-
-CallEvent::param_iterator AnyFunctionCall::param_begin() const {
+ArrayRef<ParmVarDecl*> AnyFunctionCall::parameters() const {
   const FunctionDecl *D = getDecl();
   if (!D)
-    return 0;
-
-  return D->param_begin();
-}
-
-CallEvent::param_iterator AnyFunctionCall::param_end() const {
-  const FunctionDecl *D = getDecl();
-  if (!D)
-    return 0;
-
-  return D->param_end();
+    return llvm::ArrayRef<ParmVarDecl*>();
+  return D->parameters();
 }
 
 void AnyFunctionCall::getInitialStackFrameContents(
@@ -329,7 +316,7 @@
   const FunctionDecl *D = cast<FunctionDecl>(CalleeCtx->getDecl());
   SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
   addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
-                               D->param_begin(), D->param_end());
+                               D->parameters());
 }
 
 bool AnyFunctionCall::argumentsMayEscape() const {
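
AnyFunctionCall::parameters() replaces the param_begin()/param_end() pair with a single ArrayRef, and addParameterValuesToBindings still zips formals against actuals only up to whichever sequence runs out first. That loop, with std::vector standing in for ArrayRef and the SVal machinery:

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Params = {"fd", "buf", "count"};
  std::vector<int> Args = {3, 42};          // call site passed only two args

  // Bind each formal parameter to the corresponding argument value; if the
  // call has fewer arguments, the remaining parameters simply stay unbound.
  std::size_t Idx = 0;
  for (std::vector<std::string>::const_iterator I = Params.begin(),
                                                E = Params.end();
       I != E && Idx < Args.size(); ++I, ++Idx) {
    std::cout << *I << " = " << Args[Idx] << "\n";
  }
  // prints: fd = 3, buf = 42; 'count' stays unbound
}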
@@ -389,7 +376,7 @@
 }
 
 
-const FunctionDecl *SimpleCall::getDecl() const {
+const FunctionDecl *SimpleFunctionCall::getDecl() const {
   const FunctionDecl *D = getOriginExpr()->getDirectCallee();
   if (D)
     return D;
@@ -550,18 +537,11 @@
   return dyn_cast_or_null<BlockDataRegion>(DataReg);
 }
 
-CallEvent::param_iterator BlockCall::param_begin() const {
-  const BlockDecl *D = getBlockDecl();
+ArrayRef<ParmVarDecl*> BlockCall::parameters() const {
+  const BlockDecl *D = getDecl();
   if (!D)
     return 0;
-  return D->param_begin();
-}
-
-CallEvent::param_iterator BlockCall::param_end() const {
-  const BlockDecl *D = getBlockDecl();
-  if (!D)
-    return 0;
-  return D->param_end();
+  return D->parameters();
 }
 
 void BlockCall::getExtraInvalidatedValues(ValueList &Values) const {
@@ -575,7 +555,7 @@
   const BlockDecl *D = cast<BlockDecl>(CalleeCtx->getDecl());
   SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
   addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
-                               D->param_begin(), D->param_end());
+                               D->parameters());
 }
 
 
@@ -604,8 +584,6 @@
   }
 }
 
-
-
 SVal CXXDestructorCall::getCXXThisVal() const {
   if (Data)
     return loc::MemRegionVal(DtorDataTy::getFromOpaqueValue(Data).getPointer());
@@ -621,21 +599,11 @@
   return CXXInstanceCall::getRuntimeDefinition();
 }
 
-
-CallEvent::param_iterator ObjCMethodCall::param_begin() const {
+ArrayRef<ParmVarDecl*> ObjCMethodCall::parameters() const {
   const ObjCMethodDecl *D = getDecl();
   if (!D)
-    return 0;
-
-  return D->param_begin();
-}
-
-CallEvent::param_iterator ObjCMethodCall::param_end() const {
-  const ObjCMethodDecl *D = getDecl();
-  if (!D)
-    return 0;
-
-  return D->param_end();
+    return ArrayRef<ParmVarDecl*>();
+  return D->parameters();
 }
 
 void
@@ -865,9 +833,17 @@
         Optional<const ObjCMethodDecl *> &Val = PMC[std::make_pair(IDecl, Sel)];
 
         // Query lookupPrivateMethod() if the cache does not hit.
-        if (!Val.hasValue())
+        if (!Val.hasValue()) {
           Val = IDecl->lookupPrivateMethod(Sel);
 
+          // If the method is a property accessor, we should try to "inline" it
+          // even if we don't actually have an implementation.
+          if (!*Val)
+            if (const ObjCMethodDecl *CompileTimeMD = E->getMethodDecl())
+              if (CompileTimeMD->isPropertyAccessor())
+                Val = IDecl->lookupInstanceMethod(Sel);
+        }
+
         const ObjCMethodDecl *MD = Val.getValue();
         if (CanBeSubClassed)
           return RuntimeDefinition(MD, Receiver);
@@ -888,13 +864,24 @@
   return RuntimeDefinition();
 }
 
+bool ObjCMethodCall::argumentsMayEscape() const {
+  if (isInSystemHeader() && !isInstanceMessage()) {
+    Selector Sel = getSelector();
+    if (Sel.getNumArgs() == 1 &&
+        Sel.getIdentifierInfoForSlot(0)->isStr("valueWithPointer"))
+      return true;
+  }
+
+  return CallEvent::argumentsMayEscape();
+}
+
 void ObjCMethodCall::getInitialStackFrameContents(
                                              const StackFrameContext *CalleeCtx,
                                              BindingsTy &Bindings) const {
   const ObjCMethodDecl *D = cast<ObjCMethodDecl>(CalleeCtx->getDecl());
   SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
   addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
-                               D->param_begin(), D->param_end());
+                               D->parameters());
 
   SVal SelfVal = getReceiverSVal();
   if (!SelfVal.isUnknown()) {
@@ -923,7 +910,7 @@
 
   // Otherwise, it's a normal function call, static member function call, or
   // something we can't reason about.
-  return create<FunctionCall>(CE, State, LCtx);
+  return create<SimpleFunctionCall>(CE, State, LCtx);
 }
 
 
diff --git a/lib/StaticAnalyzer/Core/Checker.cpp b/lib/StaticAnalyzer/Core/Checker.cpp
index 07e0aac..1a3965a 100644
--- a/lib/StaticAnalyzer/Core/Checker.cpp
+++ b/lib/StaticAnalyzer/Core/Checker.cpp
@@ -18,8 +18,23 @@
 using namespace ento;
 
 StringRef CheckerBase::getTagDescription() const {
-  // FIXME: We want to return the package + name of the checker here.
-  return "A Checker";  
+  return getCheckName().getName();
+}
+
+CheckName CheckerBase::getCheckName() const { return Name; }
+
+CheckerProgramPointTag::CheckerProgramPointTag(StringRef CheckerName, 
+                                               StringRef Msg)
+  : SimpleProgramPointTag(CheckerName, Msg) {}
+
+CheckerProgramPointTag::CheckerProgramPointTag(const CheckerBase *Checker,
+                                               StringRef Msg)
+  : SimpleProgramPointTag(Checker->getCheckName().getName(), Msg) {}
+
+raw_ostream& clang::ento::operator<<(raw_ostream &Out,
+                                     const CheckerBase &Checker) {
+  Out << Checker.getCheckName().getName();
+  return Out;
 }
 
 void Checker<check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
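
Checker.cpp also gains a streaming operator so diagnostics can print a checker by its registered name. The free-function operator<< pattern on its own (NamedCheck is hypothetical):

#include <iostream>
#include <string>

class NamedCheck {
  std::string Name;
public:
  explicit NamedCheck(std::string N) : Name(std::move(N)) {}
  const std::string &getName() const { return Name; }
};

// Non-member operator<< so any ostream (llvm::errs()-style or std::cout)
// can print the check by name.
std::ostream &operator<<(std::ostream &Out, const NamedCheck &C) {
  Out << C.getName();
  return Out;
}

int main() {
  NamedCheck C("core.DivideZero");
  std::cout << "running " << C << "\n";
}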
diff --git a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
index 4729903..d4afeda 100644
--- a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -84,10 +84,10 @@
 
   // Record the presence of the checker in its packages.
   StringRef packageName, leafName;
-  llvm::tie(packageName, leafName) = name.rsplit(PackageSeparator);
+  std::tie(packageName, leafName) = name.rsplit(PackageSeparator);
   while (!leafName.empty()) {
     Packages[packageName] += 1;
-    llvm::tie(packageName, leafName) = packageName.rsplit(PackageSeparator);
+    std::tie(packageName, leafName) = packageName.rsplit(PackageSeparator);
   }
 }
 
@@ -106,6 +106,7 @@
   // Initialize the CheckerManager with all enabled checkers.
   for (CheckerInfoSet::iterator
          i = enabledCheckers.begin(), e = enabledCheckers.end(); i != e; ++i) {
+    checkerMgr.setCurrentCheckName(CheckName((*i)->FullName));
     (*i)->Initialize(checkerMgr);
   }
 }
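
addChecker counts the checker under every enclosing package by repeatedly splitting the dotted name from the right, so registering alpha.unix.Stream bumps alpha.unix and then alpha. The same loop written against std::string instead of StringRef::rsplit:

#include <iostream>
#include <map>
#include <string>

int main() {
  const char PackageSeparator = '.';
  std::map<std::string, unsigned> Packages;

  std::string Name = "alpha.unix.Stream";

  // Strip one trailing component per iteration and count the checker in
  // each remaining package prefix.
  std::string Package = Name;
  for (std::string::size_type Pos = Package.rfind(PackageSeparator);
       Pos != std::string::npos; Pos = Package.rfind(PackageSeparator)) {
    Package.erase(Pos);      // "alpha.unix.Stream" -> "alpha.unix" -> "alpha"
    Packages[Package] += 1;
  }

  for (std::map<std::string, unsigned>::const_iterator I = Packages.begin(),
                                                       E = Packages.end();
       I != E; ++I)
    std::cout << I->first << " -> " << I->second << "\n";
  // prints: alpha -> 1, alpha.unix -> 1
}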
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index b09b2c2..897164b 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -43,22 +43,22 @@
 class DFS : public WorkList {
   SmallVector<WorkListUnit,20> Stack;
 public:
-  virtual bool hasWork() const {
+  bool hasWork() const override {
     return !Stack.empty();
   }
 
-  virtual void enqueue(const WorkListUnit& U) {
+  void enqueue(const WorkListUnit& U) override {
     Stack.push_back(U);
   }
 
-  virtual WorkListUnit dequeue() {
+  WorkListUnit dequeue() override {
     assert (!Stack.empty());
     const WorkListUnit& U = Stack.back();
     Stack.pop_back(); // This technically "invalidates" U, but we are fine.
     return U;
   }
-  
-  virtual bool visitItemsInWorkList(Visitor &V) {
+
+  bool visitItemsInWorkList(Visitor &V) override {
     for (SmallVectorImpl<WorkListUnit>::iterator
          I = Stack.begin(), E = Stack.end(); I != E; ++I) {
       if (V.visit(*I))
@@ -71,21 +71,21 @@
 class BFS : public WorkList {
   std::deque<WorkListUnit> Queue;
 public:
-  virtual bool hasWork() const {
+  bool hasWork() const override {
     return !Queue.empty();
   }
 
-  virtual void enqueue(const WorkListUnit& U) {
+  void enqueue(const WorkListUnit& U) override {
     Queue.push_back(U);
   }
 
-  virtual WorkListUnit dequeue() {
+  WorkListUnit dequeue() override {
     WorkListUnit U = Queue.front();
     Queue.pop_front();
     return U;
   }
-  
-  virtual bool visitItemsInWorkList(Visitor &V) {
+
+  bool visitItemsInWorkList(Visitor &V) override {
     for (std::deque<WorkListUnit>::iterator
          I = Queue.begin(), E = Queue.end(); I != E; ++I) {
       if (V.visit(*I))
@@ -109,18 +109,18 @@
     std::deque<WorkListUnit> Queue;
     SmallVector<WorkListUnit,20> Stack;
   public:
-    virtual bool hasWork() const {
+    bool hasWork() const override {
       return !Queue.empty() || !Stack.empty();
     }
 
-    virtual void enqueue(const WorkListUnit& U) {
+    void enqueue(const WorkListUnit& U) override {
       if (U.getNode()->getLocation().getAs<BlockEntrance>())
         Queue.push_front(U);
       else
         Stack.push_back(U);
     }
 
-    virtual WorkListUnit dequeue() {
+    WorkListUnit dequeue() override {
       // Process all basic blocks to completion.
       if (!Stack.empty()) {
         const WorkListUnit& U = Stack.back();
@@ -135,7 +135,7 @@
       Queue.pop_front();
       return U;
     }
-    virtual bool visitItemsInWorkList(Visitor &V) {
+    bool visitItemsInWorkList(Visitor &V) override {
       for (SmallVectorImpl<WorkListUnit>::iterator
            I = Stack.begin(), E = Stack.end(); I != E; ++I) {
         if (V.visit(*I))
@@ -532,6 +532,11 @@
     return;
   }
 
+  if ((*Block)[Idx].getKind() == CFGElement::NewAllocator) {
+    WList->enqueue(N, Block, Idx+1);
+    return;
+  }
+
   // At this point, we know we're processing a normal statement.
   CFGStmt CS = (*Block)[Idx].castAs<CFGStmt>();
   PostStmt Loc(CS.getStmt(), N->getLocationContext());
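
These hunks only add override, but they also lay the two exploration strategies side by side: DFS keeps pending nodes in a LIFO stack, BFS in a FIFO queue, both behind the one WorkList interface. A compact standalone rendition (WorkItem reduced to an int):

#include <deque>
#include <iostream>
#include <vector>

typedef int WorkItem;

struct WorkList {
  virtual ~WorkList() {}
  virtual bool hasWork() const = 0;
  virtual void enqueue(WorkItem U) = 0;
  virtual WorkItem dequeue() = 0;
};

// Depth-first: the last item enqueued is processed first.
struct DFS : WorkList {
  std::vector<WorkItem> Stack;
  bool hasWork() const override { return !Stack.empty(); }
  void enqueue(WorkItem U) override { Stack.push_back(U); }
  WorkItem dequeue() override {
    WorkItem U = Stack.back();
    Stack.pop_back();
    return U;
  }
};

// Breadth-first: items are processed in the order they were enqueued.
struct BFS : WorkList {
  std::deque<WorkItem> Queue;
  bool hasWork() const override { return !Queue.empty(); }
  void enqueue(WorkItem U) override { Queue.push_back(U); }
  WorkItem dequeue() override {
    WorkItem U = Queue.front();
    Queue.pop_front();
    return U;
  }
};

static void drain(WorkList &WL) {
  while (WL.hasWork())
    std::cout << WL.dequeue() << " ";
  std::cout << "\n";
}

int main() {
  DFS D; BFS B;
  for (int i = 1; i <= 3; ++i) { D.enqueue(i); B.enqueue(i); }
  drain(D);   // 3 2 1
  drain(B);   // 1 2 3
}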
diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp
index 7b133f6..0041d9f 100644
--- a/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/lib/StaticAnalyzer/Core/Environment.cpp
@@ -123,11 +123,11 @@
   SymbolReaper &SymReaper;
 public:
   MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
-  bool VisitSymbol(SymbolRef sym) {
+  bool VisitSymbol(SymbolRef sym) override {
     SymReaper.markLive(sym);
     return true;
   }
-  bool VisitMemRegion(const MemRegion *R) {
+  bool VisitMemRegion(const MemRegion *R) override {
     SymReaper.markLive(R);
     return true;
   }
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index e9c4a35..7812c96 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -90,8 +90,9 @@
   // (7) The LocationContext is the same as the predecessor.
   // (8) Expressions that are *not* lvalue expressions.
   // (9) The PostStmt isn't for a non-consumed Stmt or Expr.
-  // (10) The successor is not a CallExpr StmtPoint (so that we would
-  //      be able to find it when retrying a call with no inlining).
+  // (10) The successor is neither a CallExpr StmtPoint nor a CallEnter or 
+  //      PreImplicitCall (so that we would be able to find it when retrying a 
+  //      call with no inlining).
   // FIXME: It may be safe to reclaim PreCall and PostCall nodes as well.
 
   // Conditions 1 and 2.
@@ -153,6 +154,10 @@
     if (CallEvent::isCallStmt(SP->getStmt()))
       return false;
 
+  // Condition 10, continuation.
+  if (SuccLoc.getAs<CallEnter>() || SuccLoc.getAs<PreImplicitCall>())
+    return false;
+
   return true;
 }
 
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 9907d0c..e7f49b2 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -55,6 +55,8 @@
 // Engine construction and deletion.
 //===----------------------------------------------------------------------===//
 
+static const char* TagProviderName = "ExprEngine";
+
 ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled,
                        SetOfConstDecls *VisitedCalleesIn,
                        FunctionSummariesTy *FS,
@@ -118,7 +120,7 @@
       SVal V = state->getSVal(loc::MemRegionVal(R));
       SVal Constraint_untested = evalBinOp(state, BO_GT, V,
                                            svalBuilder.makeZeroVal(T),
-                                           getContext().IntTy);
+                                           svalBuilder.getConditionType());
 
       Optional<DefinedOrUnknownSVal> Constraint =
           Constraint_untested.getAs<DefinedOrUnknownSVal>();
@@ -286,6 +288,10 @@
     case CFGElement::Initializer:
       ProcessInitializer(E.castAs<CFGInitializer>().getInitializer(), Pred);
       return;
+    case CFGElement::NewAllocator:
+      ProcessNewAllocator(E.castAs<CFGNewAllocator>().getAllocatorExpr(),
+                          Pred);
+      return;
     case CFGElement::AutomaticObjectDtor:
     case CFGElement::DeleteDtor:
     case CFGElement::BaseDtor:
@@ -362,7 +368,7 @@
 
   // Process any special transfer function for dead symbols.
   // A tag to track convenience transitions, which can be removed at cleanup.
-  static SimpleProgramPointTag cleanupTag("ExprEngine : Clean Node");
+  static SimpleProgramPointTag cleanupTag(TagProviderName, "Clean Node");
   if (!SymReaper.hasDeadSymbols()) {
     // Generate a CleanedNode that has the environment and store cleaned
     // up. Since no symbols are dead, we can optimize and not clean out
@@ -547,6 +553,25 @@
   Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
 }
 
+void ExprEngine::ProcessNewAllocator(const CXXNewExpr *NE,
+                                     ExplodedNode *Pred) {
+  ExplodedNodeSet Dst;
+  AnalysisManager &AMgr = getAnalysisManager();
+  AnalyzerOptions &Opts = AMgr.options;
+  // TODO: We're not evaluating allocators for all cases just yet as
+  // we're not handling the return value correctly, which causes false
+  // positives when the alpha.cplusplus.NewDeleteLeaks check is on.
+  if (Opts.mayInlineCXXAllocator())
+    VisitCXXNewAllocatorCall(NE, Pred, Dst);
+  else {
+    NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+    const LocationContext *LCtx = Pred->getLocationContext();
+    PostImplicitCall PP(NE->getOperatorNew(), NE->getLocStart(), LCtx);
+    Bldr.generateNode(PP, Pred->getState(), Pred);
+  }
+  Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
+}
+
 void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
                                          ExplodedNode *Pred,
                                          ExplodedNodeSet &Dst) {
@@ -598,7 +623,6 @@
 void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
                                  ExplodedNode *Pred, ExplodedNodeSet &Dst) {
   const LocationContext *LCtx = Pred->getLocationContext();
-  ProgramStateRef State = Pred->getState();
 
   const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
   Loc ThisPtr = getSValBuilder().getCXXThis(CurDtor,
@@ -664,8 +688,6 @@
     case Stmt::MSPropertyRefExprClass:
     case Stmt::CXXUnresolvedConstructExprClass:
     case Stmt::DependentScopeDeclRefExprClass:
-    case Stmt::UnaryTypeTraitExprClass:
-    case Stmt::BinaryTypeTraitExprClass:
     case Stmt::TypeTraitExprClass:
     case Stmt::ArrayTypeTraitExprClass:
     case Stmt::ExpressionTraitExprClass:
@@ -709,6 +731,7 @@
     case Expr::MSDependentExistsStmtClass:
     case Stmt::CapturedStmtClass:
     case Stmt::OMPParallelDirectiveClass:
+    case Stmt::OMPSimdDirectiveClass:
       llvm_unreachable("Stmt should not be in analyzer evaluation loop");
 
     case Stmt::ObjCSubscriptRefExprClass:
@@ -1253,7 +1276,7 @@
 
   // FIXME: Refactor this into a checker.
   if (nodeBuilder.getContext().blockCount() >= AMgr.options.maxBlockVisitOnPath) {
-    static SimpleProgramPointTag tag("ExprEngine : Block count exceeded");
+    static SimpleProgramPointTag tag(TagProviderName, "Block count exceeded");
     const ExplodedNode *Sink =
                    nodeBuilder.generateSink(Pred->getState(), Pred, &tag);
 
@@ -1443,7 +1466,7 @@
     DefinedSVal V = X.castAs<DefinedSVal>();
 
     ProgramStateRef StTrue, StFalse;
-    tie(StTrue, StFalse) = PrevState->assume(V);
+    std::tie(StTrue, StFalse) = PrevState->assume(V);
 
     // Process the true branch.
     if (builder.isFeasible(true)) {
@@ -1752,75 +1775,84 @@
 
 /// VisitMemberExpr - Transfer function for member expressions.
 void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
-                                 ExplodedNodeSet &TopDst) {
+                                 ExplodedNodeSet &Dst) {
 
-  StmtNodeBuilder Bldr(Pred, TopDst, *currBldrCtx);
-  ExplodedNodeSet Dst;
+  // FIXME: Prechecks eventually go in ::Visit().
+  ExplodedNodeSet CheckedSet;
+  getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, M, *this);
+
+  ExplodedNodeSet EvalSet;
   ValueDecl *Member = M->getMemberDecl();
 
   // Handle static member variables and enum constants accessed via
   // member syntax.
   if (isa<VarDecl>(Member) || isa<EnumConstantDecl>(Member)) {
-    Bldr.takeNodes(Pred);
-    VisitCommonDeclRefExpr(M, Member, Pred, Dst);
-    Bldr.addNodes(Dst);
-    return;
-  }
+    ExplodedNodeSet Dst;
+    for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+         I != E; ++I) {
+      VisitCommonDeclRefExpr(M, Member, Pred, EvalSet);
+    }
+  } else {
+    StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);
+    ExplodedNodeSet Tmp;
 
-  ProgramStateRef state = Pred->getState();
-  const LocationContext *LCtx = Pred->getLocationContext();
-  Expr *BaseExpr = M->getBase();
+    for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+         I != E; ++I) {
+      ProgramStateRef state = (*I)->getState();
+      const LocationContext *LCtx = (*I)->getLocationContext();
+      Expr *BaseExpr = M->getBase();
 
-  // Handle C++ method calls.
-  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member)) {
-    if (MD->isInstance())
+      // Handle C++ method calls.
+      if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member)) {
+        if (MD->isInstance())
+          state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
+
+        SVal MDVal = svalBuilder.getFunctionPointer(MD);
+        state = state->BindExpr(M, LCtx, MDVal);
+
+        Bldr.generateNode(M, *I, state);
+        continue;
+      }
+
+      // Handle regular struct fields / member variables.
       state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
+      SVal baseExprVal = state->getSVal(BaseExpr, LCtx);
 
-    SVal MDVal = svalBuilder.getFunctionPointer(MD);
-    state = state->BindExpr(M, LCtx, MDVal);
+      FieldDecl *field = cast<FieldDecl>(Member);
+      SVal L = state->getLValue(field, baseExprVal);
 
-    Bldr.generateNode(M, Pred, state);
-    return;
-  }
+      if (M->isGLValue() || M->getType()->isArrayType()) {
+        // We special-case rvalues of array type because the analyzer cannot
+        // reason about them, since we expect all regions to be wrapped in Locs.
+        // We instead treat these as lvalues and assume that they will decay to
+        // pointers as soon as they are used.
+        if (!M->isGLValue()) {
+          assert(M->getType()->isArrayType());
+          const ImplicitCastExpr *PE =
+            dyn_cast<ImplicitCastExpr>((*I)->getParentMap().getParent(M));
+          if (!PE || PE->getCastKind() != CK_ArrayToPointerDecay) {
+            llvm_unreachable("should always be wrapped in ArrayToPointerDecay");
+          }
+        }
 
-  // Handle regular struct fields / member variables.
-  state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
-  SVal baseExprVal = state->getSVal(BaseExpr, LCtx);
+        if (field->getType()->isReferenceType()) {
+          if (const MemRegion *R = L.getAsRegion())
+            L = state->getSVal(R);
+          else
+            L = UnknownVal();
+        }
 
-  FieldDecl *field = cast<FieldDecl>(Member);
-  SVal L = state->getLValue(field, baseExprVal);
-
-  if (M->isGLValue() || M->getType()->isArrayType()) {
-
-    // We special case rvalue of array type because the analyzer cannot reason
-    // about it, since we expect all regions to be wrapped in Locs. So we will
-    // treat these as lvalues assuming that they will decay to pointers as soon
-    // as they are used.
-    if (!M->isGLValue()) {
-      assert(M->getType()->isArrayType());
-      const ImplicitCastExpr *PE =
-        dyn_cast<ImplicitCastExpr>(Pred->getParentMap().getParent(M));
-      if (!PE || PE->getCastKind() != CK_ArrayToPointerDecay) {
-        assert(false &&
-               "We assume that array is always wrapped in ArrayToPointerDecay");
-        L = UnknownVal();
+        Bldr.generateNode(M, *I, state->BindExpr(M, LCtx, L), 0,
+                          ProgramPoint::PostLValueKind);
+      } else {
+        Bldr.takeNodes(*I);
+        evalLoad(Tmp, M, M, *I, state, L);
+        Bldr.addNodes(Tmp);
       }
     }
-
-    if (field->getType()->isReferenceType()) {
-      if (const MemRegion *R = L.getAsRegion())
-        L = state->getSVal(R);
-      else
-        L = UnknownVal();
-    }
-
-    Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), 0,
-                      ProgramPoint::PostLValueKind);
-  } else {
-    Bldr.takeNodes(Pred);
-    evalLoad(Dst, M, M, Pred, state, L);
-    Bldr.addNodes(Dst);
   }
+
+  getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, M, *this);
 }
 
 namespace {
@@ -1830,7 +1862,7 @@
   CollectReachableSymbolsCallback(ProgramStateRef State) {}
   const InvalidatedSymbols &getSymbols() const { return Symbols; }
 
-  bool VisitSymbol(SymbolRef Sym) {
+  bool VisitSymbol(SymbolRef Sym) override {
     Symbols.insert(Sym);
     return true;
   }
@@ -2037,7 +2069,7 @@
     QualType ValTy = TR->getValueType();
     if (const ReferenceType *RT = ValTy->getAs<ReferenceType>()) {
       static SimpleProgramPointTag
-             loadReferenceTag("ExprEngine : Load Reference");
+             loadReferenceTag(TagProviderName, "Load Reference");
       ExplodedNodeSet Tmp;
       evalLoadCommon(Tmp, NodeEx, BoundEx, Pred, state,
                      location, &loadReferenceTag,
@@ -2120,7 +2152,7 @@
     // instead "int *p" is noted as
     // "Variable 'p' initialized to a null pointer value"
     
-    static SimpleProgramPointTag tag("ExprEngine: Location");
+    static SimpleProgramPointTag tag(TagProviderName, "Location");
     Bldr.generateNode(NodeEx, Pred, state, &tag);
   }
   ExplodedNodeSet Tmp;
@@ -2132,8 +2164,10 @@
 std::pair<const ProgramPointTag *, const ProgramPointTag*>
 ExprEngine::geteagerlyAssumeBinOpBifurcationTags() {
   static SimpleProgramPointTag
-         eagerlyAssumeBinOpBifurcationTrue("ExprEngine : Eagerly Assume True"),
-         eagerlyAssumeBinOpBifurcationFalse("ExprEngine : Eagerly Assume False");
+         eagerlyAssumeBinOpBifurcationTrue(TagProviderName,
+                                           "Eagerly Assume True"),
+         eagerlyAssumeBinOpBifurcationFalse(TagProviderName,
+                                            "Eagerly Assume False");
   return std::make_pair(&eagerlyAssumeBinOpBifurcationTrue,
                         &eagerlyAssumeBinOpBifurcationFalse);
 }
@@ -2161,7 +2195,7 @@
         geteagerlyAssumeBinOpBifurcationTags();
 
       ProgramStateRef StateTrue, StateFalse;
-      tie(StateTrue, StateFalse) = state->assume(*SEV);
+      std::tie(StateTrue, StateFalse) = state->assume(*SEV);
 
       // First assume that the condition is true.
       if (StateTrue) {
@@ -2515,7 +2549,7 @@
   GraphPrintCheckerState = this;
   GraphPrintSourceManager = &getContext().getSourceManager();
 
-  OwningPtr<ExplodedGraph> TrimmedG(G.trim(Nodes));
+  std::unique_ptr<ExplodedGraph> TrimmedG(G.trim(Nodes));
 
   if (!TrimmedG.get())
     llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 983fda0..91b072f 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -286,6 +286,7 @@
       case CK_Dependent:
       case CK_ArrayToPointerDecay:
       case CK_BitCast:
+      case CK_AddressSpaceConversion:
       case CK_IntegralCast:
       case CK_NullToPointer:
       case CK_IntegralToPointer:
@@ -554,7 +555,7 @@
     } else {
       DefinedOrUnknownSVal DefinedRHS = RHSVal.castAs<DefinedOrUnknownSVal>();
       ProgramStateRef StTrue, StFalse;
-      llvm::tie(StTrue, StFalse) = N->getState()->assume(DefinedRHS);
+      std::tie(StTrue, StFalse) = N->getState()->assume(DefinedRHS);
       if (StTrue) {
         if (StFalse) {
           // We can't constrain the value to 0 or 1.
@@ -707,34 +708,43 @@
 VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
                               ExplodedNode *Pred,
                               ExplodedNodeSet &Dst) {
-  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+  // FIXME: Prechecks eventually go in ::Visit().
+  ExplodedNodeSet CheckedSet;
+  getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, Ex, *this);
+
+  ExplodedNodeSet EvalSet;
+  StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);
 
   QualType T = Ex->getTypeOfArgument();
-  
-  if (Ex->getKind() == UETT_SizeOf) {
-    if (!T->isIncompleteType() && !T->isConstantSizeType()) {
-      assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
-      
-      // FIXME: Add support for VLA type arguments and VLA expressions.
-      // When that happens, we should probably refactor VLASizeChecker's code.
-      return;
+
+  for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+       I != E; ++I) {
+    if (Ex->getKind() == UETT_SizeOf) {
+      if (!T->isIncompleteType() && !T->isConstantSizeType()) {
+        assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
+        
+        // FIXME: Add support for VLA type arguments and VLA expressions.
+        // When that happens, we should probably refactor VLASizeChecker's code.
+        continue;
+      } else if (T->getAs<ObjCObjectType>()) {
+        // Some code tries to take the sizeof an ObjCObjectType, relying that
+        // the compiler has laid out its representation.  Just report Unknown
+        // for these.
+        continue;
+      }
     }
-    else if (T->getAs<ObjCObjectType>()) {
-      // Some code tries to take the sizeof an ObjCObjectType, relying that
-      // the compiler has laid out its representation.  Just report Unknown
-      // for these.
-      return;
-    }
+    
+    APSInt Value = Ex->EvaluateKnownConstInt(getContext());
+    CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
+    
+    ProgramStateRef state = (*I)->getState();
+    state = state->BindExpr(Ex, (*I)->getLocationContext(),
+                            svalBuilder.makeIntVal(amt.getQuantity(),
+                                                   Ex->getType()));
+    Bldr.generateNode(Ex, *I, state);
   }
-  
-  APSInt Value = Ex->EvaluateKnownConstInt(getContext());
-  CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
-  
-  ProgramStateRef state = Pred->getState();
-  state = state->BindExpr(Ex, Pred->getLocationContext(),
-                          svalBuilder.makeIntVal(amt.getQuantity(),
-                                                     Ex->getType()));
-  Bldr.generateNode(Ex, Pred, state);
+
+  getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
 }
 
 void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, 
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index eba4f94..52fe156 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -91,12 +91,6 @@
 /// If the type is not an array type at all, the original value is returned.
 static SVal makeZeroElementRegion(ProgramStateRef State, SVal LValue,
                                   QualType &Ty) {
-  // FIXME: This check is just a temporary workaround, because
-  // ProcessTemporaryDtor sends us NULL regions. It will not be necessary once
-  // we can properly process temporary destructors.
-  if (!LValue.getAsRegion())
-    return LValue;
-
   SValBuilder &SVB = State->getStateManager().getSValBuilder();
   ASTContext &Ctx = SVB.getContext();
 
@@ -108,6 +102,70 @@
   return LValue;
 }
 
+
+static const MemRegion *getRegionForConstructedObject(
+    const CXXConstructExpr *CE, ExplodedNode *Pred, ExprEngine &Eng,
+    unsigned int CurrStmtIdx) {
+  const LocationContext *LCtx = Pred->getLocationContext();
+  ProgramStateRef State = Pred->getState();
+  const NodeBuilderContext &CurrBldrCtx = Eng.getBuilderContext();
+
+  // See if we're constructing an existing region by looking at the next
+  // element in the CFG.
+  const CFGBlock *B = CurrBldrCtx.getBlock();
+  if (CurrStmtIdx + 1 < B->size()) {
+    CFGElement Next = (*B)[CurrStmtIdx+1];
+
+    // Is this a constructor for a local variable?
+    if (Optional<CFGStmt> StmtElem = Next.getAs<CFGStmt>()) {
+      if (const DeclStmt *DS = dyn_cast<DeclStmt>(StmtElem->getStmt())) {
+        if (const VarDecl *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
+          if (Var->getInit()->IgnoreImplicit() == CE) {
+            SVal LValue = State->getLValue(Var, LCtx);
+            QualType Ty = Var->getType();
+            LValue = makeZeroElementRegion(State, LValue, Ty);
+            return LValue.getAsRegion();
+          }
+        }
+      }
+    }
+
+    // Is this a constructor for a member?
+    if (Optional<CFGInitializer> InitElem = Next.getAs<CFGInitializer>()) {
+      const CXXCtorInitializer *Init = InitElem->getInitializer();
+      assert(Init->isAnyMemberInitializer());
+
+      const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
+      Loc ThisPtr = Eng.getSValBuilder().getCXXThis(CurCtor,
+          LCtx->getCurrentStackFrame());
+      SVal ThisVal = State->getSVal(ThisPtr);
+
+      const ValueDecl *Field;
+      SVal FieldVal;
+      if (Init->isIndirectMemberInitializer()) {
+        Field = Init->getIndirectMember();
+        FieldVal = State->getLValue(Init->getIndirectMember(), ThisVal);
+      } else {
+        Field = Init->getMember();
+        FieldVal = State->getLValue(Init->getMember(), ThisVal);
+      }
+
+      QualType Ty = Field->getType();
+      FieldVal = makeZeroElementRegion(State, FieldVal, Ty);
+      return FieldVal.getAsRegion();
+    }
+
+    // FIXME: This will eventually need to handle new-expressions as well.
+    // Don't forget to update the pre-constructor initialization code in
+    // ExprEngine::VisitCXXConstructExpr.
+  }
+
+  // If we couldn't find an existing region to construct into, assume we're
+  // constructing a temporary.
+  MemRegionManager &MRMgr = Eng.getSValBuilder().getRegionManager();
+  return MRMgr.getCXXTempObjectRegion(CE, LCtx);
+}
+
 void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
                                        ExplodedNode *Pred,
                                        ExplodedNodeSet &destNodes) {
@@ -122,62 +180,7 @@
 
   switch (CE->getConstructionKind()) {
   case CXXConstructExpr::CK_Complete: {
-    // See if we're constructing an existing region by looking at the next
-    // element in the CFG.
-    const CFGBlock *B = currBldrCtx->getBlock();
-    if (currStmtIdx + 1 < B->size()) {
-      CFGElement Next = (*B)[currStmtIdx+1];
-
-      // Is this a constructor for a local variable?
-      if (Optional<CFGStmt> StmtElem = Next.getAs<CFGStmt>()) {
-        if (const DeclStmt *DS = dyn_cast<DeclStmt>(StmtElem->getStmt())) {
-          if (const VarDecl *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
-            if (Var->getInit()->IgnoreImplicit() == CE) {
-              SVal LValue = State->getLValue(Var, LCtx);
-              QualType Ty = Var->getType();
-              LValue = makeZeroElementRegion(State, LValue, Ty);
-              Target = LValue.getAsRegion();
-            }
-          }
-        }
-      }
-      
-      // Is this a constructor for a member?
-      if (Optional<CFGInitializer> InitElem = Next.getAs<CFGInitializer>()) {
-        const CXXCtorInitializer *Init = InitElem->getInitializer();
-        assert(Init->isAnyMemberInitializer());
-
-        const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
-        Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
-                                                  LCtx->getCurrentStackFrame());
-        SVal ThisVal = State->getSVal(ThisPtr);
-
-        const ValueDecl *Field;
-        SVal FieldVal;
-        if (Init->isIndirectMemberInitializer()) {
-          Field = Init->getIndirectMember();
-          FieldVal = State->getLValue(Init->getIndirectMember(), ThisVal);
-        } else {
-          Field = Init->getMember();
-          FieldVal = State->getLValue(Init->getMember(), ThisVal);
-        }
-
-        QualType Ty = Field->getType();
-        FieldVal = makeZeroElementRegion(State, FieldVal, Ty);
-        Target = FieldVal.getAsRegion();
-      }
-
-      // FIXME: This will eventually need to handle new-expressions as well.
-      // Don't forget to update the pre-constructor initialization code below.
-    }
-
-    // If we couldn't find an existing region to construct into, assume we're
-    // constructing a temporary.
-    if (!Target) {
-      MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
-      Target = MRMgr.getCXXTempObjectRegion(CE, LCtx);
-    }
-
+    Target = getRegionForConstructedObject(CE, Pred, *this, currStmtIdx);
     break;
   }
   case CXXConstructExpr::CK_VirtualBase:
@@ -329,6 +332,32 @@
                                              *Call, *this);
 }
 
+void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
+                                          ExplodedNode *Pred,
+                                          ExplodedNodeSet &Dst) {
+  ProgramStateRef State = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+                                CNE->getStartLoc(),
+                                "Error evaluating New Allocator Call");
+  CallEventManager &CEMgr = getStateManager().getCallEventManager();
+  CallEventRef<CXXAllocatorCall> Call =
+    CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+
+  ExplodedNodeSet DstPreCall;
+  getCheckerManager().runCheckersForPreCall(DstPreCall, Pred,
+                                            *Call, *this);
+
+  ExplodedNodeSet DstInvalidated;
+  StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
+  for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+       I != E; ++I)
+    defaultEvalCall(Bldr, *I, *Call);
+  getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
+                                             *Call, *this);
+}
+
+
 void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
                                    ExplodedNodeSet &Dst) {
   // FIXME: Much of this should eventually migrate to CXXAllocatorCall.
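
The hunks above move the "which region is being constructed?" logic out of VisitCXXConstructExpr into a helper that returns the region directly, falling back to a temporary when no DeclStmt or constructor initializer claims the object. A reduced sketch of that shape, using invented placeholder types rather than the analyzer's own:

    // Minimal sketch only; Region and the helpers below are stand-ins.
    struct Region {};

    static Region TempObject;                                     // stand-in for the temp-object region
    static const Region *findExistingRegion() { return nullptr; } // placeholder lookup

    static const Region *getRegionForConstructedObject() {
      if (const Region *R = findExistingRegion())
        return R;
      // No existing region found: assume we're constructing a temporary,
      // exactly as the fallback at the end of the new helper does.
      return &TempObject;
    }

    int main() { return getRegionForConstructedObject() == &TempObject ? 0 : 1; }
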
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 06328e4..72af75d 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -49,7 +49,7 @@
   assert(Entry->empty());
   assert(Entry->succ_size() == 1);
   
-  // Get the solitary sucessor.
+  // Get the solitary successor.
   const CFGBlock *Succ = *(Entry->succ_begin());
   
   // Construct an edge representing the starting location in the callee.
@@ -162,7 +162,7 @@
   // Find the last statement in the function and the corresponding basic block.
   const Stmt *LastSt = 0;
   const CFGBlock *Blk = 0;
-  llvm::tie(LastSt, Blk) = getLastStmt(Pred);
+  std::tie(LastSt, Blk) = getLastStmt(Pred);
   if (!Blk || !LastSt) {
     Dst.Add(Pred);
     return;
@@ -231,7 +231,7 @@
   // Find the last statement in the function and the corresponding basic block.
   const Stmt *LastSt = 0;
   const CFGBlock *Blk = 0;
-  llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);
+  std::tie(LastSt, Blk) = getLastStmt(CEBNode);
 
   // Generate a CallEvent /before/ cleaning the state, so that we can get the
   // correct value for 'this' (if necessary).
@@ -282,7 +282,7 @@
   // they occurred.
   ExplodedNodeSet CleanedNodes;
   if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
-    static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
+    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
     PostStmt Loc(LastSt, calleeCtx, &retValBind);
     bool isNew;
     ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
@@ -664,6 +664,8 @@
     break;
   }
   case CE_CXXAllocator:
+    if (Opts.mayInlineCXXAllocator())
+      break;
     // Do not inline allocators until we model deallocators.
     // This is unfortunate, but basically necessary for smart pointers and such.
     return CIP_DisallowedAlways;
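
The llvm::tie calls in this file (and in several files below) become std::tie, the C++11 standard facility with the same unpack-a-pair behaviour. A self-contained sketch of the pattern; getLastStmtInfo() and its return values are invented for illustration:

    #include <tuple>
    #include <utility>

    static std::pair<const char *, int> getLastStmtInfo() {
      return std::make_pair("return-stmt", 42);
    }

    int main() {
      const char *Kind = nullptr;
      int Index = 0;
      // std::tie assigns both locals from the returned pair in one statement,
      // just as llvm::tie used to.
      std::tie(Kind, Index) = getLastStmtInfo();
      return (Index == 42 && Kind != nullptr) ? 0 : 1;
    }
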
diff --git a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index d276d92..57c04bf 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -165,7 +165,7 @@
             recVal.castAs<DefinedOrUnknownSVal>();
 
         ProgramStateRef notNilState, nilState;
-        llvm::tie(notNilState, nilState) = State->assume(receiverVal);
+        std::tie(notNilState, nilState) = State->assume(receiverVal);
         
         // There are three cases: can be nil or non-nil, must be nil, must be
         // non-nil. We ignore must be nil, and merge the rest two into non-nil.
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 365f6ab..07a793e 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -44,10 +44,10 @@
 
   virtual ~HTMLDiagnostics() { FlushDiagnostics(NULL); }
 
-  virtual void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
-                                    FilesMade *filesMade);
+  void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+                            FilesMade *filesMade) override;
 
-  virtual StringRef getName() const {
+  StringRef getName() const override {
     return "HTMLDiagnostics";
   }
 
@@ -99,9 +99,7 @@
   // Create the HTML directory if it is missing.
   if (!createdDir) {
     createdDir = true;
-    bool existed;
-    if (llvm::error_code ec =
-            llvm::sys::fs::create_directories(Directory, existed)) {
+    if (llvm::error_code ec = llvm::sys::fs::create_directories(Directory)) {
       llvm::errs() << "warning: could not create directory '"
                    << Directory << "': " << ec.message() << '\n';
 
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index 162cd33..8f6c373 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -975,10 +975,8 @@
   if (IsVirtual)
     return Class->isVirtuallyDerivedFrom(BaseClass);
 
-  for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
-                                                E = Class->bases_end();
-       I != E; ++I) {
-    if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == BaseClass)
+  for (const auto &I : Class->bases()) {
+    if (I.getType()->getAsCXXRecordDecl()->getCanonicalDecl() == BaseClass)
       return true;
   }
 
@@ -1176,10 +1174,8 @@
   // Note that we do NOT canonicalize the base class here, because
   // ASTRecordLayout doesn't either. If that leads us down the wrong path,
   // so be it; at least we won't crash.
-  for (CXXRecordDecl::base_class_const_iterator I = Child->bases_begin(),
-                                                E = Child->bases_end();
-       I != E; ++I) {
-    if (I->getType()->getAsCXXRecordDecl() == Base)
+  for (const auto &I : Child->bases()) {
+    if (I.getType()->getAsCXXRecordDecl() == Base)
       return true;
   }
 
@@ -1363,7 +1359,7 @@
   const VarRegion *VR = 0;
   const VarRegion *OriginalVR = 0;
 
-  if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage()) {
+  if (!VD->hasAttr<BlocksAttr>() && VD->hasLocalStorage()) {
     VR = MemMgr.getVarRegion(VD, this);
     OriginalVR = MemMgr.getVarRegion(VD, LC);
   }
@@ -1386,7 +1382,7 @@
 
   AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext();
   AnalysisDeclContext::referenced_decls_iterator I, E;
-  llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());
+  std::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());
 
   if (I == E) {
     ReferencedVars = (void*) 0x1;
@@ -1406,7 +1402,7 @@
   for ( ; I != E; ++I) {
     const VarRegion *VR = 0;
     const VarRegion *OriginalVR = 0;
-    llvm::tie(VR, OriginalVR) = getCaptureRegions(*I);
+    std::tie(VR, OriginalVR) = getCaptureRegions(*I);
     assert(VR);
     assert(OriginalVR);
     BV->push_back(VR, BC);
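
The explicit base_class_const_iterator loops above become range-based for loops over Class->bases() and Child->bases(). The same rewrite applied to an ordinary container, with invented types:

    #include <vector>

    struct BaseSpec { int Id; };   // placeholder for a base-class specifier

    // was: for (std::vector<BaseSpec>::const_iterator I = Bases.begin(),
    //             E = Bases.end(); I != E; ++I) { if (I->Id == Target) ... }
    static bool hasBaseWithId(const std::vector<BaseSpec> &Bases, int Target) {
      for (const auto &B : Bases)
        if (B.Id == Target)
          return true;
      return false;
    }

    int main() {
      std::vector<BaseSpec> Bases = {{1}, {7}, {9}};
      return hasBaseWithId(Bases, 7) ? 0 : 1;
    }
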
diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index b504db6..89e4309 100644
--- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -106,12 +106,13 @@
 
 PathDiagnostic::~PathDiagnostic() {}
 
-PathDiagnostic::PathDiagnostic(const Decl *declWithIssue,
+PathDiagnostic::PathDiagnostic(StringRef CheckName, const Decl *declWithIssue,
                                StringRef bugtype, StringRef verboseDesc,
                                StringRef shortDesc, StringRef category,
                                PathDiagnosticLocation LocationToUnique,
                                const Decl *DeclToUnique)
-  : DeclWithIssue(declWithIssue),
+  : CheckName(CheckName),
+    DeclWithIssue(declWithIssue),
     BugType(StripTrailingDots(bugtype)),
     VerboseDesc(StripTrailingDots(verboseDesc)),
     ShortDesc(StripTrailingDots(shortDesc)),
@@ -196,8 +197,8 @@
 }
 
 void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
-  OwningPtr<PathDiagnostic> OwningD(D);
-  
+  std::unique_ptr<PathDiagnostic> OwningD(D);
+
   if (!D || D->path.empty())
     return;
   
@@ -274,8 +275,8 @@
     Diags.RemoveNode(orig);
     delete orig;
   }
-  
-  Diags.InsertNode(OwningD.take());
+
+  Diags.InsertNode(OwningD.release());
 }
 
 static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y);
@@ -415,17 +416,6 @@
   return b.getValue();
 }
 
-namespace {
-struct CompareDiagnostics {
-  // Compare if 'X' is "<" than 'Y'.
-  bool operator()(const PathDiagnostic *X, const PathDiagnostic *Y) const {
-    if (X == Y)
-      return false;
-    return compare(*X, *Y);
-  }
-};
-}
-
 void PathDiagnosticConsumer::FlushDiagnostics(
                                      PathDiagnosticConsumer::FilesMade *Files) {
   if (flushed)
@@ -443,8 +433,11 @@
   // Sort the diagnostics so that they are always emitted in a deterministic
   // order.
   if (!BatchDiags.empty())
-    std::sort(BatchDiags.begin(), BatchDiags.end(), CompareDiagnostics());
-  
+    std::sort(BatchDiags.begin(), BatchDiags.end(),
+              [](const PathDiagnostic *X, const PathDiagnostic *Y) {
+      return X != Y && compare(*X, *Y);
+    });
+
   FlushDiagnosticsImpl(BatchDiags, Files);
 
   // Delete the flushed diagnostics.
@@ -571,6 +564,7 @@
     return PathDiagnosticLocation::create(CallerInfo->getDecl(), SM);
   }
   case CFGElement::TemporaryDtor:
+  case CFGElement::NewAllocator:
     llvm_unreachable("not yet implemented!");
   }
 
@@ -610,6 +604,14 @@
 }
 
 PathDiagnosticLocation
+  PathDiagnosticLocation::createConditionalColonLoc(
+                                            const ConditionalOperator *CO,
+                                            const SourceManager &SM) {
+  return PathDiagnosticLocation(CO->getColonLoc(), SM, SingleLocK);
+}
+
+
+PathDiagnosticLocation
   PathDiagnosticLocation::createMemberLoc(const MemberExpr *ME,
                                           const SourceManager &SM) {
   return PathDiagnosticLocation(ME->getMemberLoc(), SM, SingleLocK);
@@ -731,8 +733,12 @@
   assert(N && "Cannot create a location with a null node.");
   const Stmt *S = getStmt(N);
 
-  if (!S)
+  if (!S) {
+    // If this is an implicit call, return the implicit call point location.
+    if (Optional<PreImplicitCall> PIE = N->getLocationAs<PreImplicitCall>())
+      return PathDiagnosticLocation(PIE->getLocation(), SM);
     S = getNextStmt(N);
+  }
 
   if (S) {
     ProgramPoint P = N->getLocation();
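
The file-local CompareDiagnostics functor is folded into a lambda passed straight to std::sort. A reduced, self-contained sketch of the same shape, with a plain struct standing in for PathDiagnostic and a string key standing in for compare():

    #include <algorithm>
    #include <string>
    #include <vector>

    struct Diag { std::string Key; };

    static void sortBatch(std::vector<const Diag *> &Batch) {
      std::sort(Batch.begin(), Batch.end(),
                [](const Diag *X, const Diag *Y) {
                  // Returning false for X == Y keeps this a strict weak
                  // ordering, mirroring "X != Y && compare(*X, *Y)" above.
                  return X != Y && X->Key < Y->Key;
                });
    }

    int main() {
      Diag A{"alpha"}, B{"beta"};
      std::vector<const Diag *> Batch = {&B, &A};
      sortBatch(Batch);
      return Batch.front() == &A ? 0 : 1;
    }
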
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 5dca811..8e1ea25 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -13,6 +13,7 @@
 
 #include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
 #include "clang/Basic/FileManager.h"
+#include "clang/Basic/PlistSupport.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Basic/Version.h"
 #include "clang/Lex/Preprocessor.h"
@@ -21,12 +22,9 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/Casting.h"
-#include "llvm/Support/raw_ostream.h"
 using namespace clang;
 using namespace ento;
-
-typedef llvm::DenseMap<FileID, unsigned> FIDMap;
-
+using namespace markup;
 
 namespace {
   class PlistDiagnostics : public PathDiagnosticConsumer {
@@ -42,15 +40,17 @@
     virtual ~PlistDiagnostics() {}
 
     void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
-                              FilesMade *filesMade);
-    
-    virtual StringRef getName() const {
+                              FilesMade *filesMade) override;
+
+    virtual StringRef getName() const override {
       return "PlistDiagnostics";
     }
 
-    PathGenerationScheme getGenerationScheme() const { return Extensive; }
-    bool supportsLogicalOpControlFlow() const { return true; }
-    virtual bool supportsCrossFileDiagnostics() const {
+    PathGenerationScheme getGenerationScheme() const override {
+      return Extensive;
+    }
+    bool supportsLogicalOpControlFlow() const override { return true; }
+    bool supportsCrossFileDiagnostics() const override {
       return SupportsCrossFileDiagnostics;
     }
   };
@@ -80,84 +80,6 @@
                                    PP.getLangOpts(), true));
 }
 
-static void AddFID(FIDMap &FIDs, SmallVectorImpl<FileID> &V,
-                   const SourceManager* SM, SourceLocation L) {
-
-  FileID FID = SM->getFileID(SM->getExpansionLoc(L));
-  FIDMap::iterator I = FIDs.find(FID);
-  if (I != FIDs.end()) return;
-  FIDs[FID] = V.size();
-  V.push_back(FID);
-}
-
-static unsigned GetFID(const FIDMap& FIDs, const SourceManager &SM,
-                       SourceLocation L) {
-  FileID FID = SM.getFileID(SM.getExpansionLoc(L));
-  FIDMap::const_iterator I = FIDs.find(FID);
-  assert(I != FIDs.end());
-  return I->second;
-}
-
-static raw_ostream &Indent(raw_ostream &o, const unsigned indent) {
-  for (unsigned i = 0; i < indent; ++i) o << ' ';
-  return o;
-}
-
-static void EmitLocation(raw_ostream &o, const SourceManager &SM,
-                         const LangOptions &LangOpts,
-                         SourceLocation L, const FIDMap &FM,
-                         unsigned indent, bool extend = false) {
-
-  FullSourceLoc Loc(SM.getExpansionLoc(L), const_cast<SourceManager&>(SM));
-
-  // Add in the length of the token, so that we cover multi-char tokens.
-  unsigned offset =
-    extend ? Lexer::MeasureTokenLength(Loc, SM, LangOpts) - 1 : 0;
-
-  Indent(o, indent) << "<dict>\n";
-  Indent(o, indent) << " <key>line</key><integer>"
-                    << Loc.getExpansionLineNumber() << "</integer>\n";
-  Indent(o, indent) << " <key>col</key><integer>"
-                    << Loc.getExpansionColumnNumber() + offset << "</integer>\n";
-  Indent(o, indent) << " <key>file</key><integer>"
-                    << GetFID(FM, SM, Loc) << "</integer>\n";
-  Indent(o, indent) << "</dict>\n";
-}
-
-static void EmitLocation(raw_ostream &o, const SourceManager &SM,
-                         const LangOptions &LangOpts,
-                         const PathDiagnosticLocation &L, const FIDMap& FM,
-                         unsigned indent, bool extend = false) {
-  EmitLocation(o, SM, LangOpts, L.asLocation(), FM, indent, extend);
-}
-
-static void EmitRange(raw_ostream &o, const SourceManager &SM,
-                      const LangOptions &LangOpts,
-                      PathDiagnosticRange R, const FIDMap &FM,
-                      unsigned indent) {
-  Indent(o, indent) << "<array>\n";
-  EmitLocation(o, SM, LangOpts, R.getBegin(), FM, indent+1);
-  EmitLocation(o, SM, LangOpts, R.getEnd(), FM, indent+1, !R.isPoint);
-  Indent(o, indent) << "</array>\n";
-}
-
-static raw_ostream &EmitString(raw_ostream &o, StringRef s) {
-  o << "<string>";
-  for (StringRef::const_iterator I = s.begin(), E = s.end(); I != E; ++I) {
-    char c = *I;
-    switch (c) {
-    default:   o << c; break;
-    case '&':  o << "&amp;"; break;
-    case '<':  o << "&lt;"; break;
-    case '>':  o << "&gt;"; break;
-    case '\'': o << "&apos;"; break;
-    case '\"': o << "&quot;"; break;
-    }
-  }
-  o << "</string>";
-  return o;
-}
-
 static void ReportControlFlow(raw_ostream &o,
                               const PathDiagnosticControlFlowPiece& P,
                               const FIDMap& FM,
@@ -185,11 +107,13 @@
     // logic for clients.
     Indent(o, indent) << "<key>start</key>\n";
     SourceLocation StartEdge = I->getStart().asRange().getBegin();
-    EmitRange(o, SM, LangOpts, SourceRange(StartEdge, StartEdge), FM, indent+1);
+    EmitRange(o, SM, LangOpts, CharSourceRange::getTokenRange(StartEdge), FM,
+              indent + 1);
 
     Indent(o, indent) << "<key>end</key>\n";
     SourceLocation EndEdge = I->getEnd().asRange().getBegin();
-    EmitRange(o, SM, LangOpts, SourceRange(EndEdge, EndEdge), FM, indent+1);
+    EmitRange(o, SM, LangOpts, CharSourceRange::getTokenRange(EndEdge), FM,
+              indent + 1);
 
     --indent;
     Indent(o, indent) << "</dict>\n";
@@ -241,7 +165,8 @@
     ++indent;
     for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
          I != E; ++I) {
-      EmitRange(o, SM, LangOpts, *I, FM, indent+1);
+      EmitRange(o, SM, LangOpts, CharSourceRange::getTokenRange(*I), FM,
+                indent + 1);
     }
     --indent;
     Indent(o, indent) << "</array>\n";
@@ -387,12 +312,12 @@
       for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E;
            ++I) {
         const PathDiagnosticPiece *piece = I->getPtr();
-        AddFID(FM, Fids, SM, piece->getLocation().asLocation());
+        AddFID(FM, Fids, *SM, piece->getLocation().asLocation());
         ArrayRef<SourceRange> Ranges = piece->getRanges();
         for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
                                              E = Ranges.end(); I != E; ++I) {
-          AddFID(FM, Fids, SM, I->getBegin());
-          AddFID(FM, Fids, SM, I->getEnd());
+          AddFID(FM, Fids, *SM, I->getBegin());
+          AddFID(FM, Fids, *SM, I->getEnd());
         }
 
         if (const PathDiagnosticCallPiece *call =
@@ -400,7 +325,7 @@
           IntrusiveRefCntPtr<PathDiagnosticEventPiece>
             callEnterWithin = call->getCallEnterWithinCallerEvent();
           if (callEnterWithin)
-            AddFID(FM, Fids, SM, callEnterWithin->getLocation().asLocation());
+            AddFID(FM, Fids, *SM, callEnterWithin->getLocation().asLocation());
 
           WorkList.push_back(&call->path);
         }
@@ -414,17 +339,14 @@
 
   // Open the file.
   std::string ErrMsg;
-  llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg);
+  llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg, llvm::sys::fs::F_Text);
   if (!ErrMsg.empty()) {
     llvm::errs() << "warning: could not create file: " << OutputFile << '\n';
     return;
   }
 
   // Write the plist header.
-  o << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
-  "<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" "
-  "\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
-  "<plist version=\"1.0\">\n";
+  o << PlistHeader;
 
   // Write the root object: a <dict> containing...
   //  - "clang_version", the string representation of clang version
@@ -535,7 +457,7 @@
 
     // Output the location of the bug.
     o << "  <key>location</key>\n";
-    EmitLocation(o, *SM, LangOpts, D->getLocation(), FM, 2);
+    EmitLocation(o, *SM, LangOpts, D->getLocation().asLocation(), FM, 2);
 
     // Output the diagnostic to the sub-diagnostic client, if any.
     if (!filesMade->empty()) {
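
The block removed from PlistDiagnostics.cpp (AddFID, EmitLocation, EmitRange, EmitString, the plist header string) now comes from clang/Basic/PlistSupport.h via "using namespace markup". For reference, the escaping the old EmitString performed reduces to the five XML-special characters; a standalone sketch:

    #include <iostream>
    #include <string>

    static std::string plistString(const std::string &S) {
      std::string Out = "<string>";
      for (char C : S) {
        switch (C) {
        case '&':  Out += "&amp;";  break;
        case '<':  Out += "&lt;";   break;
        case '>':  Out += "&gt;";   break;
        case '\'': Out += "&apos;"; break;
        case '"':  Out += "&quot;"; break;
        default:   Out += C;        break;
        }
      }
      return Out + "</string>";
    }

    int main() {
      std::cout << plistString("a < b && c") << '\n';
      // prints: <string>a &lt; b &amp;&amp; c</string>
      return 0;
    }
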
diff --git a/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h b/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
index ed64fcb..c2af36f 100644
--- a/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
+++ b/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
@@ -33,7 +33,7 @@
     assert(LCtx);
   }
 
-  virtual void print(raw_ostream &OS) const {
+  void print(raw_ostream &OS) const override {
     OS << "While analyzing stack: \n";
     LCtx->dumpStack(OS, "\t");
   }
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index 6e23668..0bc510a 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -175,7 +175,6 @@
                                     const CallEvent *Call) const {
   ProgramStateManager &Mgr = getStateManager();
   SubEngine* Eng = Mgr.getOwningEngine();
-  InvalidatedSymbols ConstIS;
 
   InvalidatedSymbols Invalidated;
   if (!IS)
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 3606e09..362b94f 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -290,35 +290,37 @@
 
   ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
                              const llvm::APSInt& Int,
-                             const llvm::APSInt& Adjustment);
+                             const llvm::APSInt& Adjustment) override;
 
   ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
                              const llvm::APSInt& Int,
-                             const llvm::APSInt& Adjustment);
+                             const llvm::APSInt& Adjustment) override;
 
   ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
                              const llvm::APSInt& Int,
-                             const llvm::APSInt& Adjustment);
+                             const llvm::APSInt& Adjustment) override;
 
   ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
                              const llvm::APSInt& Int,
-                             const llvm::APSInt& Adjustment);
+                             const llvm::APSInt& Adjustment) override;
 
   ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
                              const llvm::APSInt& Int,
-                             const llvm::APSInt& Adjustment);
+                             const llvm::APSInt& Adjustment) override;
 
   ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
                              const llvm::APSInt& Int,
-                             const llvm::APSInt& Adjustment);
+                             const llvm::APSInt& Adjustment) override;
 
-  const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) const;
-  ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym);
+  const llvm::APSInt* getSymVal(ProgramStateRef St,
+                                SymbolRef sym) const override;
+  ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym) override;
 
-  ProgramStateRef removeDeadBindings(ProgramStateRef St, SymbolReaper& SymReaper);
+  ProgramStateRef removeDeadBindings(ProgramStateRef St,
+                                     SymbolReaper& SymReaper) override;
 
   void print(ProgramStateRef St, raw_ostream &Out,
-             const char* nl, const char *sep);
+             const char* nl, const char *sep) override;
 
 private:
   RangeSet::Factory F;
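
Like much of this patch, the RangeConstraintManager declarations above gain the C++11 'override' keyword on methods that override virtuals of the constraint-manager base classes. A tiny sketch of what that buys, on invented types:

    #include <cstdio>

    struct ConstraintBase {
      virtual ~ConstraintBase() {}
      virtual const char *getName() const { return "base"; }
    };

    struct RangeConstraint : ConstraintBase {
      // 'override' makes the compiler reject this declaration if the base
      // signature ever changes; a bare 'virtual' would silently introduce a
      // new, unrelated virtual function instead.
      const char *getName() const override { return "range"; }
    };

    int main() {
      RangeConstraint C;
      const ConstraintBase &B = C;
      std::printf("%s\n", B.getName());  // prints "range" through the vtable
      return 0;
    }
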
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index 0b51976..b811c67 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -104,7 +104,7 @@
            Data == X.Data;
   }
 
-  LLVM_ATTRIBUTE_USED void dump() const;
+  void dump() const;
 };
 } // end anonymous namespace
 
@@ -133,9 +133,7 @@
   };
 } // end llvm namespace
 
-void BindingKey::dump() const {
-  llvm::errs() << *this;
-}
+LLVM_DUMP_METHOD void BindingKey::dump() const { llvm::errs() << *this; }
 
 //===----------------------------------------------------------------------===//
 // Actual Store type.
@@ -224,9 +222,7 @@
    }
   }
 
-  LLVM_ATTRIBUTE_USED void dump() const {
-    dump(llvm::errs(), "\n");
-  }
+  LLVM_DUMP_METHOD void dump() const { dump(llvm::errs(), "\n"); }
 };
 } // end anonymous namespace
 
@@ -376,9 +372,9 @@
   ///  version of that lvalue (i.e., a pointer to the first element of
   ///  the array).  This is called by ExprEngine when evaluating
   ///  casts from arrays to pointers.
-  SVal ArrayToPointer(Loc Array, QualType ElementTy);
+  SVal ArrayToPointer(Loc Array, QualType ElementTy) override;
 
-  StoreRef getInitialStore(const LocationContext *InitLoc) {
+  StoreRef getInitialStore(const LocationContext *InitLoc) override {
     return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this);
   }
 
@@ -400,24 +396,24 @@
                              InvalidatedSymbols &IS,
                              RegionAndSymbolInvalidationTraits &ITraits,
                              InvalidatedRegions *Invalidated,
-                             InvalidatedRegions *InvalidatedTopLevel);
+                             InvalidatedRegions *InvalidatedTopLevel) override;
 
   bool scanReachableSymbols(Store S, const MemRegion *R,
-                            ScanReachableSymbols &Callbacks);
+                            ScanReachableSymbols &Callbacks) override;
 
   RegionBindingsRef removeSubRegionBindings(RegionBindingsConstRef B,
                                             const SubRegion *R);
 
 public: // Part of public interface to class.
 
-  virtual StoreRef Bind(Store store, Loc LV, SVal V) {
+  StoreRef Bind(Store store, Loc LV, SVal V) override {
     return StoreRef(bind(getRegionBindings(store), LV, V).asStore(), *this);
   }
 
   RegionBindingsRef bind(RegionBindingsConstRef B, Loc LV, SVal V);
 
   // BindDefault is only used to initialize a region with a default value.
-  StoreRef BindDefault(Store store, const MemRegion *R, SVal V) {
+  StoreRef BindDefault(Store store, const MemRegion *R, SVal V) override {
     RegionBindingsRef B = getRegionBindings(store);
     assert(!B.lookup(R, BindingKey::Direct));
 
@@ -471,20 +467,20 @@
   /// \brief Create a new store with the specified binding removed.
   /// \param ST the original store, that is the basis for the new store.
   /// \param L the location whose binding should be removed.
-  virtual StoreRef killBinding(Store ST, Loc L);
+  StoreRef killBinding(Store ST, Loc L) override;
 
-  void incrementReferenceCount(Store store) {
+  void incrementReferenceCount(Store store) override {
     getRegionBindings(store).manualRetain();    
   }
   
   /// If the StoreManager supports it, decrement the reference count of
   /// the specified Store object.  If the reference count hits 0, the memory
   /// associated with the object is recycled.
-  void decrementReferenceCount(Store store) {
+  void decrementReferenceCount(Store store) override {
     getRegionBindings(store).manualRelease();
   }
-  
-  bool includedInBindings(Store store, const MemRegion *region) const;
+
+  bool includedInBindings(Store store, const MemRegion *region) const override;
 
   /// \brief Return the value bound to specified location in a given state.
   ///
@@ -499,7 +495,7 @@
   ///       return undefined
   ///     else
   ///       return symbolic
-  virtual SVal getBinding(Store S, Loc L, QualType T) {
+  SVal getBinding(Store S, Loc L, QualType T) override {
     return getBinding(getRegionBindings(S), L, T);
   }
 
@@ -564,15 +560,16 @@
   /// removeDeadBindings - Scans the RegionStore of 'state' for dead values.
   ///  It returns a new Store with these values removed.
   StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
-                              SymbolReaper& SymReaper);
-  
+                              SymbolReaper& SymReaper) override;
+
   //===------------------------------------------------------------------===//
   // Region "extents".
   //===------------------------------------------------------------------===//
 
   // FIXME: This method will soon be eliminated; see the note in Store.h.
   DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
-                                         const MemRegion* R, QualType EleTy);
+                                         const MemRegion* R,
+                                         QualType EleTy) override;
 
   //===------------------------------------------------------------------===//
   // Utility methods.
@@ -585,9 +582,9 @@
   }
 
   void print(Store store, raw_ostream &Out, const char* nl,
-             const char *sep);
+             const char *sep) override;
 
-  void iterBindings(Store store, BindingsHandler& f) {
+  void iterBindings(Store store, BindingsHandler& f) override {
     RegionBindingsRef B = getRegionBindings(store);
     for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
       const ClusterBindings &Cluster = I.getData();
@@ -1016,7 +1013,7 @@
          BI != BE; ++BI) {
       const VarRegion *VR = BI.getCapturedRegion();
       const VarDecl *VD = VR->getDecl();
-      if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage()) {
+      if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage()) {
         AddToWorkList(VR);
       }
       else if (Loc::isLocType(VR->getValueType())) {
@@ -1630,7 +1627,7 @@
   // Lazy binding?
   Store lazyBindingStore = NULL;
   const SubRegion *lazyBindingRegion = NULL;
-  llvm::tie(lazyBindingStore, lazyBindingRegion) = findLazyBinding(B, R, R);
+  std::tie(lazyBindingStore, lazyBindingRegion) = findLazyBinding(B, R, R);
   if (lazyBindingRegion)
     return getLazyBinding(lazyBindingRegion,
                           getRegionBindings(lazyBindingStore));
@@ -1791,7 +1788,7 @@
   // values to return.
   const ClusterBindings *Cluster = B.lookup(LazyR->getBaseRegion());
   if (!Cluster)
-    return (LazyBindingsMap[LCV.getCVData()] = llvm_move(List));
+    return (LazyBindingsMap[LCV.getCVData()] = std::move(List));
 
   SmallVector<BindingPair, 32> Bindings;
   collectSubRegionBindings(Bindings, svalBuilder, *Cluster, LazyR,
@@ -1813,7 +1810,7 @@
     List.push_back(V);
   }
 
-  return (LazyBindingsMap[LCV.getCVData()] = llvm_move(List));
+  return (LazyBindingsMap[LCV.getCVData()] = std::move(List));
 }
 
 NonLoc RegionStoreManager::createLazyBinding(RegionBindingsConstRef B,
@@ -2066,9 +2063,7 @@
     if (Class->getNumBases() != 0 || Class->getNumVBases() != 0)
       return None;
 
-  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
-       I != E; ++I) {
-    const FieldDecl *FD = *I;
+  for (const auto *FD : RD->fields()) {
     if (FD->isUnnamedBitfield())
       continue;
 
@@ -2081,7 +2076,7 @@
     if (!(Ty->isScalarType() || Ty->isReferenceType()))
       return None;
 
-    Fields.push_back(*I);
+    Fields.push_back(FD);
   }
 
   RegionBindingsRef NewB = B;
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
index adc5465..3ed2bde 100644
--- a/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -362,7 +362,7 @@
 DefinedOrUnknownSVal SValBuilder::evalEQ(ProgramStateRef state,
                                          DefinedOrUnknownSVal lhs,
                                          DefinedOrUnknownSVal rhs) {
-  return evalBinOp(state, BO_EQ, lhs, rhs, Context.IntTy)
+  return evalBinOp(state, BO_EQ, lhs, rhs, getConditionType())
       .castAs<DefinedOrUnknownSVal>();
 }
 
@@ -376,7 +376,7 @@
     ToTy = Context.getUnqualifiedArrayType(ToTy, Quals1);
     FromTy = Context.getUnqualifiedArrayType(FromTy, Quals2);
 
-    // Make sure that non cvr-qualifiers the other qualifiers (e.g., address
+    // Make sure that, aside from cvr-qualifiers, the other qualifiers (e.g., address
     // spaces) are identical.
     Quals1.removeCVRQualifiers();
     Quals2.removeCVRQualifiers();
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index 28a9a4d..21e2283 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -34,7 +34,7 @@
   //===------------------------------------------------------------------===//
 
   ProgramStateRef assume(ProgramStateRef state, DefinedSVal Cond,
-                        bool Assumption);
+                        bool Assumption) override;
 
   ProgramStateRef assume(ProgramStateRef state, NonLoc Cond, bool Assumption);
 
@@ -82,7 +82,7 @@
   BasicValueFactory &getBasicVals() const { return SVB.getBasicValueFactory(); }
   SymbolManager &getSymbolManager() const { return SVB.getSymbolManager(); }
 
-  bool canReasonAbout(SVal X) const;
+  bool canReasonAbout(SVal X) const override;
 
   ProgramStateRef assumeAux(ProgramStateRef state,
                                 NonLoc Cond,
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index cc0ee0b..b488d3c 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -21,9 +21,9 @@
 namespace {
 class SimpleSValBuilder : public SValBuilder {
 protected:
-  virtual SVal dispatchCast(SVal val, QualType castTy);
-  virtual SVal evalCastFromNonLoc(NonLoc val, QualType castTy);
-  virtual SVal evalCastFromLoc(Loc val, QualType castTy);
+  SVal dispatchCast(SVal val, QualType castTy) override;
+  SVal evalCastFromNonLoc(NonLoc val, QualType castTy) override;
+  SVal evalCastFromLoc(Loc val, QualType castTy) override;
 
 public:
   SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
@@ -31,19 +31,19 @@
                     : SValBuilder(alloc, context, stateMgr) {}
   virtual ~SimpleSValBuilder() {}
 
-  virtual SVal evalMinus(NonLoc val);
-  virtual SVal evalComplement(NonLoc val);
-  virtual SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
-                           NonLoc lhs, NonLoc rhs, QualType resultTy);
-  virtual SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
-                           Loc lhs, Loc rhs, QualType resultTy);
-  virtual SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
-                           Loc lhs, NonLoc rhs, QualType resultTy);
+  SVal evalMinus(NonLoc val) override;
+  SVal evalComplement(NonLoc val) override;
+  SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
+                   NonLoc lhs, NonLoc rhs, QualType resultTy) override;
+  SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
+                   Loc lhs, Loc rhs, QualType resultTy) override;
+  SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
+                   Loc lhs, NonLoc rhs, QualType resultTy) override;
 
   /// getKnownValue - evaluates a given SVal. If the SVal has only one possible
   ///  (integer) value, that value is returned. Otherwise, returns NULL.
-  virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V);
-  
+  const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V) override;
+
   SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
                      const llvm::APSInt &RHS, QualType resultTy);
 };
@@ -97,7 +97,7 @@
     return UnknownVal();
   }
 
-  // If value is a non integer constant, produce unknown.
+  // If value is a non-integer constant, produce unknown.
   if (!val.getAs<nonloc::ConcreteInt>())
     return UnknownVal();
 
@@ -108,7 +108,7 @@
   }
 
   // Only handle casts from integers to integers - if val is an integer constant
-  // being cast to a non integer type, produce unknown.
+  // being cast to a non-integer type, produce unknown.
   if (!isLocType && !castTy->isIntegralOrEnumerationType())
     return UnknownVal();
 
@@ -158,7 +158,7 @@
       }
 
       case loc::GotoLabelKind:
-        // Labels and non symbolic memory regions are always true.
+        // Labels and non-symbolic memory regions are always true.
         return makeTruthVal(true, castTy);
     }
   }
@@ -569,11 +569,10 @@
   // members and the units in which bit-fields reside have addresses that
   // increase in the order in which they are declared."
   bool leftFirst = (op == BO_LT || op == BO_LE);
-  for (RecordDecl::field_iterator I = RD->field_begin(),
-       E = RD->field_end(); I!=E; ++I) {
-    if (*I == LeftFD)
+  for (const auto *I : RD->fields()) {
+    if (I == LeftFD)
       return SVB.makeTruthVal(leftFirst, resultTy);
-    if (*I == RightFD)
+    if (I == RightFD)
       return SVB.makeTruthVal(!leftFirst, resultTy);
   }
 
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 1b56f82..a04cb41 100644
--- a/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -326,11 +326,7 @@
 }
 
 SymbolManager::~SymbolManager() {
-  for (SymbolDependTy::const_iterator I = SymbolDependencies.begin(),
-       E = SymbolDependencies.end(); I != E; ++I) {
-    delete I->second;
-  }
-
+  llvm::DeleteContainerSeconds(SymbolDependencies);
 }
 
 bool SymbolManager::canSymbolicate(QualType T) {
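
llvm::DeleteContainerSeconds replaces the hand-written destructor loop: it deletes the mapped pointer of every entry and then clears the container. Roughly equivalent standalone code, with a placeholder value type:

    #include <map>

    struct SymbolDeps { int Count; };   // invented stand-in for the mapped type

    // What DeleteContainerSeconds amounts to for a map of owned pointers.
    static void deleteSeconds(std::map<int, SymbolDeps *> &M) {
      for (auto &Entry : M)
        delete Entry.second;
      M.clear();
    }

    int main() {
      std::map<int, SymbolDeps *> Deps;
      Deps[1] = new SymbolDeps();
      Deps[2] = new SymbolDeps();
      deleteSeconds(Deps);
      return Deps.empty() ? 0 : 1;
    }
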
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 9efe997..563924d 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -13,13 +13,13 @@
 
 #define DEBUG_TYPE "AnalysisConsumer"
 
-#include "AnalysisConsumer.h"
+#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
 #include "clang/AST/ASTConsumer.h"
+#include "clang/AST/DataRecursiveASTVisitor.h"
 #include "clang/AST/Decl.h"
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/ParentMap.h"
-#include "clang/AST/RecursiveASTVisitor.h"
 #include "clang/Analysis/Analyses/LiveVariables.h"
 #include "clang/Analysis/CFG.h"
 #include "clang/Analysis/CallGraph.h"
@@ -36,7 +36,6 @@
 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
 #include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
 #include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
@@ -45,6 +44,7 @@
 #include "llvm/Support/Program.h"
 #include "llvm/Support/Timer.h"
 #include "llvm/Support/raw_ostream.h"
+#include <memory>
 #include <queue>
 
 using namespace clang;
@@ -90,12 +90,12 @@
   ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag)
     : Diag(Diag), IncludePath(false) {}
   virtual ~ClangDiagPathDiagConsumer() {}
-  virtual StringRef getName() const { return "ClangDiags"; }
+  StringRef getName() const override { return "ClangDiags"; }
 
-  virtual bool supportsLogicalOpControlFlow() const { return true; }
-  virtual bool supportsCrossFileDiagnostics() const { return true; }
+  bool supportsLogicalOpControlFlow() const override { return true; }
+  bool supportsCrossFileDiagnostics() const override { return true; }
 
-  virtual PathGenerationScheme getGenerationScheme() const {
+  PathGenerationScheme getGenerationScheme() const override {
     return IncludePath ? Minimal : None;
   }
 
@@ -103,35 +103,17 @@
     IncludePath = true;
   }
 
-  void emitDiag(SourceLocation L, unsigned DiagID,
-                ArrayRef<SourceRange> Ranges) {
-    DiagnosticBuilder DiagBuilder = Diag.Report(L, DiagID);
-
-    for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
-         I != E; ++I) {
-      DiagBuilder << *I;
-    }
-  }
-
   void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
-                            FilesMade *filesMade) {
+                            FilesMade *filesMade) override {
+    unsigned WarnID = Diag.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
+    unsigned NoteID = Diag.getCustomDiagID(DiagnosticsEngine::Note, "%0");
+
     for (std::vector<const PathDiagnostic*>::iterator I = Diags.begin(),
          E = Diags.end(); I != E; ++I) {
       const PathDiagnostic *PD = *I;
-      StringRef desc = PD->getShortDescription();
-      SmallString<512> TmpStr;
-      llvm::raw_svector_ostream Out(TmpStr);
-      for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) {
-        if (*I == '%')
-          Out << "%%";
-        else
-          Out << *I;
-      }
-      Out.flush();
-      unsigned ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning,
-                                                TmpStr);
-      SourceLocation L = PD->getLocation().asLocation();
-      emitDiag(L, ErrorDiag, PD->path.back()->getRanges());
+      SourceLocation WarnLoc = PD->getLocation().asLocation();
+      Diag.Report(WarnLoc, WarnID) << PD->getShortDescription()
+                                   << PD->path.back()->getRanges();
 
       if (!IncludePath)
         continue;
@@ -140,11 +122,9 @@
       for (PathPieces::const_iterator PI = FlatPath.begin(),
                                       PE = FlatPath.end();
            PI != PE; ++PI) {
-        unsigned NoteID = Diag.getCustomDiagID(DiagnosticsEngine::Note,
-                                               (*PI)->getString());
-
         SourceLocation NoteLoc = (*PI)->getLocation().asLocation();
-        emitDiag(NoteLoc, NoteID, (*PI)->getRanges());
+        Diag.Report(NoteLoc, NoteID) << (*PI)->getString()
+                                     << (*PI)->getRanges();
       }
     }
   }
@@ -157,8 +137,8 @@
 
 namespace {
 
-class AnalysisConsumer : public ASTConsumer,
-                         public RecursiveASTVisitor<AnalysisConsumer> {
+class AnalysisConsumer : public AnalysisASTConsumer,
+                         public DataRecursiveASTVisitor<AnalysisConsumer> {
   enum {
     AM_None = 0,
     AM_Syntax = 0x1,
@@ -191,8 +171,8 @@
   StoreManagerCreator CreateStoreMgr;
   ConstraintManagerCreator CreateConstraintMgr;
 
-  OwningPtr<CheckerManager> checkerMgr;
-  OwningPtr<AnalysisManager> Mgr;
+  std::unique_ptr<CheckerManager> checkerMgr;
+  std::unique_ptr<AnalysisManager> Mgr;
 
   /// Time the analysis of each translation unit.
   static llvm::Timer* TUTotalTimer;
@@ -220,21 +200,24 @@
   }
 
   void DigestAnalyzerOptions() {
-    // Create the PathDiagnosticConsumer.
-    ClangDiagPathDiagConsumer *clangDiags =
-      new ClangDiagPathDiagConsumer(PP.getDiagnostics());
-    PathConsumers.push_back(clangDiags);
+    if (Opts->AnalysisDiagOpt != PD_NONE) {
+      // Create the PathDiagnosticConsumer.
+      ClangDiagPathDiagConsumer *clangDiags =
+          new ClangDiagPathDiagConsumer(PP.getDiagnostics());
+      PathConsumers.push_back(clangDiags);
 
-    if (Opts->AnalysisDiagOpt == PD_TEXT) {
-      clangDiags->enablePaths();
+      if (Opts->AnalysisDiagOpt == PD_TEXT) {
+        clangDiags->enablePaths();
 
-    } else if (!OutDir.empty()) {
-      switch (Opts->AnalysisDiagOpt) {
-      default:
-#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN) \
-        case PD_##NAME: CREATEFN(*Opts.getPtr(), PathConsumers, OutDir, PP);\
-        break;
+      } else if (!OutDir.empty()) {
+        switch (Opts->AnalysisDiagOpt) {
+        default:
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN)                    \
+  case PD_##NAME:                                                              \
+    CREATEFN(*Opts.getPtr(), PathConsumers, OutDir, PP);                       \
+    break;
 #include "clang/StaticAnalyzer/Core/Analyses.def"
+        }
       }
     }
 
@@ -299,7 +282,7 @@
     }
   }
 
-  virtual void Initialize(ASTContext &Context) {
+  void Initialize(ASTContext &Context) override {
     Ctx = &Context;
     checkerMgr.reset(createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
                                           PP.getDiagnostics()));
@@ -315,10 +298,10 @@
 
   /// \brief Store the top level decls in the set to be processed later on.
   /// (Doing this pre-processing avoids deserialization of data from PCH.)
-  virtual bool HandleTopLevelDecl(DeclGroupRef D);
-  virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
+  bool HandleTopLevelDecl(DeclGroupRef D) override;
+  void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override;
 
-  virtual void HandleTranslationUnit(ASTContext &C);
+  void HandleTranslationUnit(ASTContext &C) override;
 
   /// \brief Determine which inlining mode should be used when this function is
   /// analyzed. This allows to redefine the default inlining policies when
@@ -389,6 +372,11 @@
     return true;
   }
 
+  virtual void
+  AddDiagnosticConsumer(PathDiagnosticConsumer *Consumer) override {
+    PathConsumers.push_back(Consumer);
+  }
+
 private:
   void storeTopLevelDecls(DeclGroupRef DG);
 
@@ -653,7 +641,7 @@
   ExprEngine Eng(*Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries,IMode);
 
   // Set the graph auditor.
-  OwningPtr<ExplodedNode::Auditor> Auditor;
+  std::unique_ptr<ExplodedNode::Auditor> Auditor;
   if (Mgr->options.visualizeExplodedGraphWithUbiGraph) {
     Auditor.reset(CreateUbiViz());
     ExplodedNode::SetAuditor(Auditor.get());
@@ -699,10 +687,10 @@
 // AnalysisConsumer creation.
 //===----------------------------------------------------------------------===//
 
-ASTConsumer* ento::CreateAnalysisConsumer(const Preprocessor& pp,
-                                          const std::string& outDir,
-                                          AnalyzerOptionsRef opts,
-                                          ArrayRef<std::string> plugins) {
+AnalysisASTConsumer *
+ento::CreateAnalysisConsumer(const Preprocessor &pp, const std::string &outDir,
+                             AnalyzerOptionsRef opts,
+                             ArrayRef<std::string> plugins) {
   // Disable the effects of '-Werror' when using the AnalysisConsumer.
   pp.getDiagnostics().setWarningsAsErrors(false);
 
@@ -716,7 +704,7 @@
 namespace {
 
 class UbigraphViz : public ExplodedNode::Auditor {
-  OwningPtr<raw_ostream> Out;
+  std::unique_ptr<raw_ostream> Out;
   std::string Filename;
   unsigned Cntr;
 
@@ -728,7 +716,7 @@
 
   ~UbigraphViz();
 
-  virtual void AddEdge(ExplodedNode *Src, ExplodedNode *Dst);
+  void AddEdge(ExplodedNode *Src, ExplodedNode *Dst) override;
 };
 
 } // end anonymous namespace
@@ -739,10 +727,10 @@
   llvm::sys::fs::createTemporaryFile("llvm_ubi", "", FD, P);
   llvm::errs() << "Writing '" << P.str() << "'.\n";
 
-  OwningPtr<llvm::raw_fd_ostream> Stream;
+  std::unique_ptr<llvm::raw_fd_ostream> Stream;
   Stream.reset(new llvm::raw_fd_ostream(FD, true));
 
-  return new UbigraphViz(Stream.take(), P);
+  return new UbigraphViz(Stream.release(), P);
 }
 
 void UbigraphViz::AddEdge(ExplodedNode *Src, ExplodedNode *Dst) {
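
AnalysisConsumer.cpp also picks up the OwningPtr to std::unique_ptr migration seen throughout the patch, with take() becoming release() where ownership is handed back out (for example, Stream.take() becomes Stream.release() in CreateUbiViz). A minimal sketch of that hand-off, with an invented Widget type:

    #include <memory>

    struct Widget { int Value = 0; };

    static Widget *createWidget() {
      std::unique_ptr<Widget> W(new Widget());  // owns the object, like OwningPtr did
      W->Value = 7;
      return W.release();  // transfer ownership to the caller; formerly take()
    }

    int main() {
      std::unique_ptr<Widget> Owned(createWidget());
      return Owned->Value == 7 ? 0 : 1;
    }
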
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
deleted file mode 100644
index b75220b..0000000
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//===--- AnalysisConsumer.h - Front-end Analysis Engine Hooks ---*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This header contains the functions necessary for a front-end to run various
-// analyses.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_GR_ANALYSISCONSUMER_H
-#define LLVM_CLANG_GR_ANALYSISCONSUMER_H
-
-#include "clang/Basic/LLVM.h"
-#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
-#include <string>
-
-namespace clang {
-
-class ASTConsumer;
-class Preprocessor;
-class DiagnosticsEngine;
-
-namespace ento {
-class CheckerManager;
-
-/// CreateAnalysisConsumer - Creates an ASTConsumer to run various code
-/// analysis passes.  (The set of analyses run is controlled by command-line
-/// options.)
-ASTConsumer* CreateAnalysisConsumer(const Preprocessor &pp,
-                                    const std::string &output,
-                                    AnalyzerOptionsRef opts,
-                                    ArrayRef<std::string> plugins);
-
-} // end GR namespace
-
-} // end clang namespace
-
-#endif
diff --git a/lib/StaticAnalyzer/Frontend/Android.mk b/lib/StaticAnalyzer/Frontend/Android.mk
index 031c330..ccffdad 100644
--- a/lib/StaticAnalyzer/Frontend/Android.mk
+++ b/lib/StaticAnalyzer/Frontend/Android.mk
@@ -6,6 +6,7 @@
 clang_static_analyzer_frontend_TBLGEN_TABLES := \
   AttrList.inc \
   Attrs.inc \
+  AttrVisitor.inc \
   CommentCommandList.inc \
   CommentNodes.inc \
   DeclNodes.inc \
diff --git a/lib/StaticAnalyzer/Frontend/CMakeLists.txt b/lib/StaticAnalyzer/Frontend/CMakeLists.txt
index aafb249..5349ed9 100644
--- a/lib/StaticAnalyzer/Frontend/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Frontend/CMakeLists.txt
@@ -1,31 +1,19 @@
-set(LLVM_NO_RTTI 1)
-
 include_directories( ${CMAKE_CURRENT_BINARY_DIR}/../Checkers )
 
+set(LLVM_LINK_COMPONENTS
+  Support
+  )
+
 add_clang_library(clangStaticAnalyzerFrontend
   AnalysisConsumer.cpp
   CheckerRegistration.cpp
   FrontendActions.cpp
-  )
 
-add_dependencies(clangStaticAnalyzerFrontend
+  LINK_LIBS
+  clangAST
+  clangAnalysis
+  clangBasic
+  clangFrontend
   clangStaticAnalyzerCheckers
   clangStaticAnalyzerCore
-  ClangAttrClasses
-  ClangAttrList
-  ClangCommentNodes
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticFrontend
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangStaticAnalyzerFrontend
-  clangBasic
-  clangLex
-  clangAST
-  clangFrontend
-  clangRewriteCore
-  clangRewriteFrontend
-  clangStaticAnalyzerCheckers
   )
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index e7def08..d9d5bae 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -20,11 +20,11 @@
 #include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
 #include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
 #include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/DynamicLibrary.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
+#include <memory>
 
 using namespace clang;
 using namespace ento;
@@ -104,8 +104,8 @@
                                            const LangOptions &langOpts,
                                            ArrayRef<std::string> plugins,
                                            DiagnosticsEngine &diags) {
-  OwningPtr<CheckerManager> checkerMgr(new CheckerManager(langOpts,
-                                                          &opts));
+  std::unique_ptr<CheckerManager> checkerMgr(
+      new CheckerManager(langOpts, &opts));
 
   SmallVector<CheckerOptInfo, 8> checkerOpts;
   for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
@@ -123,7 +123,7 @@
           << checkerOpts[i].getName();
   }
 
-  return checkerMgr.take();
+  return checkerMgr.release();
 }
 
 void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) {
diff --git a/lib/StaticAnalyzer/Frontend/FrontendActions.cpp b/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
index 13971af..aa38077 100644
--- a/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
+++ b/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
@@ -8,8 +8,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
-#include "AnalysisConsumer.h"
 #include "clang/Frontend/CompilerInstance.h"
+#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
 using namespace clang;
 using namespace ento;
 
diff --git a/lib/Tooling/CMakeLists.txt b/lib/Tooling/CMakeLists.txt
index d29e564..a1bf964 100644
--- a/lib/Tooling/CMakeLists.txt
+++ b/lib/Tooling/CMakeLists.txt
@@ -9,22 +9,13 @@
   Refactoring.cpp
   RefactoringCallbacks.cpp
   Tooling.cpp
-  )
 
-add_dependencies(clangTooling
-  ClangAttrClasses
-  ClangAttrList
-  ClangDeclNodes
-  ClangDiagnosticCommon
-  ClangDiagnosticFrontend
-  ClangStmtNodes
-  )
-
-target_link_libraries(clangTooling
-  clangBasic
-  clangFrontend
+  LINK_LIBS
   clangAST
   clangASTMatchers
+  clangBasic
+  clangDriver
+  clangFrontend
+  clangLex
   clangRewriteCore
-  clangRewriteFrontend
   )
diff --git a/lib/Tooling/CommonOptionsParser.cpp b/lib/Tooling/CommonOptionsParser.cpp
index cce4816..e0b844c 100644
--- a/lib/Tooling/CommonOptionsParser.cpp
+++ b/lib/Tooling/CommonOptionsParser.cpp
@@ -54,12 +54,26 @@
     "\n";
 
 CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv,
+                                         cl::OptionCategory &Category,
                                          const char *Overview) {
-  static cl::opt<std::string> BuildPath(
-      "p", cl::desc("Build path"), cl::Optional);
+  static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden);
+
+  static cl::opt<std::string> BuildPath("p", cl::desc("Build path"),
+                                        cl::Optional, cl::cat(Category));
 
   static cl::list<std::string> SourcePaths(
-      cl::Positional, cl::desc("<source0> [... <sourceN>]"), cl::OneOrMore);
+      cl::Positional, cl::desc("<source0> [... <sourceN>]"), cl::OneOrMore,
+      cl::cat(Category));
+
+  // Hide unrelated options.
+  StringMap<cl::Option*> Options;
+  cl::getRegisteredOptions(Options);
+  for (StringMap<cl::Option *>::iterator I = Options.begin(), E = Options.end();
+       I != E; ++I) {
+    if (I->second->Category != &Category && I->first() != "help" &&
+        I->first() != "version")
+      I->second->setHiddenFlag(cl::ReallyHidden);
+  }
 
   Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc,
                                                                    argv));
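
With this change CommonOptionsParser takes a cl::OptionCategory and hides every registered option outside it (except -help and -version). A sketch of how a tool built against this tree might use it; the category and option names are invented, and the constructor call follows the signature shown in the hunk above:

    #include "clang/Tooling/CommonOptionsParser.h"
    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    static cl::OptionCategory MyToolCategory("my-tool options");
    static cl::opt<bool> DumpAST("dump-ast",
                                 cl::desc("Dump the AST after parsing"),
                                 cl::cat(MyToolCategory));

    int main(int argc, const char **argv) {
      // Only -help, -version and the options in MyToolCategory stay visible.
      clang::tooling::CommonOptionsParser OptionsParser(argc, argv,
                                                        MyToolCategory,
                                                        "example tool\n");
      (void)OptionsParser;
      return 0;
    }
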
diff --git a/lib/Tooling/CompilationDatabase.cpp b/lib/Tooling/CompilationDatabase.cpp
index c962055..b513446 100644
--- a/lib/Tooling/CompilationDatabase.cpp
+++ b/lib/Tooling/CompilationDatabase.cpp
@@ -13,22 +13,21 @@
 //===----------------------------------------------------------------------===//
 
 #include "clang/Tooling/CompilationDatabase.h"
-#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
-#include "clang/Tooling/Tooling.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/system_error.h"
-#include <sstream>
-
 #include "clang/Basic/Diagnostic.h"
 #include "clang/Driver/Action.h"
+#include "clang/Driver/Compilation.h"
 #include "clang/Driver/Driver.h"
 #include "clang/Driver/DriverDiagnostic.h"
 #include "clang/Driver/Job.h"
-#include "clang/Driver/Compilation.h"
 #include "clang/Frontend/TextDiagnosticPrinter.h"
-#include "llvm/Support/Host.h"
+#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/SmallString.h"
 #include "llvm/Option/Arg.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
+#include <sstream>
 
 namespace clang {
 namespace tooling {
@@ -44,7 +43,7 @@
        Ie = CompilationDatabasePluginRegistry::end();
        It != Ie; ++It) {
     std::string DatabaseErrorMessage;
-    OwningPtr<CompilationDatabasePlugin> Plugin(It->instantiate());
+    std::unique_ptr<CompilationDatabasePlugin> Plugin(It->instantiate());
     if (CompilationDatabase *DB =
         Plugin->loadFromDirectory(BuildDirectory, DatabaseErrorMessage))
       return DB;
@@ -158,7 +157,7 @@
   UnusedInputDiagConsumer(DiagnosticConsumer *Other) : Other(Other) {}
 
   virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
-                                const Diagnostic &Info) LLVM_OVERRIDE {
+                                const Diagnostic &Info) override {
     if (Info.getID() == clang::diag::warn_drv_input_file_unused) {
       // Arg 1 for this diagnostic is the option that didn't get used.
       UnusedInputs.push_back(Info.getArgStdStr(0));
@@ -204,8 +203,8 @@
 ///          \li true if successful.
 ///          \li false if \c Args cannot be used for compilation jobs (e.g.
 ///          contains an option like -E or -version).
-bool stripPositionalArgs(std::vector<const char *> Args,
-                         std::vector<std::string> &Result) {
+static bool stripPositionalArgs(std::vector<const char *> Args,
+                                std::vector<std::string> &Result) {
   IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
   UnusedInputDiagConsumer DiagClient;
   DiagnosticsEngine Diagnostics(
@@ -214,7 +213,7 @@
 
   // Neither the clang executable nor the default image name is required,
   // since the jobs the driver builds will not be executed.
-  OwningPtr<driver::Driver> NewDriver(new driver::Driver(
+  std::unique_ptr<driver::Driver> NewDriver(new driver::Driver(
       /* ClangExecutable= */ "", llvm::sys::getDefaultTargetTriple(),
       /* DefaultImageName= */ "", Diagnostics));
   NewDriver->setCheckInputsExist(false);
@@ -237,7 +236,12 @@
   // up with no jobs but then this is the user's fault.
   Args.push_back("placeholder.cpp");
 
-  const OwningPtr<driver::Compilation> Compilation(
+  // Remove -no-integrated-as; it's not used for syntax checking,
+  // and it confuses targets which don't support this option.
+  Args.erase(std::remove_if(Args.begin(), Args.end(),
+             MatchesAny(std::string("-no-integrated-as"))), Args.end());
+
+  const std::unique_ptr<driver::Compilation> Compilation(
       NewDriver->BuildCompilation(Args));
 
   const driver::JobList &Jobs = Compilation->getJobs();
@@ -271,6 +275,7 @@
   End = std::remove_if(Args.begin(), End, MatchesAny(DiagClient.UnusedInputs));
 
   // Remove the -c added above as well. It will be at the end right now.
+  assert(strcmp(*(End - 1), "-c") == 0);
   --End;
 
   Result = std::vector<std::string>(Args.begin() + 1, End);
@@ -298,7 +303,8 @@
   std::vector<std::string> ToolCommandLine(1, "clang-tool");
   ToolCommandLine.insert(ToolCommandLine.end(),
                          CommandLine.begin(), CommandLine.end());
-  CompileCommands.push_back(CompileCommand(Directory, ToolCommandLine));
+  CompileCommands.push_back(
+      CompileCommand(Directory, std::move(ToolCommandLine)));
 }
 
 std::vector<CompileCommand>
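
A small standalone sketch of the erase-remove idiom the code above relies on when stripping arguments: std::remove_if only compacts the kept elements and returns the new logical end, so the caller must either erase the trailing range or, as with UnusedInputs above, keep working with the returned iterator. The argument strings are illustrative:

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> Args = {"clang", "-no-integrated-as", "-c",
                                       "placeholder.cpp"};

      // std::remove_if only compacts the elements to keep and returns the new
      // logical end; the vector keeps its old size until erase() trims the tail.
      Args.erase(std::remove_if(Args.begin(), Args.end(),
                                [](const std::string &A) {
                                  return A == "-no-integrated-as";
                                }),
                 Args.end());

      assert(Args.size() == 3);
      return 0;
    }
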
diff --git a/lib/Tooling/FileMatchTrie.cpp b/lib/Tooling/FileMatchTrie.cpp
index 89979a8..dc9999e 100644
--- a/lib/Tooling/FileMatchTrie.cpp
+++ b/lib/Tooling/FileMatchTrie.cpp
@@ -24,7 +24,7 @@
 /// \brief Default \c PathComparator using \c llvm::sys::fs::equivalent().
 struct DefaultPathComparator : public PathComparator {
   virtual ~DefaultPathComparator() {}
-  virtual bool equivalent(StringRef FileA, StringRef FileB) const {
+  bool equivalent(StringRef FileA, StringRef FileB) const override {
     return FileA == FileB || llvm::sys::fs::equivalent(FileA, FileB);
   }
 };
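
A minimal sketch of what the override marker added above buys: if the overriding signature drifts from the base class (for example a dropped const), the compiler rejects it instead of silently introducing a new, never-called virtual function. The comparator types here are hypothetical:

    #include <cstring>

    struct PathComparatorBase {
      virtual ~PathComparatorBase() {}
      virtual bool equivalent(const char *A, const char *B) const = 0;
    };

    struct ExactPathComparator : PathComparatorBase {
      // With 'override', dropping 'const' here (or any other signature
      // mismatch) becomes a compile error instead of a silently added,
      // never-called virtual function.
      bool equivalent(const char *A, const char *B) const override {
        return std::strcmp(A, B) == 0;
      }
    };
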
diff --git a/lib/Tooling/JSONCompilationDatabase.cpp b/lib/Tooling/JSONCompilationDatabase.cpp
index 1e33b41..5fe0980 100644
--- a/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/lib/Tooling/JSONCompilationDatabase.cpp
@@ -118,15 +118,15 @@
 }
 
 class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
-  virtual CompilationDatabase *loadFromDirectory(
-      StringRef Directory, std::string &ErrorMessage) {
+  CompilationDatabase *loadFromDirectory(StringRef Directory,
+                                         std::string &ErrorMessage) override {
     SmallString<1024> JSONDatabasePath(Directory);
     llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
-    OwningPtr<CompilationDatabase> Database(
+    std::unique_ptr<CompilationDatabase> Database(
         JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage));
     if (!Database)
       return NULL;
-    return Database.take();
+    return Database.release();
   }
 };
 
@@ -144,37 +144,37 @@
 JSONCompilationDatabase *
 JSONCompilationDatabase::loadFromFile(StringRef FilePath,
                                       std::string &ErrorMessage) {
-  OwningPtr<llvm::MemoryBuffer> DatabaseBuffer;
+  std::unique_ptr<llvm::MemoryBuffer> DatabaseBuffer;
   llvm::error_code Result =
     llvm::MemoryBuffer::getFile(FilePath, DatabaseBuffer);
   if (Result != 0) {
     ErrorMessage = "Error while opening JSON database: " + Result.message();
     return NULL;
   }
-  OwningPtr<JSONCompilationDatabase> Database(
-    new JSONCompilationDatabase(DatabaseBuffer.take()));
+  std::unique_ptr<JSONCompilationDatabase> Database(
+      new JSONCompilationDatabase(DatabaseBuffer.release()));
   if (!Database->parse(ErrorMessage))
     return NULL;
-  return Database.take();
+  return Database.release();
 }
 
 JSONCompilationDatabase *
 JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
                                         std::string &ErrorMessage) {
-  OwningPtr<llvm::MemoryBuffer> DatabaseBuffer(
+  std::unique_ptr<llvm::MemoryBuffer> DatabaseBuffer(
       llvm::MemoryBuffer::getMemBuffer(DatabaseString));
-  OwningPtr<JSONCompilationDatabase> Database(
-      new JSONCompilationDatabase(DatabaseBuffer.take()));
+  std::unique_ptr<JSONCompilationDatabase> Database(
+      new JSONCompilationDatabase(DatabaseBuffer.release()));
   if (!Database->parse(ErrorMessage))
     return NULL;
-  return Database.take();
+  return Database.release();
 }
 
 std::vector<CompileCommand>
 JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
   SmallString<128> NativeFilePath;
   llvm::sys::path::native(FilePath, NativeFilePath);
-  std::vector<StringRef> PossibleMatches;
+
   std::string Error;
   llvm::raw_string_ostream ES(Error);
   StringRef Match = MatchTrie.findEquivalent(NativeFilePath.str(), ES);
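
A small standalone sketch of the OwningPtr::take() to std::unique_ptr::release() migration used throughout this patch: the factory keeps the object in a unique_ptr while construction or parsing can still fail, then releases the raw pointer, and ownership, to the caller. The Database type and loadDatabase function are illustrative:

    #include <memory>
    #include <string>
    #include <utility>

    struct Database {
      std::string Path;
      explicit Database(std::string P) : Path(std::move(P)) {}
    };

    // Factory in the style of loadFromFile/loadFromBuffer above: keep the
    // object in a unique_ptr while parsing can still fail, then hand the raw
    // pointer (and responsibility for deleting it) to the caller.
    static Database *loadDatabase(const std::string &Path) {
      std::unique_ptr<Database> DB(new Database(Path));
      if (DB->Path.empty())
        return nullptr; // DB is destroyed automatically on this early return.
      return DB.release();
    }

    int main() {
      std::unique_ptr<Database> DB(loadDatabase("compile_commands.json"));
      return DB ? 0 : 1;
    }
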
diff --git a/lib/Tooling/Refactoring.cpp b/lib/Tooling/Refactoring.cpp
index e165c12..df9600e 100644
--- a/lib/Tooling/Refactoring.cpp
+++ b/lib/Tooling/Refactoring.cpp
@@ -18,9 +18,9 @@
 #include "clang/Lex/Lexer.h"
 #include "clang/Rewrite/Core/Rewriter.h"
 #include "clang/Tooling/Refactoring.h"
-#include "llvm/Support/raw_os_ostream.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/Path.h"
+#include "llvm/Support/raw_os_ostream.h"
 
 namespace clang {
 namespace tooling {
@@ -35,12 +35,13 @@
     : FilePath(FilePath), ReplacementRange(Offset, Length),
       ReplacementText(ReplacementText) {}
 
-Replacement::Replacement(SourceManager &Sources, SourceLocation Start,
+Replacement::Replacement(const SourceManager &Sources, SourceLocation Start,
                          unsigned Length, StringRef ReplacementText) {
   setFromSourceLocation(Sources, Start, Length, ReplacementText);
 }
 
-Replacement::Replacement(SourceManager &Sources, const CharSourceRange &Range,
+Replacement::Replacement(const SourceManager &Sources,
+                         const CharSourceRange &Range,
                          StringRef ReplacementText) {
   setFromSourceRange(Sources, Range, ReplacementText);
 }
@@ -99,7 +100,7 @@
          LHS.getReplacementText() == RHS.getReplacementText();
 }
 
-void Replacement::setFromSourceLocation(SourceManager &Sources,
+void Replacement::setFromSourceLocation(const SourceManager &Sources,
                                         SourceLocation Start, unsigned Length,
                                         StringRef ReplacementText) {
   const std::pair<FileID, unsigned> DecomposedLocation =
@@ -121,7 +122,8 @@
 // FIXME: This should go into the Lexer, but we need to figure out how
 // to handle ranges for refactoring in general first - there is no obvious
 // good way how to integrate this into the Lexer yet.
-static int getRangeSize(SourceManager &Sources, const CharSourceRange &Range) {
+static int getRangeSize(const SourceManager &Sources,
+                        const CharSourceRange &Range) {
   SourceLocation SpellingBegin = Sources.getSpellingLoc(Range.getBegin());
   SourceLocation SpellingEnd = Sources.getSpellingLoc(Range.getEnd());
   std::pair<FileID, unsigned> Start = Sources.getDecomposedLoc(SpellingBegin);
@@ -133,7 +135,7 @@
   return End.second - Start.second;
 }
 
-void Replacement::setFromSourceRange(SourceManager &Sources,
+void Replacement::setFromSourceRange(const SourceManager &Sources,
                                      const CharSourceRange &Range,
                                      StringRef ReplacementText) {
   setFromSourceLocation(Sources, Sources.getSpellingLoc(Range.getBegin()),
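
A minimal sketch, assuming the clang Tooling headers, of what the const-qualified SourceManager parameters above enable: a purely read-only helper can take the manager by const reference and still build a Replacement. The replaceStmt helper is illustrative, not part of the patch:

    #include "clang/AST/Stmt.h"
    #include "clang/Basic/SourceManager.h"
    #include "clang/Tooling/Refactoring.h"
    #include "llvm/ADT/StringRef.h"

    // Read-only helper: builds a Replacement covering the token range of 'S'.
    // Only const operations on 'Sources' are needed, which the signatures
    // changed in this patch now allow.
    static clang::tooling::Replacement
    replaceStmt(const clang::SourceManager &Sources, const clang::Stmt *S,
                llvm::StringRef NewText) {
      clang::CharSourceRange Range =
          clang::CharSourceRange::getTokenRange(S->getSourceRange());
      return clang::tooling::Replacement(Sources, Range, NewText);
    }
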
diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp
index a58854d..7425e3b 100644
--- a/lib/Tooling/Tooling.cpp
+++ b/lib/Tooling/Tooling.cpp
@@ -24,6 +24,7 @@
 #include "clang/Tooling/ArgumentsAdjusters.h"
 #include "clang/Tooling/CompilationDatabase.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/config.h"
 #include "llvm/Option/Option.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/FileSystem.h"
@@ -31,7 +32,7 @@
 #include "llvm/Support/raw_ostream.h"
 
 // For chdir, see the comment in ClangTool::run for more information.
-#ifdef _WIN32
+#ifdef LLVM_ON_WIN32
 #  include <direct.h>
 #else
 #  include <unistd.h>
@@ -51,7 +52,7 @@
 /// \brief Builds a clang driver initialized for running clang tools.
 static clang::driver::Driver *newDriver(clang::DiagnosticsEngine *Diagnostics,
                                         const char *BinaryName) {
-  const std::string DefaultOutputName = "a.out";
+  const char *DefaultOutputName = "a.out";
   clang::driver::Driver *CompilerDriver = new clang::driver::Driver(
     BinaryName, llvm::sys::getDefaultTargetTriple(),
     DefaultOutputName, *Diagnostics);
@@ -99,6 +100,7 @@
       *Diagnostics);
   Invocation->getFrontendOpts().DisableFree = false;
   Invocation->getCodeGenOpts().DisableFree = false;
+  Invocation->getDependencyOutputOpts() = DependencyOutputOptions();
   return Invocation;
 }
 
@@ -158,22 +160,22 @@
 public:
   SingleFrontendActionFactory(FrontendAction *Action) : Action(Action) {}
 
-  FrontendAction *create() { return Action; }
+  FrontendAction *create() override { return Action; }
 };
 
 }
 
-ToolInvocation::ToolInvocation(ArrayRef<std::string> CommandLine,
+ToolInvocation::ToolInvocation(std::vector<std::string> CommandLine,
                                ToolAction *Action, FileManager *Files)
-    : CommandLine(CommandLine.vec()),
+    : CommandLine(std::move(CommandLine)),
       Action(Action),
       OwnsAction(false),
       Files(Files),
       DiagConsumer(NULL) {}
 
-ToolInvocation::ToolInvocation(ArrayRef<std::string> CommandLine,
+ToolInvocation::ToolInvocation(std::vector<std::string> CommandLine,
                                FrontendAction *FAction, FileManager *Files)
-    : CommandLine(CommandLine.vec()),
+    : CommandLine(std::move(CommandLine)),
       Action(new SingleFrontendActionFactory(FAction)),
       OwnsAction(true),
       Files(Files),
@@ -196,8 +198,8 @@
 
 bool ToolInvocation::run() {
   std::vector<const char*> Argv;
-  for (int I = 0, E = CommandLine.size(); I != E; ++I)
-    Argv.push_back(CommandLine[I].c_str());
+  for (const std::string &Str : CommandLine)
+    Argv.push_back(Str.c_str());
   const char *const BinaryName = Argv[0];
   IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
   TextDiagnosticPrinter DiagnosticPrinter(
@@ -206,28 +208,25 @@
       IntrusiveRefCntPtr<clang::DiagnosticIDs>(new DiagnosticIDs()), &*DiagOpts,
       DiagConsumer ? DiagConsumer : &DiagnosticPrinter, false);
 
-  const OwningPtr<clang::driver::Driver> Driver(
+  const std::unique_ptr<clang::driver::Driver> Driver(
       newDriver(&Diagnostics, BinaryName));
   // Since the input might only be virtual, don't check whether it exists.
   Driver->setCheckInputsExist(false);
-  const OwningPtr<clang::driver::Compilation> Compilation(
+  const std::unique_ptr<clang::driver::Compilation> Compilation(
       Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
   const llvm::opt::ArgStringList *const CC1Args = getCC1Arguments(
       &Diagnostics, Compilation.get());
   if (CC1Args == NULL) {
     return false;
   }
-  OwningPtr<clang::CompilerInvocation> Invocation(
+  std::unique_ptr<clang::CompilerInvocation> Invocation(
       newInvocation(&Diagnostics, *CC1Args));
-  for (llvm::StringMap<StringRef>::const_iterator
-           It = MappedFileContents.begin(), End = MappedFileContents.end();
-       It != End; ++It) {
+  for (const auto &It : MappedFileContents) {
     // Inject the code as the given file name into the preprocessor options.
-    const llvm::MemoryBuffer *Input =
-        llvm::MemoryBuffer::getMemBuffer(It->getValue());
-    Invocation->getPreprocessorOpts().addRemappedFile(It->getKey(), Input);
+    auto *Input = llvm::MemoryBuffer::getMemBuffer(It.getValue());
+    Invocation->getPreprocessorOpts().addRemappedFile(It.getKey(), Input);
   }
-  return runInvocation(BinaryName, Compilation.get(), Invocation.take());
+  return runInvocation(BinaryName, Compilation.get(), Invocation.release());
 }
 
 bool ToolInvocation::runInvocation(
@@ -254,8 +253,8 @@
 
   // The FrontendAction can have lifetime requirements for Compiler or its
   // members, and we need to ensure it's deleted earlier than Compiler. So we
-  // pass it to an OwningPtr declared after the Compiler variable.
-  OwningPtr<FrontendAction> ScopedToolAction(create());
+  // pass it to an std::unique_ptr declared after the Compiler variable.
+  std::unique_ptr<FrontendAction> ScopedToolAction(create());
 
   // Create the compiler's actual diagnostics engine.
   Compiler.createDiagnostics(DiagConsumer, /*ShouldOwnClient=*/false);
@@ -275,15 +274,15 @@
     : Files(new FileManager(FileSystemOptions())), DiagConsumer(NULL) {
   ArgsAdjusters.push_back(new ClangStripOutputAdjuster());
   ArgsAdjusters.push_back(new ClangSyntaxOnlyAdjuster());
-  for (unsigned I = 0, E = SourcePaths.size(); I != E; ++I) {
-    SmallString<1024> File(getAbsolutePath(SourcePaths[I]));
+  for (const auto &SourcePath : SourcePaths) {
+    std::string File(getAbsolutePath(SourcePath));
 
     std::vector<CompileCommand> CompileCommandsForFile =
-      Compilations.getCompileCommands(File.str());
+      Compilations.getCompileCommands(File);
     if (!CompileCommandsForFile.empty()) {
-      for (int I = 0, E = CompileCommandsForFile.size(); I != E; ++I) {
-        CompileCommands.push_back(std::make_pair(File.str(),
-                                  CompileCommandsForFile[I]));
+      for (CompileCommand &CompileCommand : CompileCommandsForFile) {
+        CompileCommands.push_back(
+            std::make_pair(File, std::move(CompileCommand)));
       }
     } else {
       // FIXME: There are two use cases here: doing a fuzzy
@@ -332,8 +331,7 @@
       llvm::sys::fs::getMainExecutable("clang_tool", &StaticSymbol);
 
   bool ProcessingFailed = false;
-  for (unsigned I = 0; I < CompileCommands.size(); ++I) {
-    std::string File = CompileCommands[I].first;
+  for (const auto &Command : CompileCommands) {
     // FIXME: chdir is thread hostile; on the other hand, creating the same
     // behavior as chdir is complex: chdir resolves the path once, thus
     // guaranteeing that all subsequent relative path operations work
@@ -341,28 +339,27 @@
     // for example on network filesystems, where symlinks might be switched
     // during runtime of the tool. Fixing this depends on having a file system
     // abstraction that allows openat() style interactions.
-    if (chdir(CompileCommands[I].second.Directory.c_str()))
+    if (chdir(Command.second.Directory.c_str()))
       llvm::report_fatal_error("Cannot chdir into \"" +
-                               CompileCommands[I].second.Directory + "\n!");
-    std::vector<std::string> CommandLine = CompileCommands[I].second.CommandLine;
-    for (unsigned I = 0, E = ArgsAdjusters.size(); I != E; ++I)
-      CommandLine = ArgsAdjusters[I]->Adjust(CommandLine);
+                               Twine(Command.second.Directory) + "\"!");
+    std::vector<std::string> CommandLine = Command.second.CommandLine;
+    for (ArgumentsAdjuster *Adjuster : ArgsAdjusters)
+      CommandLine = Adjuster->Adjust(CommandLine);
     assert(!CommandLine.empty());
     CommandLine[0] = MainExecutable;
     // FIXME: We need a callback mechanism for the tool writer to output a
     // customized message for each file.
     DEBUG({
-      llvm::dbgs() << "Processing: " << File << ".\n";
+      llvm::dbgs() << "Processing: " << Command.first << ".\n";
     });
-    ToolInvocation Invocation(CommandLine, Action, Files.getPtr());
+    ToolInvocation Invocation(std::move(CommandLine), Action, Files.getPtr());
     Invocation.setDiagnosticConsumer(DiagConsumer);
-    for (int I = 0, E = MappedFileContents.size(); I != E; ++I) {
-      Invocation.mapVirtualFile(MappedFileContents[I].first,
-                                MappedFileContents[I].second);
+    for (const auto &MappedFile : MappedFileContents) {
+      Invocation.mapVirtualFile(MappedFile.first, MappedFile.second);
     }
     if (!Invocation.run()) {
       // FIXME: Diagnostics should be used instead.
-      llvm::errs() << "Error while processing " << File << ".\n";
+      llvm::errs() << "Error while processing " << Command.first << ".\n";
       ProcessingFailed = true;
     }
   }
@@ -378,7 +375,7 @@
   ASTBuilderAction(std::vector<ASTUnit *> &ASTs) : ASTs(ASTs) {}
 
   bool runInvocation(CompilerInvocation *Invocation, FileManager *Files,
-                     DiagnosticConsumer *DiagConsumer) {
+                     DiagnosticConsumer *DiagConsumer) override {
     // FIXME: This should use the provided FileManager.
     ASTUnit *AST = ASTUnit::LoadFromCompilerInvocation(
         Invocation, CompilerInstance::createDiagnostics(
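
In the style of the Tooling unit tests, a minimal usage sketch of the ToolInvocation path touched above, assuming the post-patch constructor that takes the command line by value: map an in-memory file and run a SyntaxOnlyAction over it. The command line, virtual path, and code string are illustrative:

    #include "clang/Basic/FileManager.h"
    #include "clang/Frontend/FrontendActions.h"
    #include "clang/Tooling/Tooling.h"
    #include "llvm/ADT/IntrusiveRefCntPtr.h"

    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::string> CommandLine;
      CommandLine.push_back("clang-tool");
      CommandLine.push_back("-fsyntax-only");
      CommandLine.push_back("/virtual/input.cc");

      llvm::IntrusiveRefCntPtr<clang::FileManager> Files(
          new clang::FileManager(clang::FileSystemOptions()));

      // With this patch the command line is taken by value, so it can be moved.
      clang::tooling::ToolInvocation Invocation(std::move(CommandLine),
                                                new clang::SyntaxOnlyAction,
                                                Files.getPtr());
      Invocation.mapVirtualFile("/virtual/input.cc", "int main() { return 0; }");
      return Invocation.run() ? 0 : 1;
    }
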
diff --git a/lib/include b/lib/include
deleted file mode 120000
index 74ea861..0000000
--- a/lib/include
+++ /dev/null
@@ -1 +0,0 @@
-Headers/
\ No newline at end of file