Update aosp/master Clang for rebase to r222490.

Change-Id: Ic557ac55e97fbf6ee08771c7b7c3594777b0aefd
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index 90d4b13..be66f32 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -69,8 +69,9 @@
                                                        bool addTemporaryDtors,
                                                        bool synthesizeBodies,
                                                        bool addStaticInitBranch,
-                                                       bool addCXXNewAllocator)
-  : SynthesizeBodies(synthesizeBodies)
+                                                       bool addCXXNewAllocator,
+                                                       CodeInjector *injector)
+  : Injector(injector), SynthesizeBodies(synthesizeBodies)
 {
   cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
   cfgBuildOptions.AddImplicitDtors = addImplicitDtors;
@@ -84,8 +85,8 @@
   llvm::DeleteContainerSeconds(Contexts);
 }
 
-static BodyFarm &getBodyFarm(ASTContext &C) {
-  static BodyFarm *BF = new BodyFarm(C);
+static BodyFarm &getBodyFarm(ASTContext &C, CodeInjector *injector = nullptr) {
+  static BodyFarm *BF = new BodyFarm(C, injector);
   return *BF;
 }
 
@@ -94,7 +95,7 @@
   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
     Stmt *Body = FD->getBody();
     if (!Body && Manager && Manager->synthesizeBodies()) {
-      Body = getBodyFarm(getASTContext()).getBody(FD);
+      Body = getBodyFarm(getASTContext(), Manager->Injector.get()).getBody(FD);
       if (Body)
         IsAutosynthesized = true;
     }
@@ -103,7 +104,7 @@
   else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
     Stmt *Body = MD->getBody();
     if (!Body && Manager && Manager->synthesizeBodies()) {
-      Body = getBodyFarm(getASTContext()).getBody(MD);
+      Body = getBodyFarm(getASTContext(), Manager->Injector.get()).getBody(MD);
       if (Body)
         IsAutosynthesized = true;
     }
@@ -128,6 +129,13 @@
   return Tmp;
 }
 
+bool AnalysisDeclContext::isBodyAutosynthesizedFromModelFile() const {
+  bool Tmp;
+  Stmt *Body = getBody(Tmp);
+  return Tmp && Body->getLocStart().isValid();
+}
+
+
 const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
   if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
     return MD->getSelfDecl();
@@ -181,8 +189,7 @@
     return getUnoptimizedCFG();
 
   if (!builtCFG) {
-    cfg.reset(CFG::buildCFG(D, getBody(),
-                            &D->getASTContext(), cfgBuildOptions));
+    cfg = CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
     // Even when the cfg is not successfully built, we don't
     // want to try building it again.
     builtCFG = true;
@@ -200,8 +207,8 @@
   if (!builtCompleteCFG) {
     SaveAndRestore<bool> NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges,
                                   false);
-    completeCFG.reset(CFG::buildCFG(D, getBody(), &D->getASTContext(),
-                                    cfgBuildOptions));
+    completeCFG =
+        CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
     // Even when the cfg is not successfully built, we don't
     // want to try building it again.
     builtCompleteCFG = true;
@@ -474,7 +481,7 @@
     // Non-local variables are also directly modified.
     if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
       if (!VD->hasLocalStorage()) {
-        if (Visited.insert(VD))
+        if (Visited.insert(VD).second)
           BEVals.push_back(VD, BC);
       }
     }
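
For context, the Injector member threaded through AnalysisDeclContextManager and BodyFarm above lets a client supply function bodies that BodyFarm cannot synthesize on its own. A minimal sketch of such a client, assuming only the two virtual getBody overloads implied by the call sites in this patch (the class name and the lookup table are illustrative, not part of the change):

#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Analysis/CodeInjector.h"
#include "llvm/ADT/DenseMap.h"

namespace {
// Hypothetical injector: hands back a previously prepared body for a
// declaration, or nullptr so analysis falls back to normal BodyFarm behavior.
class MapCodeInjector : public clang::CodeInjector {
  llvm::DenseMap<const clang::Decl *, clang::Stmt *> Bodies;

public:
  clang::Stmt *getBody(const clang::FunctionDecl *D) override {
    return Bodies.lookup(D);
  }
  clang::Stmt *getBody(const clang::ObjCMethodDecl *D) override {
    return Bodies.lookup(D);
  }
};
} // namespace
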
diff --git a/lib/Analysis/Android.mk b/lib/Analysis/Android.mk
index cde3e31..4c6cc51 100644
--- a/lib/Analysis/Android.mk
+++ b/lib/Analysis/Android.mk
@@ -21,6 +21,7 @@
   CFGReachabilityAnalysis.cpp \
   CFGStmtMap.cpp \
   CocoaConventions.cpp \
+  CodeInjector.cpp \
   Consumed.cpp \
   Dominators.cpp \
   FormatString.cpp \
@@ -32,7 +33,10 @@
   PseudoConstantAnalysis.cpp \
   ReachableCode.cpp \
   ScanfFormatString.cpp \
+  ThreadSafetyCommon.cpp \
   ThreadSafety.cpp \
+  ThreadSafetyLogical.cpp \
+  ThreadSafetyTIL.cpp \
   UninitializedValues.cpp
 
 # For the host
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index 316a18b..23286b2 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "BodyFarm.h"
+#include "clang/Analysis/CodeInjector.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/Decl.h"
 #include "clang/AST/Expr.h"
@@ -223,10 +224,8 @@
        PredicateTy);
   
   // (3) Create the compound statement.
-  Stmt *Stmts[2];
-  Stmts[0] = B;
-  Stmts[1] = CE;
-  CompoundStmt *CS = M.makeCompound(ArrayRef<Stmt*>(Stmts, 2));
+  Stmt *Stmts[] = { B, CE };
+  CompoundStmt *CS = M.makeCompound(Stmts);
   
   // (4) Create the 'if' condition.
   ImplicitCastExpr *LValToRval =
@@ -337,7 +336,7 @@
   Expr *RetVal = isBoolean ? M.makeIntegralCastToBoolean(BoolVal)
                            : M.makeIntegralCast(BoolVal, ResultTy);
   Stmts[1] = M.makeReturn(RetVal);
-  CompoundStmt *Body = M.makeCompound(ArrayRef<Stmt*>(Stmts, 2));
+  CompoundStmt *Body = M.makeCompound(Stmts);
   
   // Construct the else clause.
   BoolVal = M.makeObjCBool(false);
@@ -383,6 +382,7 @@
   }
   
   if (FF) { Val = FF(C, D); }
+  else if (Injector) { Val = Injector->getBody(D); }
   return Val.getValue();
 }
 
diff --git a/lib/Analysis/BodyFarm.h b/lib/Analysis/BodyFarm.h
index 2d200fb..9137943 100644
--- a/lib/Analysis/BodyFarm.h
+++ b/lib/Analysis/BodyFarm.h
@@ -12,8 +12,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_CLANG_ANALYSIS_BODYFARM_H
-#define LLVM_CLANG_ANALYSIS_BODYFARM_H
+#ifndef LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
+#define LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
 
 #include "clang/Basic/LLVM.h"
 #include "llvm/ADT/DenseMap.h"
@@ -27,10 +27,11 @@
 class ObjCMethodDecl;
 class ObjCPropertyDecl;
 class Stmt;
+class CodeInjector;
   
 class BodyFarm {
 public:
-  BodyFarm(ASTContext &C) : C(C) {}
+  BodyFarm(ASTContext &C, CodeInjector *injector) : C(C), Injector(injector) {}
   
   /// Factory method for creating bodies for ordinary functions.
   Stmt *getBody(const FunctionDecl *D);
@@ -43,6 +44,7 @@
 
   ASTContext &C;
   BodyMap Bodies;
+  CodeInjector *Injector;
 };
 }
 
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index d6361e8..d9073aa 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -234,6 +234,12 @@
   }
 };
 
+TryResult bothKnownTrue(TryResult R1, TryResult R2) {
+  if (!R1.isKnown() || !R2.isKnown())
+    return TryResult();
+  return TryResult(R1.isTrue() && R2.isTrue());
+}
+
 class reverse_children {
   llvm::SmallVector<Stmt *, 12> childrenBuf;
   ArrayRef<Stmt*> children;
@@ -300,7 +306,7 @@
   CFGBlock *SwitchTerminatedBlock;
   CFGBlock *DefaultCaseBlock;
   CFGBlock *TryTerminatedBlock;
-  
+
   // Current position in local scope.
   LocalScope::const_iterator ScopePos;
 
@@ -343,7 +349,7 @@
       cachedEntry(nullptr), lastLookup(nullptr) {}
 
   // buildCFG - Used by external clients to construct the CFG.
-  CFG* buildCFG(const Decl *D, Stmt *Statement);
+  std::unique_ptr<CFG> buildCFG(const Decl *D, Stmt *Statement);
 
   bool alwaysAdd(const Stmt *stmt);
   
@@ -410,16 +416,80 @@
   CFGBlock *VisitChildren(Stmt *S);
   CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
 
+  /// When creating the CFG for temporary destructors, we want to mirror the
+  /// branch structure of the corresponding constructor calls.
+  /// Thus, while visiting a statement for temporary destructors, we keep a
+  /// context to keep track of the following information:
+  /// - whether a subexpression is executed unconditionally
+  /// - if a subexpression is executed conditionally, the first
+  ///   CXXBindTemporaryExpr we encounter in that subexpression (which
+  ///   corresponds to the last temporary destructor we have to call for this
+  ///   subexpression) and the CFG block at that point (which will become the
+  ///   successor block when inserting the decision point).
+  ///
+  /// That way, we can build the branch structure for temporary destructors as
+  /// follows:
+  /// 1. If a subexpression is executed unconditionally, we add the temporary
+  ///    destructor calls to the current block.
+  /// 2. If a subexpression is executed conditionally, when we encounter a
+  ///    CXXBindTemporaryExpr:
+  ///    a) If it is the first temporary destructor call in the subexpression,
+  ///       we remember the CXXBindTemporaryExpr and the current block in the
+  ///       TempDtorContext; we start a new block, and insert the temporary
+  ///       destructor call.
+  ///    b) Otherwise, add the temporary destructor call to the current block.
+  ///  3. When we have finished visiting a conditionally executed subexpression,
+  ///     and we found at least one temporary constructor during the visitation
+  ///     (2.a has executed), we insert a decision block that uses the
+  ///     CXXBindTemporaryExpr as terminator, and branches to the current block
+  ///     if the CXXBindTemporaryExpr was marked executed, and otherwise
+  ///     branches to the stored successor.
+  struct TempDtorContext {
+    TempDtorContext()
+        : IsConditional(false), KnownExecuted(true), Succ(nullptr),
+          TerminatorExpr(nullptr) {}
+
+    TempDtorContext(TryResult KnownExecuted)
+        : IsConditional(true), KnownExecuted(KnownExecuted), Succ(nullptr),
+          TerminatorExpr(nullptr) {}
+
+    /// Returns whether we need to start a new branch for a temporary destructor
+    /// call. This is the case when the temporary destructor is
+    /// conditionally executed, and it is the first one we encounter while
+    /// visiting a subexpression - other temporary destructors at the same level
+    /// will be added to the same block and are executed under the same
+    /// condition.
+    bool needsTempDtorBranch() const {
+      return IsConditional && !TerminatorExpr;
+    }
+
+    /// Remember the successor S of a temporary destructor decision branch for
+    /// the corresponding CXXBindTemporaryExpr E.
+    void setDecisionPoint(CFGBlock *S, CXXBindTemporaryExpr *E) {
+      Succ = S;
+      TerminatorExpr = E;
+    }
+
+    const bool IsConditional;
+    const TryResult KnownExecuted;
+    CFGBlock *Succ;
+    CXXBindTemporaryExpr *TerminatorExpr;
+  };
+
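
As a source-level illustration of what this context tracks (example code, not part of the patch): in the function below the temporary is constructed only when 'b' is true, so its destructor must run under the same condition, which the CFG models with a block terminated by the CXXBindTemporaryExpr.

struct A { A() {} ~A() {} };
bool use(const A &) { return true; }

void g(bool b) {
  // A() is constructed only if 'b' evaluates to true; the CFG emits the
  // ~A() temporary destructor in its own block and a decision block that
  // either enters it or skips straight to the merge point.
  (void)(b && use(A()));
}
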
   // Visitors to walk an AST and generate destructors of temporaries in
   // full expression.
-  CFGBlock *VisitForTemporaryDtors(Stmt *E, bool BindToTemporary = false);
-  CFGBlock *VisitChildrenForTemporaryDtors(Stmt *E);
-  CFGBlock *VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E);
-  CFGBlock *VisitCXXBindTemporaryExprForTemporaryDtors(CXXBindTemporaryExpr *E,
-      bool BindToTemporary);
-  CFGBlock *
-  VisitConditionalOperatorForTemporaryDtors(AbstractConditionalOperator *E,
-                                            bool BindToTemporary);
+  CFGBlock *VisitForTemporaryDtors(Stmt *E, bool BindToTemporary,
+                                   TempDtorContext &Context);
+  CFGBlock *VisitChildrenForTemporaryDtors(Stmt *E, TempDtorContext &Context);
+  CFGBlock *VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E,
+                                                 TempDtorContext &Context);
+  CFGBlock *VisitCXXBindTemporaryExprForTemporaryDtors(
+      CXXBindTemporaryExpr *E, bool BindToTemporary, TempDtorContext &Context);
+  CFGBlock *VisitConditionalOperatorForTemporaryDtors(
+      AbstractConditionalOperator *E, bool BindToTemporary,
+      TempDtorContext &Context);
+  void InsertTempDtorDecisionBlock(const TempDtorContext &Context,
+                                   CFGBlock *FalseSucc = nullptr);
 
   // NYS == Not Yet Supported
   CFGBlock *NYS() {
@@ -901,7 +971,7 @@
 ///  body (compound statement).  The ownership of the returned CFG is
 ///  transferred to the caller.  If CFG construction fails, this method returns
 ///  NULL.
-CFG* CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
+std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
   assert(cfg.get());
   if (!Statement)
     return nullptr;
@@ -973,7 +1043,7 @@
   // Create an empty entry block that has no predecessors.
   cfg->setEntry(createBlock());
 
-  return cfg.release();
+  return std::move(cfg);
 }
 
 /// createBlock - Used to lazily create blocks that are connected
@@ -1000,21 +1070,19 @@
   if (!BuildOpts.AddInitializers)
     return Block;
 
-  bool IsReference = false;
   bool HasTemporaries = false;
 
   // Destructors of temporaries in initialization expression should be called
   // after initialization finishes.
   Expr *Init = I->getInit();
   if (Init) {
-    if (FieldDecl *FD = I->getAnyMember())
-      IsReference = FD->getType()->isReferenceType();
     HasTemporaries = isa<ExprWithCleanups>(Init);
 
     if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
       // Generate destructors for temporaries in initialization expression.
+      TempDtorContext Context;
       VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
-          IsReference);
+                             /*BindToTemporary=*/false, Context);
     }
   }
 
@@ -1946,7 +2014,6 @@
     return Block;
   }
 
-  bool IsReference = false;
   bool HasTemporaries = false;
 
   // Guard static initializers under a branch.
@@ -1968,13 +2035,13 @@
   // after initialization finishes.
   Expr *Init = VD->getInit();
   if (Init) {
-    IsReference = VD->getType()->isReferenceType();
     HasTemporaries = isa<ExprWithCleanups>(Init);
 
     if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
       // Generate destructors for temporaries in initialization expression.
+      TempDtorContext Context;
       VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
-          IsReference);
+                             /*BindToTemporary=*/false, Context);
     }
   }
 
@@ -3354,7 +3421,8 @@
   if (BuildOpts.AddTemporaryDtors) {
     // If adding implicit destructors visit the full expression for adding
     // destructors of temporaries.
-    VisitForTemporaryDtors(E->getSubExpr());
+    TempDtorContext Context;
+    VisitForTemporaryDtors(E->getSubExpr(), false, Context);
 
     // Full expression has to be added as CFGStmt so it will be sequenced
     // before destructors of it's temporaries.
@@ -3463,7 +3531,8 @@
   return addStmt(I->getTarget());
 }
 
-CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary) {
+CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary,
+                                             TempDtorContext &Context) {
   assert(BuildOpts.AddImplicitDtors && BuildOpts.AddTemporaryDtors);
 
 tryAgain:
@@ -3473,36 +3542,88 @@
   }
   switch (E->getStmtClass()) {
     default:
-      return VisitChildrenForTemporaryDtors(E);
+      return VisitChildrenForTemporaryDtors(E, Context);
 
     case Stmt::BinaryOperatorClass:
-      return VisitBinaryOperatorForTemporaryDtors(cast<BinaryOperator>(E));
+      return VisitBinaryOperatorForTemporaryDtors(cast<BinaryOperator>(E),
+                                                  Context);
 
     case Stmt::CXXBindTemporaryExprClass:
       return VisitCXXBindTemporaryExprForTemporaryDtors(
-          cast<CXXBindTemporaryExpr>(E), BindToTemporary);
+          cast<CXXBindTemporaryExpr>(E), BindToTemporary, Context);
 
     case Stmt::BinaryConditionalOperatorClass:
     case Stmt::ConditionalOperatorClass:
       return VisitConditionalOperatorForTemporaryDtors(
-          cast<AbstractConditionalOperator>(E), BindToTemporary);
+          cast<AbstractConditionalOperator>(E), BindToTemporary, Context);
 
     case Stmt::ImplicitCastExprClass:
       // For implicit cast we want BindToTemporary to be passed further.
       E = cast<CastExpr>(E)->getSubExpr();
       goto tryAgain;
 
+    case Stmt::CXXFunctionalCastExprClass:
+      // For functional cast we want BindToTemporary to be passed further.
+      E = cast<CXXFunctionalCastExpr>(E)->getSubExpr();
+      goto tryAgain;
+
     case Stmt::ParenExprClass:
       E = cast<ParenExpr>(E)->getSubExpr();
       goto tryAgain;
-      
-    case Stmt::MaterializeTemporaryExprClass:
-      E = cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr();
+
+    case Stmt::MaterializeTemporaryExprClass: {
+      const MaterializeTemporaryExpr* MTE = cast<MaterializeTemporaryExpr>(E);
+      BindToTemporary = (MTE->getStorageDuration() != SD_FullExpression);
+      SmallVector<const Expr *, 2> CommaLHSs;
+      SmallVector<SubobjectAdjustment, 2> Adjustments;
+      // Find the expression whose lifetime needs to be extended.
+      E = const_cast<Expr *>(
+          cast<MaterializeTemporaryExpr>(E)
+              ->GetTemporaryExpr()
+              ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));
+      // Visit the skipped comma operator left-hand sides for other temporaries.
+      for (const Expr *CommaLHS : CommaLHSs) {
+        VisitForTemporaryDtors(const_cast<Expr *>(CommaLHS),
+                               /*BindToTemporary=*/false, Context);
+      }
+      goto tryAgain;
+    }
+
+    case Stmt::BlockExprClass:
+      // Don't recurse into blocks; their subexpressions don't get evaluated
+      // here.
+      return Block;
+
+    case Stmt::LambdaExprClass: {
+      // For lambda expressions, only recurse into the capture initializers,
+      // and not the body.
+      auto *LE = cast<LambdaExpr>(E);
+      CFGBlock *B = Block;
+      for (Expr *Init : LE->capture_inits()) {
+        if (CFGBlock *R = VisitForTemporaryDtors(
+                Init, /*BindToTemporary=*/false, Context))
+          B = R;
+      }
+      return B;
+    }
+
+    case Stmt::CXXDefaultArgExprClass:
+      E = cast<CXXDefaultArgExpr>(E)->getExpr();
+      goto tryAgain;
+
+    case Stmt::CXXDefaultInitExprClass:
+      E = cast<CXXDefaultInitExpr>(E)->getExpr();
       goto tryAgain;
   }
 }
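
To make the new MaterializeTemporaryExpr case concrete, here is an illustrative (non-patch) snippet contrasting the two storage durations it now distinguishes:

struct A { A() {} ~A() {} };

void h() {
  A();              // SD_FullExpression: a temporary destructor element is
                    // emitted at the end of this full expression.
  const A &r = A(); // lifetime-extended: BindToTemporary becomes true, so no
                    // temporary destructor is appended here; the extended
                    // temporary is destroyed when 'r' goes out of scope.
  (void)r;
}
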
 
-CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E) {
+CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E,
+                                                     TempDtorContext &Context) {
+  if (isa<LambdaExpr>(E)) {
+    // Do not visit the children of lambdas; they have their own CFGs.
+    return Block;
+  }
+
   // When visiting children for destructors we want to visit them in reverse
   // order that they will appear in the CFG.  Because the CFG is built
   // bottom-up, this means we visit them in their natural order, which
@@ -3510,165 +3631,126 @@
   CFGBlock *B = Block;
   for (Stmt::child_range I = E->children(); I; ++I) {
     if (Stmt *Child = *I)
-      if (CFGBlock *R = VisitForTemporaryDtors(Child))
+      if (CFGBlock *R = VisitForTemporaryDtors(Child, false, Context))
         B = R;
   }
   return B;
 }
 
-CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E) {
+CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(
+    BinaryOperator *E, TempDtorContext &Context) {
   if (E->isLogicalOp()) {
-    // Destructors for temporaries in LHS expression should be called after
-    // those for RHS expression. Even if this will unnecessarily create a block,
-    // this block will be used at least by the full expression.
-    autoCreateBlock();
-    CFGBlock *ConfluenceBlock = VisitForTemporaryDtors(E->getLHS());
-    if (badCFG)
-      return nullptr;
+    VisitForTemporaryDtors(E->getLHS(), false, Context);
+    TryResult RHSExecuted = tryEvaluateBool(E->getLHS());
+    if (RHSExecuted.isKnown() && E->getOpcode() == BO_LOr)
+      RHSExecuted.negate();
 
-    Succ = ConfluenceBlock;
-    Block = nullptr;
-    CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+    // We do not know at CFG-construction time whether the right-hand side was
+    // executed, thus we add a branch node that depends on the temporary
+    // constructor call.
+    TempDtorContext RHSContext(
+        bothKnownTrue(Context.KnownExecuted, RHSExecuted));
+    VisitForTemporaryDtors(E->getRHS(), false, RHSContext);
+    InsertTempDtorDecisionBlock(RHSContext);
 
-    if (RHSBlock) {
-      if (badCFG)
-        return nullptr;
-
-      // If RHS expression did produce destructors we need to connect created
-      // blocks to CFG in same manner as for binary operator itself.
-      CFGBlock *LHSBlock = createBlock(false);
-      LHSBlock->setTerminator(CFGTerminator(E, true));
-
-      // For binary operator LHS block is before RHS in list of predecessors
-      // of ConfluenceBlock.
-      std::reverse(ConfluenceBlock->pred_begin(),
-          ConfluenceBlock->pred_end());
-
-      // See if this is a known constant.
-      TryResult KnownVal = tryEvaluateBool(E->getLHS());
-      if (KnownVal.isKnown() && (E->getOpcode() == BO_LOr))
-        KnownVal.negate();
-
-      // Link LHSBlock with RHSBlock exactly the same way as for binary operator
-      // itself.
-      if (E->getOpcode() == BO_LOr) {
-        addSuccessor(LHSBlock, KnownVal.isTrue() ? nullptr : ConfluenceBlock);
-        addSuccessor(LHSBlock, KnownVal.isFalse() ? nullptr : RHSBlock);
-      } else {
-        assert (E->getOpcode() == BO_LAnd);
-        addSuccessor(LHSBlock, KnownVal.isFalse() ? nullptr : RHSBlock);
-        addSuccessor(LHSBlock, KnownVal.isTrue() ? nullptr : ConfluenceBlock);
-      }
-
-      Block = LHSBlock;
-      return LHSBlock;
-    }
-
-    Block = ConfluenceBlock;
-    return ConfluenceBlock;
+    return Block;
   }
 
   if (E->isAssignmentOp()) {
     // For assignment operator (=) LHS expression is visited
     // before RHS expression. For destructors visit them in reverse order.
-    CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
-    CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS());
+    CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
+    CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
     return LHSBlock ? LHSBlock : RHSBlock;
   }
 
   // For any other binary operator RHS expression is visited before
   // LHS expression (order of children). For destructors visit them in reverse
   // order.
-  CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS());
-  CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+  CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
+  CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
   return RHSBlock ? RHSBlock : LHSBlock;
 }
 
 CFGBlock *CFGBuilder::VisitCXXBindTemporaryExprForTemporaryDtors(
-    CXXBindTemporaryExpr *E, bool BindToTemporary) {
+    CXXBindTemporaryExpr *E, bool BindToTemporary, TempDtorContext &Context) {
   // First add destructors for temporaries in subexpression.
-  CFGBlock *B = VisitForTemporaryDtors(E->getSubExpr());
+  CFGBlock *B = VisitForTemporaryDtors(E->getSubExpr(), false, Context);
   if (!BindToTemporary) {
     // If lifetime of temporary is not prolonged (by assigning to constant
     // reference) add destructor for it.
 
-    // If the destructor is marked as a no-return destructor, we need to create
-    // a new block for the destructor which does not have as a successor
-    // anything built thus far. Control won't flow out of this block.
     const CXXDestructorDecl *Dtor = E->getTemporary()->getDestructor();
+
     if (Dtor->isNoReturn()) {
-      Succ = B;
+      // If the destructor is marked as a no-return destructor, we need to
+      // create a new block for the destructor which does not have as a
+      // successor anything built thus far. Control won't flow out of this
+      // block.
+      if (B) Succ = B;
       Block = createNoReturnBlock();
+    } else if (Context.needsTempDtorBranch()) {
+      // If we need to introduce a branch, we add a new block that we will hook
+      // up to a decision block later.
+      if (B) Succ = B;
+      Block = createBlock();
     } else {
       autoCreateBlock();
     }
-
+    if (Context.needsTempDtorBranch()) {
+      Context.setDecisionPoint(Succ, E);
+    }
     appendTemporaryDtor(Block, E);
+
     B = Block;
   }
   return B;
 }
 
+void CFGBuilder::InsertTempDtorDecisionBlock(const TempDtorContext &Context,
+                                             CFGBlock *FalseSucc) {
+  if (!Context.TerminatorExpr) {
+    // If no temporary was found, we do not need to insert a decision point.
+    return;
+  }
+  assert(Context.TerminatorExpr);
+  CFGBlock *Decision = createBlock(false);
+  Decision->setTerminator(CFGTerminator(Context.TerminatorExpr, true));
+  addSuccessor(Decision, Block, !Context.KnownExecuted.isFalse());
+  addSuccessor(Decision, FalseSucc ? FalseSucc : Context.Succ,
+               !Context.KnownExecuted.isTrue());
+  Block = Decision;
+}
+
 CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors(
-    AbstractConditionalOperator *E, bool BindToTemporary) {
-  // First add destructors for condition expression.  Even if this will
-  // unnecessarily create a block, this block will be used at least by the full
-  // expression.
-  autoCreateBlock();
-  CFGBlock *ConfluenceBlock = VisitForTemporaryDtors(E->getCond());
-  if (badCFG)
-    return nullptr;
-  if (BinaryConditionalOperator *BCO
-        = dyn_cast<BinaryConditionalOperator>(E)) {
-    ConfluenceBlock = VisitForTemporaryDtors(BCO->getCommon());
-    if (badCFG)
-      return nullptr;
-  }
+    AbstractConditionalOperator *E, bool BindToTemporary,
+    TempDtorContext &Context) {
+  VisitForTemporaryDtors(E->getCond(), false, Context);
+  CFGBlock *ConditionBlock = Block;
+  CFGBlock *ConditionSucc = Succ;
+  TryResult ConditionVal = tryEvaluateBool(E->getCond());
+  TryResult NegatedVal = ConditionVal;
+  if (NegatedVal.isKnown()) NegatedVal.negate();
 
-  // Try to add block with destructors for LHS expression.
-  CFGBlock *LHSBlock = nullptr;
-  Succ = ConfluenceBlock;
-  Block = nullptr;
-  LHSBlock = VisitForTemporaryDtors(E->getTrueExpr(), BindToTemporary);
-  if (badCFG)
-    return nullptr;
+  TempDtorContext TrueContext(
+      bothKnownTrue(Context.KnownExecuted, ConditionVal));
+  VisitForTemporaryDtors(E->getTrueExpr(), BindToTemporary, TrueContext);
+  CFGBlock *TrueBlock = Block;
 
-  // Try to add block with destructors for RHS expression;
-  Succ = ConfluenceBlock;
-  Block = nullptr;
-  CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getFalseExpr(),
-                                              BindToTemporary);
-  if (badCFG)
-    return nullptr;
+  Block = ConditionBlock;
+  Succ = ConditionSucc;
+  TempDtorContext FalseContext(
+      bothKnownTrue(Context.KnownExecuted, NegatedVal));
+  VisitForTemporaryDtors(E->getFalseExpr(), BindToTemporary, FalseContext);
 
-  if (!RHSBlock && !LHSBlock) {
-    // If neither LHS nor RHS expression had temporaries to destroy don't create
-    // more blocks.
-    Block = ConfluenceBlock;
-    return Block;
-  }
-
-  Block = createBlock(false);
-  Block->setTerminator(CFGTerminator(E, true));
-  assert(Block->getTerminator().isTemporaryDtorsBranch());
-
-  // See if this is a known constant.
-  const TryResult &KnownVal = tryEvaluateBool(E->getCond());
-
-  if (LHSBlock) {
-    addSuccessor(Block, LHSBlock, !KnownVal.isFalse());
-  } else if (KnownVal.isFalse()) {
-    addSuccessor(Block, nullptr);
+  if (TrueContext.TerminatorExpr && FalseContext.TerminatorExpr) {
+    InsertTempDtorDecisionBlock(FalseContext, TrueBlock);
+  } else if (TrueContext.TerminatorExpr) {
+    Block = TrueBlock;
+    InsertTempDtorDecisionBlock(TrueContext);
   } else {
-    addSuccessor(Block, ConfluenceBlock);
-    std::reverse(ConfluenceBlock->pred_begin(), ConfluenceBlock->pred_end());
+    InsertTempDtorDecisionBlock(FalseContext);
   }
-
-  if (!RHSBlock)
-    RHSBlock = ConfluenceBlock;
-
-  addSuccessor(Block, RHSBlock, !KnownVal.isTrue());
-
   return Block;
 }
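
Purely for illustration (not part of the patch): with a conditional operator only one arm's temporary is constructed, so only that arm's destructor may run; the rewritten visitor gives each arm its own TempDtorContext and stitches them together via InsertTempDtorDecisionBlock.

struct A { A(int) {} ~A() {} };
void use(const A &) {}

void k(bool b) {
  // Exactly one of A(1) / A(2) is constructed, so exactly one temporary
  // destructor may execute; the CFG branches on the corresponding
  // CXXBindTemporaryExpr to pick the destructor block to enter.
  use(b ? A(1) : A(2));
}
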
 
@@ -3693,10 +3775,9 @@
   return &back();
 }
 
-/// buildCFG - Constructs a CFG from an AST.  Ownership of the returned
-///  CFG is returned to the caller.
-CFG* CFG::buildCFG(const Decl *D, Stmt *Statement, ASTContext *C,
-    const BuildOptions &BO) {
+/// buildCFG - Constructs a CFG from an AST.
+std::unique_ptr<CFG> CFG::buildCFG(const Decl *D, Stmt *Statement,
+                                   ASTContext *C, const BuildOptions &BO) {
   CFGBuilder Builder(C, BO);
   return Builder.buildCFG(D, Statement);
 }
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index 5e97ce2..1df093d 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -1,5 +1,4 @@
 set(LLVM_LINK_COMPONENTS
-  MC
   Support
   )
 
@@ -12,6 +11,7 @@
   CallGraph.cpp
   CocoaConventions.cpp
   Consumed.cpp
+  CodeInjector.cpp
   Dominators.cpp
   FormatString.cpp
   LiveVariables.cpp
@@ -29,6 +29,7 @@
   UninitializedValues.cpp
 
   LINK_LIBS
-  clangBasic
   clangAST
+  clangBasic
+  clangLex
   )
diff --git a/lib/Analysis/CodeInjector.cpp b/lib/Analysis/CodeInjector.cpp
new file mode 100644
index 0000000..76bf364
--- /dev/null
+++ b/lib/Analysis/CodeInjector.cpp
@@ -0,0 +1,15 @@
+//===-- CodeInjector.cpp ----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CodeInjector.h"
+
+using namespace clang;
+
+CodeInjector::CodeInjector() {}
+CodeInjector::~CodeInjector() {}
diff --git a/lib/Analysis/FormatString.cpp b/lib/Analysis/FormatString.cpp
index 851b97e..8c663d8 100644
--- a/lib/Analysis/FormatString.cpp
+++ b/lib/Analysis/FormatString.cpp
@@ -244,6 +244,8 @@
       ++I;
       lmKind = LengthModifier::AsInt3264;
       break;
+    case 'w':
+      lmKind = LengthModifier::AsWide; ++I; break;
   }
   LengthModifier lm(lmPosition, lmKind);
   FS.setLengthModifier(lm);
@@ -504,6 +506,8 @@
     return "a";
   case AsMAllocate:
     return "m";
+  case AsWide:
+    return "w";
   case None:
     return "";
   }
@@ -550,6 +554,9 @@
 
   // GlibC specific specifiers.
   case PrintErrno: return "m";
+
+  // MS specific specifiers.
+  case ZArg: return "Z";
   }
   return nullptr;
 }
@@ -608,8 +615,21 @@
       return true;
       
     // Handle most integer flags
-    case LengthModifier::AsChar:
     case LengthModifier::AsShort:
+      if (Target.getTriple().isOSMSVCRT()) {
+        switch (CS.getKind()) {
+          case ConversionSpecifier::cArg:
+          case ConversionSpecifier::CArg:
+          case ConversionSpecifier::sArg:
+          case ConversionSpecifier::SArg:
+          case ConversionSpecifier::ZArg:
+            return true;
+          default:
+            break;
+        }
+      }
+      // Fall through.
+    case LengthModifier::AsChar:
     case LengthModifier::AsLongLong:
     case LengthModifier::AsQuad:
     case LengthModifier::AsIntMax:
@@ -632,7 +652,7 @@
       }
       
     // Handle 'l' flag
-    case LengthModifier::AsLong:
+    case LengthModifier::AsLong: // or AsWideChar
       switch (CS.getKind()) {
         case ConversionSpecifier::dArg:
         case ConversionSpecifier::DArg:
@@ -655,6 +675,7 @@
         case ConversionSpecifier::cArg:
         case ConversionSpecifier::sArg:
         case ConversionSpecifier::ScanListArg:
+        case ConversionSpecifier::ZArg:
           return true;
         default:
           return false;
@@ -719,6 +740,17 @@
         default:
           return false;
       }
+    case LengthModifier::AsWide:
+      switch (CS.getKind()) {
+        case ConversionSpecifier::cArg:
+        case ConversionSpecifier::CArg:
+        case ConversionSpecifier::sArg:
+        case ConversionSpecifier::SArg:
+        case ConversionSpecifier::ZArg:
+          return Target.getTriple().isOSMSVCRT();
+        default:
+          return false;
+      }
   }
   llvm_unreachable("Invalid LengthModifier Kind!");
 }
@@ -741,6 +773,7 @@
     case LengthModifier::AsInt32:
     case LengthModifier::AsInt3264:
     case LengthModifier::AsInt64:
+    case LengthModifier::AsWide:
       return false;
   }
   llvm_unreachable("Invalid LengthModifier Kind!");
@@ -778,6 +811,7 @@
     case ConversionSpecifier::DArg:
     case ConversionSpecifier::OArg:
     case ConversionSpecifier::UArg:
+    case ConversionSpecifier::ZArg:
       return false;
   }
   llvm_unreachable("Invalid ConversionSpecifier Kind!");
diff --git a/lib/Analysis/FormatStringParsing.h b/lib/Analysis/FormatStringParsing.h
index fba3180..e165296 100644
--- a/lib/Analysis/FormatStringParsing.h
+++ b/lib/Analysis/FormatStringParsing.h
@@ -1,5 +1,5 @@
-#ifndef LLVM_CLANG_FORMAT_PARSING_H
-#define LLVM_CLANG_FORMAT_PARSING_H
+#ifndef LLVM_CLANG_LIB_ANALYSIS_FORMATSTRINGPARSING_H
+#define LLVM_CLANG_LIB_ANALYSIS_FORMATSTRINGPARSING_H
 
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/Type.h"
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 3d6fc03..86b679c 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -82,7 +82,6 @@
 class LiveVariablesImpl {
 public:  
   AnalysisDeclContext &analysisContext;
-  std::vector<LiveVariables::LivenessValues> cfgBlockValues;
   llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
   llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
   llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/Analysis/PrintfFormatString.cpp
index 082a832..146635b 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/Analysis/PrintfFormatString.cpp
@@ -54,7 +54,8 @@
                                                   const char *E,
                                                   unsigned &argIndex,
                                                   const LangOptions &LO,
-                                                  const TargetInfo &Target) {
+                                                  const TargetInfo &Target,
+                                                  bool Warn) {
 
   using namespace clang::analyze_format_string;
   using namespace clang::analyze_printf;
@@ -83,7 +84,8 @@
 
   if (I == E) {
     // No more characters left?
-    H.HandleIncompleteSpecifier(Start, E - Start);
+    if (Warn)
+      H.HandleIncompleteSpecifier(Start, E - Start);
     return true;
   }
 
@@ -93,7 +95,8 @@
 
   if (I == E) {
     // No more characters left?
-    H.HandleIncompleteSpecifier(Start, E - Start);
+    if (Warn)
+      H.HandleIncompleteSpecifier(Start, E - Start);
     return true;
   }
 
@@ -118,7 +121,8 @@
 
   if (I == E) {
     // No more characters left?
-    H.HandleIncompleteSpecifier(Start, E - Start);
+    if (Warn)
+      H.HandleIncompleteSpecifier(Start, E - Start);
     return true;
   }
 
@@ -129,7 +133,8 @@
 
   if (I == E) {
     // No more characters left?
-    H.HandleIncompleteSpecifier(Start, E - Start);
+    if (Warn)
+      H.HandleIncompleteSpecifier(Start, E - Start);
     return true;
   }
 
@@ -137,7 +142,8 @@
   if (*I == '.') {
     ++I;
     if (I == E) {
-      H.HandleIncompleteSpecifier(Start, E - Start);
+      if (Warn)
+        H.HandleIncompleteSpecifier(Start, E - Start);
       return true;
     }
 
@@ -147,7 +153,8 @@
 
     if (I == E) {
       // No more characters left?
-      H.HandleIncompleteSpecifier(Start, E - Start);
+      if (Warn)
+        H.HandleIncompleteSpecifier(Start, E - Start);
       return true;
     }
   }
@@ -155,7 +162,8 @@
   // Look for the length modifier.
   if (ParseLengthModifier(FS, I, E, LO) && I == E) {
     // No more characters left?
-    H.HandleIncompleteSpecifier(Start, E - Start);
+    if (Warn)
+      H.HandleIncompleteSpecifier(Start, E - Start);
     return true;
   }
 
@@ -198,7 +206,7 @@
     case '@': k = ConversionSpecifier::ObjCObjArg; break;
     // Glibc specific.
     case 'm': k = ConversionSpecifier::PrintErrno; break;
-    // Apple-specific
+    // Apple-specific.
     case 'D':
       if (Target.getTriple().isOSDarwin())
         k = ConversionSpecifier::DArg;
@@ -211,6 +219,10 @@
       if (Target.getTriple().isOSDarwin())
         k = ConversionSpecifier::UArg;
       break;
+    // MS specific.
+    case 'Z':
+      if (Target.getTriple().isOSMSVCRT())
+        k = ConversionSpecifier::ZArg;
   }
   PrintfConversionSpecifier CS(conversionPosition, k);
   FS.setConversionSpecifier(CS);
@@ -235,7 +247,7 @@
   // Keep looking for a format specifier until we have exhausted the string.
   while (I != E) {
     const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
-                                                            LO, Target);
+                                                            LO, Target, true);
     // Did a fail-stop error of any kind occur when parsing the specifier?
     // If so, don't do any more processing.
     if (FSR.shouldStop())
@@ -253,6 +265,34 @@
   return false;
 }
 
+bool clang::analyze_format_string::ParseFormatStringHasSArg(const char *I,
+                                                            const char *E,
+                                                            const LangOptions &LO,
+                                                            const TargetInfo &Target) {
+  
+  unsigned argIndex = 0;
+  
+  // Keep looking for a %s format specifier until we have exhausted the string.
+  FormatStringHandler H;
+  while (I != E) {
+    const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
+                                                            LO, Target, false);
+    // Did a fail-stop error of any kind occur when parsing the specifier?
+    // If so, don't do any more processing.
+    if (FSR.shouldStop())
+      return false;
+    // Did we exhaust the string or encounter an error that
+    // we can recover from?
+    if (!FSR.hasValue())
+      continue;
+    const analyze_printf::PrintfSpecifier &FS = FSR.getValue();
+    // Return true if this is a %s format specifier.
+    if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::Kind::sArg)
+      return true;
+  }
+  return false;
+}
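
A minimal sketch of how a caller might use the new helper (the wrapper and its parameters below are assumptions for illustration; in-tree callers would obtain the LangOptions and TargetInfo from Sema or the ASTContext):

#include "clang/Analysis/Analyses/FormatString.h"
#include "llvm/ADT/StringRef.h"

// Returns true if Fmt contains a plain %s conversion specifier.
static bool formatHasSArg(llvm::StringRef Fmt, const clang::LangOptions &LO,
                          const clang::TargetInfo &TI) {
  return clang::analyze_format_string::ParseFormatStringHasSArg(
      Fmt.begin(), Fmt.end(), LO, TI);
}
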
+
 //===----------------------------------------------------------------------===//
 // Methods on PrintfSpecifier.
 //===----------------------------------------------------------------------===//
@@ -266,9 +306,14 @@
 
   if (CS.getKind() == ConversionSpecifier::cArg)
     switch (LM.getKind()) {
-      case LengthModifier::None: return Ctx.IntTy;
+      case LengthModifier::None:
+        return Ctx.IntTy;
       case LengthModifier::AsLong:
+      case LengthModifier::AsWide:
         return ArgType(ArgType::WIntTy, "wint_t");
+      case LengthModifier::AsShort:
+        if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+          return Ctx.IntTy;
       default:
         return ArgType::Invalid();
     }
@@ -303,6 +348,7 @@
         return ArgType(Ctx.getPointerDiffType(), "ptrdiff_t");
       case LengthModifier::AsAllocate:
       case LengthModifier::AsMAllocate:
+      case LengthModifier::AsWide:
         return ArgType::Invalid();
     }
 
@@ -337,6 +383,7 @@
         return ArgType();
       case LengthModifier::AsAllocate:
       case LengthModifier::AsMAllocate:
+      case LengthModifier::AsWide:
         return ArgType::Invalid();
     }
 
@@ -372,6 +419,7 @@
       case LengthModifier::AsInt32:
       case LengthModifier::AsInt3264:
       case LengthModifier::AsInt64:
+      case LengthModifier::AsWide:
         return ArgType::Invalid();
     }
   }
@@ -384,15 +432,23 @@
                          "const unichar *");
         return ArgType(ArgType::WCStrTy, "wchar_t *");
       }
+      if (LM.getKind() == LengthModifier::AsWide)
+        return ArgType(ArgType::WCStrTy, "wchar_t *");
       return ArgType::CStrTy;
     case ConversionSpecifier::SArg:
       if (IsObjCLiteral)
         return ArgType(Ctx.getPointerType(Ctx.UnsignedShortTy.withConst()),
                        "const unichar *");
+      if (Ctx.getTargetInfo().getTriple().isOSMSVCRT() &&
+          LM.getKind() == LengthModifier::AsShort)
+        return ArgType::CStrTy;
       return ArgType(ArgType::WCStrTy, "wchar_t *");
     case ConversionSpecifier::CArg:
       if (IsObjCLiteral)
         return ArgType(Ctx.UnsignedShortTy, "unichar");
+      if (Ctx.getTargetInfo().getTriple().isOSMSVCRT() &&
+          LM.getKind() == LengthModifier::AsShort)
+        return Ctx.IntTy;
       return ArgType(Ctx.WideCharTy, "wchar_t");
     case ConversionSpecifier::pArg:
       return ArgType::CPointerTy;
diff --git a/lib/Analysis/ScanfFormatString.cpp b/lib/Analysis/ScanfFormatString.cpp
index ed28627..d484d8e 100644
--- a/lib/Analysis/ScanfFormatString.cpp
+++ b/lib/Analysis/ScanfFormatString.cpp
@@ -257,6 +257,7 @@
         case LengthModifier::AsMAllocate:
         case LengthModifier::AsInt32:
         case LengthModifier::AsInt3264:
+        case LengthModifier::AsWide:
           return ArgType::Invalid();
       }
 
@@ -295,6 +296,7 @@
         case LengthModifier::AsMAllocate:
         case LengthModifier::AsInt32:
         case LengthModifier::AsInt3264:
+        case LengthModifier::AsWide:
           return ArgType::Invalid();
       }
 
@@ -326,10 +328,14 @@
         case LengthModifier::None:
           return ArgType::PtrTo(ArgType::AnyCharTy);
         case LengthModifier::AsLong:
+        case LengthModifier::AsWide:
           return ArgType::PtrTo(ArgType(Ctx.getWideCharType(), "wchar_t"));
         case LengthModifier::AsAllocate:
         case LengthModifier::AsMAllocate:
           return ArgType::PtrTo(ArgType::CStrTy);
+        case LengthModifier::AsShort:
+          if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+            return ArgType::PtrTo(ArgType::AnyCharTy);
         default:
           return ArgType::Invalid();
       }
@@ -338,10 +344,14 @@
       // FIXME: Mac OS X specific?
       switch (LM.getKind()) {
         case LengthModifier::None:
+        case LengthModifier::AsWide:
           return ArgType::PtrTo(ArgType(Ctx.getWideCharType(), "wchar_t"));
         case LengthModifier::AsAllocate:
         case LengthModifier::AsMAllocate:
           return ArgType::PtrTo(ArgType(ArgType::WCStrTy, "wchar_t *"));
+        case LengthModifier::AsShort:
+          if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+            return ArgType::PtrTo(ArgType::AnyCharTy);
         default:
           return ArgType::Invalid();
       }
@@ -378,6 +388,7 @@
         case LengthModifier::AsMAllocate:
         case LengthModifier::AsInt32:
         case LengthModifier::AsInt3264:
+        case LengthModifier::AsWide:
           return ArgType::Invalid();
         }
 
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index 11df61f..4ddc21d 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -40,762 +40,111 @@
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Support/raw_ostream.h"
 #include <algorithm>
+#include <ostream>
+#include <sstream>
 #include <utility>
 #include <vector>
 
-using namespace clang;
-using namespace thread_safety;
+
+namespace clang {
+namespace threadSafety {
 
 // Key method definition
 ThreadSafetyHandler::~ThreadSafetyHandler() {}
 
-namespace {
-
-/// SExpr implements a simple expression language that is used to store,
-/// compare, and pretty-print C++ expressions.  Unlike a clang Expr, a SExpr
-/// does not capture surface syntax, and it does not distinguish between
-/// C++ concepts, like pointers and references, that have no real semantic
-/// differences.  This simplicity allows SExprs to be meaningfully compared,
-/// e.g.
-///        (x)          =  x
-///        (*this).foo  =  this->foo
-///        *&a          =  a
-///
-/// Thread-safety analysis works by comparing lock expressions.  Within the
-/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
-/// a particular mutex object at run-time.  Subsequent occurrences of the same
-/// expression (where "same" means syntactic equality) will refer to the same
-/// run-time object if three conditions hold:
-/// (1) Local variables in the expression, such as "x" have not changed.
-/// (2) Values on the heap that affect the expression have not changed.
-/// (3) The expression involves only pure function calls.
-///
-/// The current implementation assumes, but does not verify, that multiple uses
-/// of the same lock expression satisfies these criteria.
-class SExpr {
-private:
-  enum ExprOp {
-    EOP_Nop,       ///< No-op
-    EOP_Wildcard,  ///< Matches anything.
-    EOP_Universal, ///< Universal lock.
-    EOP_This,      ///< This keyword.
-    EOP_NVar,      ///< Named variable.
-    EOP_LVar,      ///< Local variable.
-    EOP_Dot,       ///< Field access
-    EOP_Call,      ///< Function call
-    EOP_MCall,     ///< Method call
-    EOP_Index,     ///< Array index
-    EOP_Unary,     ///< Unary operation
-    EOP_Binary,    ///< Binary operation
-    EOP_Unknown    ///< Catchall for everything else
-  };
+class TILPrinter :
+  public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
 
 
-  class SExprNode {
-   private:
-    unsigned char  Op;     ///< Opcode of the root node
-    unsigned char  Flags;  ///< Additional opcode-specific data
-    unsigned short Sz;     ///< Number of child nodes
-    const void*    Data;   ///< Additional opcode-specific data
+/// Issue a warning about an invalid lock expression
+static void warnInvalidLock(ThreadSafetyHandler &Handler,
+                            const Expr *MutexExp, const NamedDecl *D,
+                            const Expr *DeclExp, StringRef Kind) {
+  SourceLocation Loc;
+  if (DeclExp)
+    Loc = DeclExp->getExprLoc();
 
-   public:
-    SExprNode(ExprOp O, unsigned F, const void* D)
-      : Op(static_cast<unsigned char>(O)),
-        Flags(static_cast<unsigned char>(F)), Sz(1), Data(D)
-    { }
-
-    unsigned size() const        { return Sz; }
-    void     setSize(unsigned S) { Sz = S;    }
-
-    ExprOp   kind() const { return static_cast<ExprOp>(Op); }
-
-    const NamedDecl* getNamedDecl() const {
-      assert(Op == EOP_NVar || Op == EOP_LVar || Op == EOP_Dot);
-      return reinterpret_cast<const NamedDecl*>(Data);
-    }
-
-    const NamedDecl* getFunctionDecl() const {
-      assert(Op == EOP_Call || Op == EOP_MCall);
-      return reinterpret_cast<const NamedDecl*>(Data);
-    }
-
-    bool isArrow() const { return Op == EOP_Dot && Flags == 1; }
-    void setArrow(bool A) { Flags = A ? 1 : 0; }
-
-    unsigned arity() const {
-      switch (Op) {
-        case EOP_Nop:       return 0;
-        case EOP_Wildcard:  return 0;
-        case EOP_Universal: return 0;
-        case EOP_NVar:      return 0;
-        case EOP_LVar:      return 0;
-        case EOP_This:      return 0;
-        case EOP_Dot:       return 1;
-        case EOP_Call:      return Flags+1;  // First arg is function.
-        case EOP_MCall:     return Flags+1;  // First arg is implicit obj.
-        case EOP_Index:     return 2;
-        case EOP_Unary:     return 1;
-        case EOP_Binary:    return 2;
-        case EOP_Unknown:   return Flags;
-      }
-      return 0;
-    }
-
-    bool operator==(const SExprNode& Other) const {
-      // Ignore flags and size -- they don't matter.
-      return (Op == Other.Op &&
-              Data == Other.Data);
-    }
-
-    bool operator!=(const SExprNode& Other) const {
-      return !(*this == Other);
-    }
-
-    bool matches(const SExprNode& Other) const {
-      return (*this == Other) ||
-             (Op == EOP_Wildcard) ||
-             (Other.Op == EOP_Wildcard);
-    }
-  };
+  // FIXME: add a note about the attribute location in MutexExp or D
+  if (Loc.isValid())
+    Handler.handleInvalidLockExp(Kind, Loc);
+}
 
 
-  /// \brief Encapsulates the lexical context of a function call.  The lexical
-  /// context includes the arguments to the call, including the implicit object
-  /// argument.  When an attribute containing a mutex expression is attached to
-  /// a method, the expression may refer to formal parameters of the method.
-  /// Actual arguments must be substituted for formal parameters to derive
-  /// the appropriate mutex expression in the lexical context where the function
-  /// is called.  PrevCtx holds the context in which the arguments themselves
-  /// should be evaluated; multiple calling contexts can be chained together
-  /// by the lock_returned attribute.
-  struct CallingContext {
-    const NamedDecl*   AttrDecl;   // The decl to which the attribute is attached.
-    const Expr*        SelfArg;    // Implicit object argument -- e.g. 'this'
-    bool               SelfArrow;  // is Self referred to with -> or .?
-    unsigned           NumArgs;    // Number of funArgs
-    const Expr* const* FunArgs;    // Function arguments
-    CallingContext*    PrevCtx;    // The previous context; or 0 if none.
-
-    CallingContext(const NamedDecl *D)
-        : AttrDecl(D), SelfArg(nullptr), SelfArrow(false), NumArgs(0),
-          FunArgs(nullptr), PrevCtx(nullptr) {}
-  };
-
-  typedef SmallVector<SExprNode, 4> NodeVector;
-
-private:
-  // A SExpr is a list of SExprNodes in prefix order.  The Size field allows
-  // the list to be traversed as a tree.
-  NodeVector NodeVec;
-
-private:
-  unsigned make(ExprOp O, unsigned F = 0, const void *D = nullptr) {
-    NodeVec.push_back(SExprNode(O, F, D));
-    return NodeVec.size() - 1;
-  }
-
-  unsigned makeNop() {
-    return make(EOP_Nop);
-  }
-
-  unsigned makeWildcard() {
-    return make(EOP_Wildcard);
-  }
-
-  unsigned makeUniversal() {
-    return make(EOP_Universal);
-  }
-
-  unsigned makeNamedVar(const NamedDecl *D) {
-    return make(EOP_NVar, 0, D);
-  }
-
-  unsigned makeLocalVar(const NamedDecl *D) {
-    return make(EOP_LVar, 0, D);
-  }
-
-  unsigned makeThis() {
-    return make(EOP_This);
-  }
-
-  unsigned makeDot(const NamedDecl *D, bool Arrow) {
-    return make(EOP_Dot, Arrow ? 1 : 0, D);
-  }
-
-  unsigned makeCall(unsigned NumArgs, const NamedDecl *D) {
-    return make(EOP_Call, NumArgs, D);
-  }
-
-  // Grab the very first declaration of virtual method D
-  const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) {
-    while (true) {
-      D = D->getCanonicalDecl();
-      CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
-                                     E = D->end_overridden_methods();
-      if (I == E)
-        return D;  // Method does not override anything
-      D = *I;      // FIXME: this does not work with multiple inheritance.
-    }
-    return nullptr;
-  }
-
-  unsigned makeMCall(unsigned NumArgs, const CXXMethodDecl *D) {
-    return make(EOP_MCall, NumArgs, getFirstVirtualDecl(D));
-  }
-
-  unsigned makeIndex() {
-    return make(EOP_Index);
-  }
-
-  unsigned makeUnary() {
-    return make(EOP_Unary);
-  }
-
-  unsigned makeBinary() {
-    return make(EOP_Binary);
-  }
-
-  unsigned makeUnknown(unsigned Arity) {
-    return make(EOP_Unknown, Arity);
-  }
-
-  inline bool isCalleeArrow(const Expr *E) {
-    const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
-    return ME ? ME->isArrow() : false;
-  }
-
-  /// Build an SExpr from the given C++ expression.
-  /// Recursive function that terminates on DeclRefExpr.
-  /// Note: this function merely creates a SExpr; it does not check to
-  /// ensure that the original expression is a valid mutex expression.
-  ///
-  /// NDeref returns the number of Derefence and AddressOf operations
-  /// preceding the Expr; this is used to decide whether to pretty-print
-  /// SExprs with . or ->.
-  unsigned buildSExpr(const Expr *Exp, CallingContext *CallCtx,
-                      int *NDeref = nullptr) {
-    if (!Exp)
-      return 0;
-
-    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
-      const NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
-      const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
-      if (PV) {
-        const FunctionDecl *FD =
-          cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
-        unsigned i = PV->getFunctionScopeIndex();
-
-        if (CallCtx && CallCtx->FunArgs &&
-            FD == CallCtx->AttrDecl->getCanonicalDecl()) {
-          // Substitute call arguments for references to function parameters
-          assert(i < CallCtx->NumArgs);
-          return buildSExpr(CallCtx->FunArgs[i], CallCtx->PrevCtx, NDeref);
-        }
-        // Map the param back to the param of the original function declaration.
-        makeNamedVar(FD->getParamDecl(i));
-        return 1;
-      }
-      // Not a function parameter -- just store the reference.
-      makeNamedVar(ND);
-      return 1;
-    } else if (isa<CXXThisExpr>(Exp)) {
-      // Substitute parent for 'this'
-      if (CallCtx && CallCtx->SelfArg) {
-        if (!CallCtx->SelfArrow && NDeref)
-          // 'this' is a pointer, but self is not, so need to take address.
-          --(*NDeref);
-        return buildSExpr(CallCtx->SelfArg, CallCtx->PrevCtx, NDeref);
-      }
-      else {
-        makeThis();
-        return 1;
-      }
-    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
-      const NamedDecl *ND = ME->getMemberDecl();
-      int ImplicitDeref = ME->isArrow() ? 1 : 0;
-      unsigned Root = makeDot(ND, false);
-      unsigned Sz = buildSExpr(ME->getBase(), CallCtx, &ImplicitDeref);
-      NodeVec[Root].setArrow(ImplicitDeref > 0);
-      NodeVec[Root].setSize(Sz + 1);
-      return Sz + 1;
-    } else if (const CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
-      // When calling a function with a lock_returned attribute, replace
-      // the function call with the expression in lock_returned.
-      const CXXMethodDecl *MD = CMCE->getMethodDecl()->getMostRecentDecl();
-      if (LockReturnedAttr* At = MD->getAttr<LockReturnedAttr>()) {
-        CallingContext LRCallCtx(CMCE->getMethodDecl());
-        LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
-        LRCallCtx.SelfArrow = isCalleeArrow(CMCE->getCallee());
-        LRCallCtx.NumArgs = CMCE->getNumArgs();
-        LRCallCtx.FunArgs = CMCE->getArgs();
-        LRCallCtx.PrevCtx = CallCtx;
-        return buildSExpr(At->getArg(), &LRCallCtx);
-      }
-      // Hack to treat smart pointers and iterators as pointers;
-      // ignore any method named get().
-      if (CMCE->getMethodDecl()->getNameAsString() == "get" &&
-          CMCE->getNumArgs() == 0) {
-        if (NDeref && isCalleeArrow(CMCE->getCallee()))
-          ++(*NDeref);
-        return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref);
-      }
-      unsigned NumCallArgs = CMCE->getNumArgs();
-      unsigned Root = makeMCall(NumCallArgs, CMCE->getMethodDecl());
-      unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx);
-      const Expr* const* CallArgs = CMCE->getArgs();
-      for (unsigned i = 0; i < NumCallArgs; ++i) {
-        Sz += buildSExpr(CallArgs[i], CallCtx);
-      }
-      NodeVec[Root].setSize(Sz + 1);
-      return Sz + 1;
-    } else if (const CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
-      const FunctionDecl *FD = CE->getDirectCallee()->getMostRecentDecl();
-      if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
-        CallingContext LRCallCtx(CE->getDirectCallee());
-        LRCallCtx.NumArgs = CE->getNumArgs();
-        LRCallCtx.FunArgs = CE->getArgs();
-        LRCallCtx.PrevCtx = CallCtx;
-        return buildSExpr(At->getArg(), &LRCallCtx);
-      }
-      // Treat smart pointers and iterators as pointers;
-      // ignore the * and -> operators.
-      if (const CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) {
-        OverloadedOperatorKind k = OE->getOperator();
-        if (k == OO_Star) {
-          if (NDeref) ++(*NDeref);
-          return buildSExpr(OE->getArg(0), CallCtx, NDeref);
-        }
-        else if (k == OO_Arrow) {
-          return buildSExpr(OE->getArg(0), CallCtx, NDeref);
-        }
-      }
-      unsigned NumCallArgs = CE->getNumArgs();
-      unsigned Root = makeCall(NumCallArgs, nullptr);
-      unsigned Sz = buildSExpr(CE->getCallee(), CallCtx);
-      const Expr* const* CallArgs = CE->getArgs();
-      for (unsigned i = 0; i < NumCallArgs; ++i) {
-        Sz += buildSExpr(CallArgs[i], CallCtx);
-      }
-      NodeVec[Root].setSize(Sz+1);
-      return Sz+1;
-    } else if (const BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
-      unsigned Root = makeBinary();
-      unsigned Sz = buildSExpr(BOE->getLHS(), CallCtx);
-      Sz += buildSExpr(BOE->getRHS(), CallCtx);
-      NodeVec[Root].setSize(Sz);
-      return Sz;
-    } else if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
-      // Ignore & and * operators -- they're no-ops.
-      // However, we try to figure out whether the expression is a pointer,
-      // so we can use . and -> appropriately in error messages.
-      if (UOE->getOpcode() == UO_Deref) {
-        if (NDeref) ++(*NDeref);
-        return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
-      }
-      if (UOE->getOpcode() == UO_AddrOf) {
-        if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UOE->getSubExpr())) {
-          if (DRE->getDecl()->isCXXInstanceMember()) {
-            // This is a pointer-to-member expression, e.g. &MyClass::mu_.
-            // We interpret this syntax specially, as a wildcard.
-            unsigned Root = makeDot(DRE->getDecl(), false);
-            makeWildcard();
-            NodeVec[Root].setSize(2);
-            return 2;
-          }
-        }
-        if (NDeref) --(*NDeref);
-        return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
-      }
-      unsigned Root = makeUnary();
-      unsigned Sz = buildSExpr(UOE->getSubExpr(), CallCtx);
-      NodeVec[Root].setSize(Sz);
-      return Sz;
-    } else if (const ArraySubscriptExpr *ASE =
-               dyn_cast<ArraySubscriptExpr>(Exp)) {
-      unsigned Root = makeIndex();
-      unsigned Sz = buildSExpr(ASE->getBase(), CallCtx);
-      Sz += buildSExpr(ASE->getIdx(), CallCtx);
-      NodeVec[Root].setSize(Sz);
-      return Sz;
-    } else if (const AbstractConditionalOperator *CE =
-               dyn_cast<AbstractConditionalOperator>(Exp)) {
-      unsigned Root = makeUnknown(3);
-      unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
-      Sz += buildSExpr(CE->getTrueExpr(), CallCtx);
-      Sz += buildSExpr(CE->getFalseExpr(), CallCtx);
-      NodeVec[Root].setSize(Sz);
-      return Sz;
-    } else if (const ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
-      unsigned Root = makeUnknown(3);
-      unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
-      Sz += buildSExpr(CE->getLHS(), CallCtx);
-      Sz += buildSExpr(CE->getRHS(), CallCtx);
-      NodeVec[Root].setSize(Sz);
-      return Sz;
-    } else if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
-      return buildSExpr(CE->getSubExpr(), CallCtx, NDeref);
-    } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
-      return buildSExpr(PE->getSubExpr(), CallCtx, NDeref);
-    } else if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) {
-      return buildSExpr(EWC->getSubExpr(), CallCtx, NDeref);
-    } else if (const CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) {
-      return buildSExpr(E->getSubExpr(), CallCtx, NDeref);
-    } else if (isa<CharacterLiteral>(Exp) ||
-               isa<CXXNullPtrLiteralExpr>(Exp) ||
-               isa<GNUNullExpr>(Exp) ||
-               isa<CXXBoolLiteralExpr>(Exp) ||
-               isa<FloatingLiteral>(Exp) ||
-               isa<ImaginaryLiteral>(Exp) ||
-               isa<IntegerLiteral>(Exp) ||
-               isa<StringLiteral>(Exp) ||
-               isa<ObjCStringLiteral>(Exp)) {
-      makeNop();
-      return 1;  // FIXME: Ignore literals for now
-    } else {
-      makeNop();
-      return 1;  // Ignore.  FIXME: mark as invalid expression?
-    }
-  }
-
-  /// \brief Construct a SExpr from an expression.
-  /// \param MutexExp The original mutex expression within an attribute
-  /// \param DeclExp An expression involving the Decl on which the attribute
-  ///        occurs.
-  /// \param D  The declaration to which the lock/unlock attribute is attached.
-  void buildSExprFromExpr(const Expr *MutexExp, const Expr *DeclExp,
-                          const NamedDecl *D, VarDecl *SelfDecl = nullptr) {
-    CallingContext CallCtx(D);
-
-    if (MutexExp) {
-      if (const StringLiteral* SLit = dyn_cast<StringLiteral>(MutexExp)) {
-        if (SLit->getString() == StringRef("*"))
-          // The "*" expr is a universal lock, which essentially turns off
-          // checks until it is removed from the lockset.
-          makeUniversal();
-        else
-          // Ignore other string literals for now.
-          makeNop();
-        return;
-      }
-    }
-
-    // If we are processing a raw attribute expression, with no substitutions.
-    if (!DeclExp) {
-      buildSExpr(MutexExp, nullptr);
-      return;
-    }
-
-    // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
-    // for formal parameters when we call buildMutexID later.
-    if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
-      CallCtx.SelfArg   = ME->getBase();
-      CallCtx.SelfArrow = ME->isArrow();
-    } else if (const CXXMemberCallExpr *CE =
-               dyn_cast<CXXMemberCallExpr>(DeclExp)) {
-      CallCtx.SelfArg   = CE->getImplicitObjectArgument();
-      CallCtx.SelfArrow = isCalleeArrow(CE->getCallee());
-      CallCtx.NumArgs   = CE->getNumArgs();
-      CallCtx.FunArgs   = CE->getArgs();
-    } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
-      CallCtx.NumArgs = CE->getNumArgs();
-      CallCtx.FunArgs = CE->getArgs();
-    } else if (const CXXConstructExpr *CE =
-               dyn_cast<CXXConstructExpr>(DeclExp)) {
-      CallCtx.SelfArg = nullptr;  // Will be set below
-      CallCtx.NumArgs = CE->getNumArgs();
-      CallCtx.FunArgs = CE->getArgs();
-    } else if (D && isa<CXXDestructorDecl>(D)) {
-      // There's no such thing as a "destructor call" in the AST.
-      CallCtx.SelfArg = DeclExp;
-    }
-
-    // Hack to handle constructors, where self cannot be recovered from
-    // the expression.
-    if (SelfDecl && !CallCtx.SelfArg) {
-      DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
-                          SelfDecl->getLocation());
-      CallCtx.SelfArg = &SelfDRE;
-
-      // If the attribute has no arguments, then assume the argument is "this".
-      if (!MutexExp)
-        buildSExpr(CallCtx.SelfArg, nullptr);
-      else  // For most attributes.
-        buildSExpr(MutexExp, &CallCtx);
-      return;
-    }
-
-    // If the attribute has no arguments, then assume the argument is "this".
-    if (!MutexExp)
-      buildSExpr(CallCtx.SelfArg, nullptr);
-    else  // For most attributes.
-      buildSExpr(MutexExp, &CallCtx);
-  }
-
-  /// \brief Get index of next sibling of node i.
-  unsigned getNextSibling(unsigned i) const {
-    return i + NodeVec[i].size();
-  }
-
-public:
-  explicit SExpr(clang::Decl::EmptyShell e) { NodeVec.clear(); }
-
-  /// \param MutexExp The original mutex expression within an attribute
-  /// \param DeclExp An expression involving the Decl on which the attribute
-  ///        occurs.
-  /// \param D  The declaration to which the lock/unlock attribute is attached.
-  /// Caller must check isValid() after construction.
-  SExpr(const Expr *MutexExp, const Expr *DeclExp, const NamedDecl *D,
-        VarDecl *SelfDecl = nullptr) {
-    buildSExprFromExpr(MutexExp, DeclExp, D, SelfDecl);
-  }
-
-  /// Return true if this is a valid decl sequence.
-  /// Caller must call this by hand after construction to handle errors.
-  bool isValid() const {
-    return !NodeVec.empty();
-  }
-
-  bool shouldIgnore() const {
-    // Nop is a mutex that we have decided to deliberately ignore.
-    assert(NodeVec.size() > 0 && "Invalid Mutex");
-    return NodeVec[0].kind() == EOP_Nop;
-  }
-
-  bool isUniversal() const {
-    assert(NodeVec.size() > 0 && "Invalid Mutex");
-    return NodeVec[0].kind() == EOP_Universal;
-  }
-
-  /// Issue a warning about an invalid lock expression
-  static void warnInvalidLock(ThreadSafetyHandler &Handler,
-                              const Expr *MutexExp, const Expr *DeclExp,
-                              const NamedDecl *D, StringRef Kind) {
-    SourceLocation Loc;
-    if (DeclExp)
-      Loc = DeclExp->getExprLoc();
-
-    // FIXME: add a note about the attribute location in MutexExp or D
-    if (Loc.isValid())
-      Handler.handleInvalidLockExp(Kind, Loc);
-  }
-
-  bool operator==(const SExpr &other) const {
-    return NodeVec == other.NodeVec;
-  }
-
-  bool operator!=(const SExpr &other) const {
-    return !(*this == other);
-  }
-
-  bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const {
-    if (NodeVec[i].matches(Other.NodeVec[j])) {
-      unsigned ni = NodeVec[i].arity();
-      unsigned nj = Other.NodeVec[j].arity();
-      unsigned n = (ni < nj) ? ni : nj;
-      bool Result = true;
-      unsigned ci = i+1;  // first child of i
-      unsigned cj = j+1;  // first child of j
-      for (unsigned k = 0; k < n;
-           ++k, ci=getNextSibling(ci), cj = Other.getNextSibling(cj)) {
-        Result = Result && matches(Other, ci, cj);
-      }
-      return Result;
-    }
-    return false;
-  }
-
-  // A partial match between a.mu and b.mu returns true a and b have the same
-  // type (and thus mu refers to the same mutex declaration), regardless of
-  // whether a and b are different objects or not.
-  bool partiallyMatches(const SExpr &Other) const {
-    if (NodeVec[0].kind() == EOP_Dot)
-      return NodeVec[0].matches(Other.NodeVec[0]);
-    return false;
-  }
-
-  /// \brief Pretty print a lock expression for use in error messages.
-  std::string toString(unsigned i = 0) const {
-    assert(isValid());
-    if (i >= NodeVec.size())
-      return "";
-
-    const SExprNode* N = &NodeVec[i];
-    switch (N->kind()) {
-      case EOP_Nop:
-        return "_";
-      case EOP_Wildcard:
-        return "(?)";
-      case EOP_Universal:
-        return "*";
-      case EOP_This:
-        return "this";
-      case EOP_NVar:
-      case EOP_LVar: {
-        return N->getNamedDecl()->getNameAsString();
-      }
-      case EOP_Dot: {
-        if (NodeVec[i+1].kind() == EOP_Wildcard) {
-          std::string S = "&";
-          S += N->getNamedDecl()->getQualifiedNameAsString();
-          return S;
-        }
-        std::string FieldName = N->getNamedDecl()->getNameAsString();
-        if (NodeVec[i+1].kind() == EOP_This)
-          return FieldName;
-
-        std::string S = toString(i+1);
-        if (N->isArrow())
-          return S + "->" + FieldName;
-        else
-          return S + "." + FieldName;
-      }
-      case EOP_Call: {
-        std::string S = toString(i+1) + "(";
-        unsigned NumArgs = N->arity()-1;
-        unsigned ci = getNextSibling(i+1);
-        for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
-          S += toString(ci);
-          if (k+1 < NumArgs) S += ",";
-        }
-        S += ")";
-        return S;
-      }
-      case EOP_MCall: {
-        std::string S = "";
-        if (NodeVec[i+1].kind() != EOP_This)
-          S = toString(i+1) + ".";
-        if (const NamedDecl *D = N->getFunctionDecl())
-          S += D->getNameAsString() + "(";
-        else
-          S += "#(";
-        unsigned NumArgs = N->arity()-1;
-        unsigned ci = getNextSibling(i+1);
-        for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
-          S += toString(ci);
-          if (k+1 < NumArgs) S += ",";
-        }
-        S += ")";
-        return S;
-      }
-      case EOP_Index: {
-        std::string S1 = toString(i+1);
-        std::string S2 = toString(i+1 + NodeVec[i+1].size());
-        return S1 + "[" + S2 + "]";
-      }
-      case EOP_Unary: {
-        std::string S = toString(i+1);
-        return "#" + S;
-      }
-      case EOP_Binary: {
-        std::string S1 = toString(i+1);
-        std::string S2 = toString(i+1 + NodeVec[i+1].size());
-        return "(" + S1 + "#" + S2 + ")";
-      }
-      case EOP_Unknown: {
-        unsigned NumChildren = N->arity();
-        if (NumChildren == 0)
-          return "(...)";
-        std::string S = "(";
-        unsigned ci = i+1;
-        for (unsigned j = 0; j < NumChildren; ++j, ci = getNextSibling(ci)) {
-          S += toString(ci);
-          if (j+1 < NumChildren) S += "#";
-        }
-        S += ")";
-        return S;
-      }
-    }
-    return "";
-  }
-};
-
-/// \brief A short list of SExprs
-class MutexIDList : public SmallVector<SExpr, 3> {
+/// \brief A set of CapabilityExpr objects, which are compiled from the
+/// requires attributes on a function.
+class CapExprSet : public SmallVector<CapabilityExpr, 4> {
 public:
   /// \brief Push M onto list, but discard duplicates.
-  void push_back_nodup(const SExpr& M) {
-    if (end() == std::find(begin(), end(), M))
-      push_back(M);
+  void push_back_nodup(const CapabilityExpr &CapE) {
+    iterator It = std::find_if(begin(), end(),
+                               [=](const CapabilityExpr &CapE2) {
+      return CapE.equals(CapE2);
+    });
+    if (It == end())
+      push_back(CapE);
   }
 };
 
-/// \brief This is a helper class that stores info about the most recent
-/// accquire of a Lock.
+class FactManager;
+class FactSet;
+
+/// \brief This is a helper class that stores a fact that is known at a
+/// particular point in program execution.  Currently, a fact is a capability,
+/// along with additional information, such as where it was acquired, whether
+/// it is exclusive or shared, etc.
 ///
-/// The main body of the analysis maps MutexIDs to LockDatas.
-struct LockData {
-  SourceLocation AcquireLoc;
+/// FIXME: this analysis does not currently support either re-entrant
+/// locking or lock "upgrading" and "downgrading" between exclusive and
+/// shared.
+class FactEntry : public CapabilityExpr {
+private:
+  LockKind          LKind;            ///<  exclusive or shared
+  SourceLocation    AcquireLoc;       ///<  where it was acquired.
+  bool              Asserted;         ///<  true if the lock was asserted
 
-  /// \brief LKind stores whether a lock is held shared or exclusively.
-  /// Note that this analysis does not currently support either re-entrant
-  /// locking or lock "upgrading" and "downgrading" between exclusive and
-  /// shared.
-  ///
-  /// FIXME: add support for re-entrant locking and lock up/downgrading
-  LockKind LKind;
-  bool     Asserted;           // for asserted locks
-  bool     Managed;            // for ScopedLockable objects
-  SExpr    UnderlyingMutex;    // for ScopedLockable objects
+public:
+  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+            bool Asrt)
+      : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt) {}
 
-  LockData(SourceLocation AcquireLoc, LockKind LKind, bool M=false,
-           bool Asrt=false)
-    : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(Asrt), Managed(M),
-      UnderlyingMutex(Decl::EmptyShell())
-  {}
+  virtual ~FactEntry() {}
 
-  LockData(SourceLocation AcquireLoc, LockKind LKind, const SExpr &Mu)
-    : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(false), Managed(false),
-      UnderlyingMutex(Mu)
-  {}
+  LockKind          kind()       const { return LKind;    }
+  SourceLocation    loc()        const { return AcquireLoc; }
+  bool              asserted()   const { return Asserted; }
 
-  bool operator==(const LockData &other) const {
-    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
-  }
+  virtual void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const = 0;
+  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                            bool FullyRemove, ThreadSafetyHandler &Handler,
+                            StringRef DiagKind) const = 0;
 
-  bool operator!=(const LockData &other) const {
-    return !(*this == other);
-  }
-
-  void Profile(llvm::FoldingSetNodeID &ID) const {
-    ID.AddInteger(AcquireLoc.getRawEncoding());
-    ID.AddInteger(LKind);
-  }
-
+  // Return true if LKind >= LK, where exclusive > shared
   bool isAtLeast(LockKind LK) {
-    return (LK == LK_Shared) || (LKind == LK_Exclusive);
+    return (LKind == LK_Exclusive) || (LK == LK_Shared);
   }
 };
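
A purely illustrative sketch (not part of this patch; Mutex, mu, data and update are hypothetical names) of the kind of annotated code whose analysis produces these facts, using the documented Clang thread safety attributes:

struct __attribute__((capability("mutex"))) Mutex {
  void lock()   __attribute__((acquire_capability()));
  void unlock() __attribute__((release_capability()));
};

Mutex mu;
int data __attribute__((guarded_by(mu)));

void update() {
  mu.lock();    // a fact for "mu" (LK_Exclusive, acquired here) is added
  data = 1;     // OK: the required capability is held
  mu.unlock();  // the fact for "mu" is removed again
}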
 
 
-/// \brief A FactEntry stores a single fact that is known at a particular point
-/// in the program execution.  Currently, this is information regarding a lock
-/// that is held at that point.
-struct FactEntry {
-  SExpr    MutID;
-  LockData LDat;
-
-  FactEntry(const SExpr& M, const LockData& L)
-    : MutID(M), LDat(L)
-  { }
-};
-
-
 typedef unsigned short FactID;
 
 /// \brief FactManager manages the memory for all facts that are created during
 /// the analysis of a single routine.
 class FactManager {
 private:
-  std::vector<FactEntry> Facts;
+  std::vector<std::unique_ptr<FactEntry>> Facts;
 
 public:
-  FactID newLock(const SExpr& M, const LockData& L) {
-    Facts.push_back(FactEntry(M,L));
+  FactID newFact(std::unique_ptr<FactEntry> Entry) {
+    Facts.push_back(std::move(Entry));
     return static_cast<unsigned short>(Facts.size() - 1);
   }
 
-  const FactEntry& operator[](FactID F) const { return Facts[F]; }
-  FactEntry&       operator[](FactID F)       { return Facts[F]; }
+  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
+  FactEntry &operator[](FactID F) { return *Facts[F]; }
 };
 
 
@@ -824,68 +173,73 @@
 
   bool isEmpty() const { return FactIDs.size() == 0; }
 
-  FactID addLock(FactManager& FM, const SExpr& M, const LockData& L) {
-    FactID F = FM.newLock(M, L);
+  // Return true if the set contains only negative facts
+  bool isEmpty(FactManager &FactMan) const {
+    for (FactID FID : *this) {
+      if (!FactMan[FID].negative())
+        return false;
+    }
+    return true;
+  }
+
+  void addLockByID(FactID ID) { FactIDs.push_back(ID); }
+
+  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
+    FactID F = FM.newFact(std::move(Entry));
     FactIDs.push_back(F);
     return F;
   }
 
-  bool removeLock(FactManager& FM, const SExpr& M) {
+  bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
     unsigned n = FactIDs.size();
     if (n == 0)
       return false;
 
     for (unsigned i = 0; i < n-1; ++i) {
-      if (FM[FactIDs[i]].MutID.matches(M)) {
+      if (FM[FactIDs[i]].matches(CapE)) {
         FactIDs[i] = FactIDs[n-1];
         FactIDs.pop_back();
         return true;
       }
     }
-    if (FM[FactIDs[n-1]].MutID.matches(M)) {
+    if (FM[FactIDs[n-1]].matches(CapE)) {
       FactIDs.pop_back();
       return true;
     }
     return false;
   }
 
-  iterator findLockIter(FactManager &FM, const SExpr &M) {
+  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
     return std::find_if(begin(), end(), [&](FactID ID) {
-      return FM[ID].MutID.matches(M);
+      return FM[ID].matches(CapE);
     });
   }
 
-  LockData *findLock(FactManager &FM, const SExpr &M) const {
+  FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
     auto I = std::find_if(begin(), end(), [&](FactID ID) {
-      return FM[ID].MutID.matches(M);
+      return FM[ID].matches(CapE);
     });
-
-    return I != end() ? &FM[*I].LDat : nullptr;
+    return I != end() ? &FM[*I] : nullptr;
   }
 
-  LockData *findLockUniv(FactManager &FM, const SExpr &M) const {
+  FactEntry *findLockUniv(FactManager &FM, const CapabilityExpr &CapE) const {
     auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
-      const SExpr &Expr = FM[ID].MutID;
-      return Expr.isUniversal() || Expr.matches(M);
+      return FM[ID].matchesUniv(CapE);
     });
-
-    return I != end() ? &FM[*I].LDat : nullptr;
+    return I != end() ? &FM[*I] : nullptr;
   }
 
-  FactEntry *findPartialMatch(FactManager &FM, const SExpr &M) const {
+  FactEntry *findPartialMatch(FactManager &FM,
+                              const CapabilityExpr &CapE) const {
     auto I = std::find_if(begin(), end(), [&](FactID ID) {
-      return FM[ID].MutID.partiallyMatches(M);
+      return FM[ID].partiallyMatches(CapE);
     });
-
     return I != end() ? &FM[*I] : nullptr;
   }
 };
 
-/// A Lockset maps each SExpr (defined above) to information about how it has
-/// been locked.
-typedef llvm::ImmutableMap<SExpr, LockData> Lockset;
-typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
 
+typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
 class LocalVariableMap;
 
 /// A side (entry or exit) of a CFG node.
@@ -1404,29 +758,130 @@
   }
 }
 
+class LockableFactEntry : public FactEntry {
+private:
+  bool Managed; ///<  managed by ScopedLockable object
+
+public:
+  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+                    bool Mng = false, bool Asrt = false)
+      : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
+
+  void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const override {
+    if (!Managed && !asserted() && !negative() && !isUniversal()) {
+      Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
+                                        LEK);
+    }
+  }
+
+  void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                    bool FullyRemove, ThreadSafetyHandler &Handler,
+                    StringRef DiagKind) const override {
+    FSet.removeLock(FactMan, Cp);
+    if (!Cp.negative()) {
+      FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                !Cp, LK_Exclusive, UnlockLoc));
+    }
+  }
+};
+
+class ScopedLockableFactEntry : public FactEntry {
+private:
+  SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+
+public:
+  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
+                          const CapExprSet &Excl, const CapExprSet &Shrd)
+      : FactEntry(CE, LK_Exclusive, Loc, false) {
+    for (const auto &M : Excl)
+      UnderlyingMutexes.push_back(M.sexpr());
+    for (const auto &M : Shrd)
+      UnderlyingMutexes.push_back(M.sexpr());
+  }
+
+  void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const override {
+    for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+      if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
+        // If this scoped lock manages another mutex, and if the underlying
+        // mutex is still held, then warn about the underlying mutex.
+        Handler.handleMutexHeldEndOfScope(
+            "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
+      }
+    }
+  }
+
+  void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                    bool FullyRemove, ThreadSafetyHandler &Handler,
+                    StringRef DiagKind) const override {
+    assert(!Cp.negative() && "Managing object cannot be negative.");
+    for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+      CapabilityExpr UnderCp(UnderlyingMutex, false);
+      auto UnderEntry = llvm::make_unique<LockableFactEntry>(
+          !UnderCp, LK_Exclusive, UnlockLoc);
+
+      if (FullyRemove) {
+        // We're destroying the managing object.
+        // Remove the underlying mutex if it exists; but don't warn.
+        if (FSet.findLock(FactMan, UnderCp)) {
+          FSet.removeLock(FactMan, UnderCp);
+          FSet.addLock(FactMan, std::move(UnderEntry));
+        }
+      } else {
+        // We're releasing the underlying mutex, but not destroying the
+        // managing object.  Warn on dual release.
+        if (!FSet.findLock(FactMan, UnderCp)) {
+          Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
+                                        UnlockLoc);
+        }
+        FSet.removeLock(FactMan, UnderCp);
+        FSet.addLock(FactMan, std::move(UnderEntry));
+      }
+    }
+    if (FullyRemove)
+      FSet.removeLock(FactMan, Cp);
+  }
+};
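
Another illustrative sketch (hypothetical MutexLock guard, reusing the Mutex/mu/data declarations from the sketch after the FactEntry class): a RAII type marked scoped_lockable is what yields a ScopedLockableFactEntry, with the acquired mutexes recorded as its underlying capabilities:

struct __attribute__((scoped_lockable)) MutexLock {
  MutexLock(Mutex &m) __attribute__((acquire_capability(m)));
  ~MutexLock()        __attribute__((release_capability()));
};

void scoped_update() {
  MutexLock guard(mu);  // fact for "guard" with underlying mutex "mu"
  data = 2;             // OK while the guard is alive
}                       // destructor: handleUnlock releases the underlying "mu"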
+
 /// \brief Class which implements the core thread safety analysis routines.
 class ThreadSafetyAnalyzer {
   friend class BuildLockset;
 
+  llvm::BumpPtrAllocator Bpa;
+  threadSafety::til::MemRegionRef Arena;
+  threadSafety::SExprBuilder SxBuilder;
+
   ThreadSafetyHandler       &Handler;
+  const CXXMethodDecl       *CurrentMethod;
   LocalVariableMap          LocalVarMap;
   FactManager               FactMan;
   std::vector<CFGBlockInfo> BlockInfo;
 
 public:
-  ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}
+  ThreadSafetyAnalyzer(ThreadSafetyHandler &H)
+      : Arena(&Bpa), SxBuilder(Arena), Handler(H), CurrentMethod(nullptr) {}
 
-  void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat,
-               StringRef DiagKind);
-  void removeLock(FactSet &FSet, const SExpr &Mutex, SourceLocation UnlockLoc,
-                  bool FullyRemove, LockKind Kind, StringRef DiagKind);
+  bool inCurrentScope(const CapabilityExpr &CapE);
+
+  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
+               StringRef DiagKind, bool ReqAttr = false);
+  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
+                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
+                  StringRef DiagKind);
 
   template <typename AttrType>
-  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
+  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
                    const NamedDecl *D, VarDecl *SelfDecl = nullptr);
 
   template <class AttrType>
-  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
+  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
                    const NamedDecl *D,
                    const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                    Expr *BrE, bool Neg);
@@ -1530,94 +985,107 @@
   return "mutex";
 }
 
+
+inline bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
+  if (!CurrentMethod)
+    return false;
+  if (auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
+    auto *VD = P->clangDecl();
+    if (VD)
+      return VD->getDeclContext() == CurrentMethod->getDeclContext();
+  }
+  return false;
+}
+
+
 /// \brief Add a new lock to the lockset, warning if the lock is already there.
-/// \param Mutex -- the Mutex expression for the lock
-/// \param LDat  -- the LockData for the lock
-void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
-                                   const LockData &LDat, StringRef DiagKind) {
-  // FIXME: deal with acquired before/after annotations.
-  // FIXME: Don't always warn when we have support for reentrant locks.
-  if (Mutex.shouldIgnore())
+/// \param ReqAttr -- true if this is part of an initial Requires attribute.
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
+                                   std::unique_ptr<FactEntry> Entry,
+                                   StringRef DiagKind, bool ReqAttr) {
+  if (Entry->shouldIgnore())
     return;
 
-  if (FSet.findLock(FactMan, Mutex)) {
-    if (!LDat.Asserted)
-      Handler.handleDoubleLock(DiagKind, Mutex.toString(), LDat.AcquireLoc);
+  if (!ReqAttr && !Entry->negative()) {
+    // Look for the negative capability, and remove it from the fact set.
+    CapabilityExpr NegC = !*Entry;
+    FactEntry *Nen = FSet.findLock(FactMan, NegC);
+    if (Nen) {
+      FSet.removeLock(FactMan, NegC);
+    } else {
+      if (inCurrentScope(*Entry) && !Entry->asserted())
+        Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
+                                      NegC.toString(), Entry->loc());
+    }
+  }
+
+  // FIXME: deal with acquired before/after annotations.
+  // FIXME: Don't always warn when we have support for reentrant locks.
+  if (FSet.findLock(FactMan, *Entry)) {
+    if (!Entry->asserted())
+      Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
   } else {
-    FSet.addLock(FactMan, Mutex, LDat);
+    FSet.addLock(FactMan, std::move(Entry));
   }
 }
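
A hypothetical illustration of the negative-capability handling above (same Mutex/mu/data sketch as before; negative requirements are spelled !mu in the attribute):

void negative_example() __attribute__((requires_capability(!mu))) {
  mu.lock();    // consumes the negative fact "!mu"; without the attribute,
                // handleNegativeNotHeld() above would be reported
  data = 3;
  mu.unlock();  // handleUnlock() re-adds the negative fact "!mu"
}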
 
 
 /// \brief Remove a lock from the lockset, warning if the lock is not there.
-/// \param Mutex The lock expression corresponding to the lock to be removed
 /// \param UnlockLoc The source location of the unlock (only used in error msg)
-void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const SExpr &Mutex,
+void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                       SourceLocation UnlockLoc,
                                       bool FullyRemove, LockKind ReceivedKind,
                                       StringRef DiagKind) {
-  if (Mutex.shouldIgnore())
+  if (Cp.shouldIgnore())
     return;
 
-  const LockData *LDat = FSet.findLock(FactMan, Mutex);
+  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
   if (!LDat) {
-    Handler.handleUnmatchedUnlock(DiagKind, Mutex.toString(), UnlockLoc);
+    Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
     return;
   }
 
   // Generic lock removal doesn't care about lock kind mismatches, but
   // otherwise diagnose when the lock kinds are mismatched.
-  if (ReceivedKind != LK_Generic && LDat->LKind != ReceivedKind) {
-    Handler.handleIncorrectUnlockKind(DiagKind, Mutex.toString(), LDat->LKind,
-                                      ReceivedKind, UnlockLoc);
-    return;
+  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
+    Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(),
+                                      LDat->kind(), ReceivedKind, UnlockLoc);
   }
 
-  if (LDat->UnderlyingMutex.isValid()) {
-    // This is scoped lockable object, which manages the real mutex.
-    if (FullyRemove) {
-      // We're destroying the managing object.
-      // Remove the underlying mutex if it exists; but don't warn.
-      if (FSet.findLock(FactMan, LDat->UnderlyingMutex))
-        FSet.removeLock(FactMan, LDat->UnderlyingMutex);
-    } else {
-      // We're releasing the underlying mutex, but not destroying the
-      // managing object.  Warn on dual release.
-      if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
-        Handler.handleUnmatchedUnlock(
-            DiagKind, LDat->UnderlyingMutex.toString(), UnlockLoc);
-      }
-      FSet.removeLock(FactMan, LDat->UnderlyingMutex);
-      return;
-    }
-  }
-  FSet.removeLock(FactMan, Mutex);
+  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
+                     DiagKind);
 }
 
 
 /// \brief Extract the list of mutexIDs from the attribute on an expression,
 /// and push them onto Mtxs, discarding any duplicates.
 template <typename AttrType>
-void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
+void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                        Expr *Exp, const NamedDecl *D,
                                        VarDecl *SelfDecl) {
   if (Attr->args_size() == 0) {
     // The mutex held is the "this" object.
-    SExpr Mu(nullptr, Exp, D, SelfDecl);
-    if (!Mu.isValid())
-      SExpr::warnInvalidLock(Handler, nullptr, Exp, D,
-                             ClassifyDiagnostic(Attr));
-    else
-      Mtxs.push_back_nodup(Mu);
+    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
+    if (Cp.isInvalid()) {
+      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
+      return;
+    }
+    if (!Cp.shouldIgnore())
+      Mtxs.push_back_nodup(Cp);
     return;
   }
 
   for (const auto *Arg : Attr->args()) {
-    SExpr Mu(Arg, Exp, D, SelfDecl);
-    if (!Mu.isValid())
-      SExpr::warnInvalidLock(Handler, Arg, Exp, D, ClassifyDiagnostic(Attr));
-    else
-      Mtxs.push_back_nodup(Mu);
+    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
+    if (Cp.isInvalid()) {
+      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
+      continue;
+    }
+    if (!Cp.shouldIgnore())
+      Mtxs.push_back_nodup(Cp);
   }
 }
 
@@ -1626,7 +1094,7 @@
 /// trylock applies to the given edge, then push them onto Mtxs, discarding
 /// any duplicates.
 template <class AttrType>
-void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
+void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                        Expr *Exp, const NamedDecl *D,
                                        const CFGBlock *PredBlock,
                                        const CFGBlock *CurrBlock,
@@ -1758,8 +1226,8 @@
   if(!FunDecl || !FunDecl->hasAttrs())
     return;
 
-  MutexIDList ExclusiveLocksToAdd;
-  MutexIDList SharedLocksToAdd;
+  CapExprSet ExclusiveLocksToAdd;
+  CapExprSet SharedLocksToAdd;
 
   // If the condition is a call to a Trylock function, then grab the attributes
   for (auto *Attr : FunDecl->getAttrs()) {
@@ -1788,10 +1256,13 @@
   // Add and remove locks.
   SourceLocation Loc = Exp->getExprLoc();
   for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
-    addLock(Result, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive),
+    addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
+                                                         LK_Exclusive, Loc),
             CapDiagKind);
   for (const auto &SharedLockToAdd : SharedLocksToAdd)
-    addLock(Result, SharedLockToAdd, LockData(Loc, LK_Shared), CapDiagKind);
+    addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
+                                                         LK_Shared, Loc),
+            CapDiagKind);
 }
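
A hypothetical sketch of the trylock handling above (assumes the earlier Mutex sketch gains a try_lock() member annotated with try_acquire_capability(true)):

bool try_update() {
  if (mu.try_lock()) {  // getEdgeLockset() adds "mu" on the success edge only
    data = 6;           // OK: "mu" is held on this branch
    mu.unlock();
    return true;
  }
  return false;         // "mu" is not in the lockset on the failure edge
}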
 
 /// \brief We use this class to visit different types of expressions in
@@ -1807,16 +1278,17 @@
   LocalVariableMap::Context LVarCtx;
   unsigned CtxIndex;
 
-  // Helper functions
-
+  // helper functions
   void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
                           Expr *MutexExp, ProtectedOperationKind POK,
-                          StringRef DiagKind);
+                          StringRef DiagKind, SourceLocation Loc);
   void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
                        StringRef DiagKind);
 
-  void checkAccess(const Expr *Exp, AccessKind AK);
-  void checkPtAccess(const Expr *Exp, AccessKind AK);
+  void checkAccess(const Expr *Exp, AccessKind AK,
+                   ProtectedOperationKind POK = POK_VarAccess);
+  void checkPtAccess(const Expr *Exp, AccessKind AK,
+                     ProtectedOperationKind POK = POK_VarAccess);
 
   void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
 
@@ -1837,62 +1309,87 @@
   void VisitDeclStmt(DeclStmt *S);
 };
 
+
 /// \brief Warn if the LSet does not contain a lock sufficient to protect access
 /// of at least the passed in AccessKind.
 void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                       AccessKind AK, Expr *MutexExp,
                                       ProtectedOperationKind POK,
-                                      StringRef DiagKind) {
+                                      StringRef DiagKind, SourceLocation Loc) {
   LockKind LK = getLockKindFromAccessKind(AK);
 
-  SExpr Mutex(MutexExp, Exp, D);
-  if (!Mutex.isValid()) {
-    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
+  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
+  if (Cp.isInvalid()) {
+    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
     return;
-  } else if (Mutex.shouldIgnore()) {
+  } else if (Cp.shouldIgnore()) {
     return;
   }
 
-  LockData* LDat = FSet.findLockUniv(Analyzer->FactMan, Mutex);
+  if (Cp.negative()) {
+    // Negative capabilities act like excluded locks (LOCKS_EXCLUDED).
+    FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
+    if (LDat) {
+      Analyzer->Handler.handleFunExcludesLock(
+          DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
+      return;
+    }
+
+    // If this does not refer to a negative capability in the same class,
+    // then stop here.
+    if (!Analyzer->inCurrentScope(Cp))
+      return;
+
+    // Otherwise the negative requirement must be propagated to the caller.
+    LDat = FSet.findLock(Analyzer->FactMan, Cp);
+    if (!LDat) {
+      Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(),
+                                           LK_Shared, Loc);
+    }
+    return;
+  }
+
+  FactEntry* LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
   bool NoError = true;
   if (!LDat) {
     // No exact match found.  Look for a partial match.
-    FactEntry* FEntry = FSet.findPartialMatch(Analyzer->FactMan, Mutex);
-    if (FEntry) {
+    LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
+    if (LDat) {
       // Warn that there's no precise match.
-      LDat = &FEntry->LDat;
-      std::string PartMatchStr = FEntry->MutID.toString();
+      std::string PartMatchStr = LDat->toString();
       StringRef   PartMatchName(PartMatchStr);
-      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
-                                           LK, Exp->getExprLoc(),
-                                           &PartMatchName);
+      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+                                           LK, Loc, &PartMatchName);
     } else {
       // Warn that there's no match at all.
-      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
-                                           LK, Exp->getExprLoc());
+      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+                                           LK, Loc);
     }
     NoError = false;
   }
   // Make sure the mutex we found is the right kind.
-  if (NoError && LDat && !LDat->isAtLeast(LK))
-    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(), LK,
-                                         Exp->getExprLoc());
+  if (NoError && LDat && !LDat->isAtLeast(LK)) {
+    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+                                         LK, Loc);
+  }
 }
 
 /// \brief Warn if the LSet contains the given lock.
 void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
-                                   Expr *MutexExp,
-                                   StringRef DiagKind) {
-  SExpr Mutex(MutexExp, Exp, D);
-  if (!Mutex.isValid()) {
-    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
+                                   Expr *MutexExp, StringRef DiagKind) {
+  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
+  if (Cp.isInvalid()) {
+    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
+    return;
+  } else if (Cp.shouldIgnore()) {
     return;
   }
 
-  LockData* LDat = FSet.findLock(Analyzer->FactMan, Mutex);
-  if (LDat)
+  FactEntry* LDat = FSet.findLock(Analyzer->FactMan, Cp);
+  if (LDat) {
     Analyzer->Handler.handleFunExcludesLock(
-        DiagKind, D->getNameAsString(), Mutex.toString(), Exp->getExprLoc());
+        DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
+  }
 }
 
 /// \brief Checks guarded_by and pt_guarded_by attributes.
@@ -1900,43 +1397,62 @@
 /// marked with guarded_by, we must ensure the appropriate mutexes are held.
 /// Similarly, we check if the access is to an expression that dereferences
 /// a pointer marked with pt_guarded_by.
-void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK) {
+void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
+                               ProtectedOperationKind POK) {
   Exp = Exp->IgnoreParenCasts();
 
+  SourceLocation Loc = Exp->getExprLoc();
+
+  // Local variables of reference type cannot be re-assigned;
+  // map them to their initializer.
+  while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
+    const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
+    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
+      if (const auto *E = VD->getInit()) {
+        Exp = E;
+        continue;
+      }
+    }
+    break;
+  }
+
   if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
     // For dereferences
     if (UO->getOpcode() == clang::UO_Deref)
-      checkPtAccess(UO->getSubExpr(), AK);
+      checkPtAccess(UO->getSubExpr(), AK, POK);
     return;
   }
 
   if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
-    checkPtAccess(AE->getLHS(), AK);
+    checkPtAccess(AE->getLHS(), AK, POK);
     return;
   }
 
   if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
     if (ME->isArrow())
-      checkPtAccess(ME->getBase(), AK);
+      checkPtAccess(ME->getBase(), AK, POK);
     else
-      checkAccess(ME->getBase(), AK);
+      checkAccess(ME->getBase(), AK, POK);
   }
 
   const ValueDecl *D = getValueDecl(Exp);
   if (!D || !D->hasAttrs())
     return;
 
-  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty())
-    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarAccess, AK,
-                                        Exp->getExprLoc());
+  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
+    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
+  }
 
   for (const auto *I : D->specific_attrs<GuardedByAttr>())
-    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarAccess,
-                       ClassifyDiagnostic(I));
+    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
+                       ClassifyDiagnostic(I), Loc);
 }
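
Illustrative only (hypothetical declarations from the earlier sketch): two paths through checkAccess() above:

void unguarded_writes() {
  data = 4;       // warned via handleMutexNotHeld: writing 'data' requires 'mu'

  int &r = data;  // the new loop above maps the local reference back to its
  r = 5;          // initializer, so this write is checked against guarded_by too
}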
 
+
 /// \brief Checks pt_guarded_by and pt_guarded_var attributes.
-void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
+/// POK is the same ProtectedOperationKind that was passed to checkAccess.
+void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
+                                 ProtectedOperationKind POK) {
   while (true) {
     if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
       Exp = PE->getSubExpr();
@@ -1946,7 +1462,7 @@
       if (CE->getCastKind() == CK_ArrayToPointerDecay) {
         // If it's an actual array, and not a pointer, then it's elements
         // are protected by GUARDED_BY, not PT_GUARDED_BY;
-        checkAccess(CE->getSubExpr(), AK);
+        checkAccess(CE->getSubExpr(), AK, POK);
         return;
       }
       Exp = CE->getSubExpr();
@@ -1955,17 +1471,21 @@
     break;
   }
 
+  // Pass by reference warnings are under a different flag.
+  ProtectedOperationKind PtPOK = POK_VarDereference;
+  if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;
+
   const ValueDecl *D = getValueDecl(Exp);
   if (!D || !D->hasAttrs())
     return;
 
-  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty())
-    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarDereference, AK,
+  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
+    Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
                                         Exp->getExprLoc());
 
   for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
-    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarDereference,
-                       ClassifyDiagnostic(I));
+    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
+                       ClassifyDiagnostic(I), Exp->getExprLoc());
 }
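
Illustrative only (hypothetical ptr, plus the mu sketched earlier): pt_guarded_by protects the pointee, which is what checkPtAccess() enforces:

int *ptr __attribute__((pt_guarded_by(mu)));

void pointee_access() {
  *ptr = 7;       // warned: the pointee is guarded by 'mu' (POK_VarDereference)
  ptr = nullptr;  // not warned here: only the pointee is pt_guarded_by
}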
 
 /// \brief Process a function call, method call, constructor call,
@@ -1981,8 +1501,8 @@
 void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
   SourceLocation Loc = Exp->getExprLoc();
   const AttrVec &ArgAttrs = D->getAttrs();
-  MutexIDList ExclusiveLocksToAdd, SharedLocksToAdd;
-  MutexIDList ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
+  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
+  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
   StringRef CapDiagKind = "mutex";
 
   for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
@@ -2006,22 +1526,23 @@
       case attr::AssertExclusiveLock: {
         AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);
 
-        MutexIDList AssertLocks;
+        CapExprSet AssertLocks;
         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
         for (const auto &AssertLock : AssertLocks)
-          Analyzer->addLock(FSet, AssertLock,
-                            LockData(Loc, LK_Exclusive, false, true),
+          Analyzer->addLock(FSet,
+                            llvm::make_unique<LockableFactEntry>(
+                                AssertLock, LK_Exclusive, Loc, false, true),
                             ClassifyDiagnostic(A));
         break;
       }
       case attr::AssertSharedLock: {
         AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);
 
-        MutexIDList AssertLocks;
+        CapExprSet AssertLocks;
         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
         for (const auto &AssertLock : AssertLocks)
-          Analyzer->addLock(FSet, AssertLock,
-                            LockData(Loc, LK_Shared, false, true),
+          Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                      AssertLock, LK_Shared, Loc, false, true),
                             ClassifyDiagnostic(A));
         break;
       }
@@ -2045,7 +1566,8 @@
         RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
         for (auto *Arg : A->args())
           warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
-                             POK_FunctionCall, ClassifyDiagnostic(A));
+                             POK_FunctionCall, ClassifyDiagnostic(A),
+                             Exp->getExprLoc());
         break;
       }
 
@@ -2074,25 +1596,28 @@
 
   // Add locks.
   for (const auto &M : ExclusiveLocksToAdd)
-    Analyzer->addLock(FSet, M, LockData(Loc, LK_Exclusive, isScopedVar),
+    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                M, LK_Exclusive, Loc, isScopedVar),
                       CapDiagKind);
   for (const auto &M : SharedLocksToAdd)
-    Analyzer->addLock(FSet, M, LockData(Loc, LK_Shared, isScopedVar),
+    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                M, LK_Shared, Loc, isScopedVar),
                       CapDiagKind);
 
-  // Add the managing object as a dummy mutex, mapped to the underlying mutex.
-  // FIXME -- this doesn't work if we acquire multiple locks.
   if (isScopedVar) {
+    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
     SourceLocation MLoc = VD->getLocation();
     DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
-    SExpr SMutex(&DRE, nullptr, nullptr);
+    // FIXME: does this store a pointer to DRE?
+    CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
 
-    for (const auto &M : ExclusiveLocksToAdd)
-      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive, M),
-                        CapDiagKind);
-    for (const auto &M : SharedLocksToAdd)
-      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared, M),
-                        CapDiagKind);
+    CapExprSet UnderlyingMutexes(ExclusiveLocksToAdd);
+    std::copy(SharedLocksToAdd.begin(), SharedLocksToAdd.end(),
+              std::back_inserter(UnderlyingMutexes));
+    Analyzer->addLock(FSet,
+                      llvm::make_unique<ScopedLockableFactEntry>(
+                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
+                      CapDiagKind);
   }
 
   // Remove locks.
@@ -2149,6 +1674,9 @@
 
 
 void BuildLockset::VisitCallExpr(CallExpr *Exp) {
+  bool ExamineArgs = true;
+  bool OperatorFun = false;
+
   if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
     MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee());
     // ME can be null when calling a method pointer
@@ -2169,8 +1697,12 @@
       }
     }
   } else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
-    switch (OE->getOperator()) {
+    OperatorFun = true;
+
+    auto OEop = OE->getOperator();
+    switch (OEop) {
       case OO_Equal: {
+        ExamineArgs = false;
         const Expr *Target = OE->getArg(0);
         const Expr *Source = OE->getArg(1);
         checkAccess(Target, AK_Written);
@@ -2182,16 +1714,53 @@
       case OO_Subscript: {
         const Expr *Obj = OE->getArg(0);
         checkAccess(Obj, AK_Read);
-        checkPtAccess(Obj, AK_Read);
+        if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
+          // Grrr.  operator* can be multiplication...
+          checkPtAccess(Obj, AK_Read);
+        }
         break;
       }
       default: {
+        // TODO: get rid of this, and rely on pass-by-ref instead.
         const Expr *Obj = OE->getArg(0);
         checkAccess(Obj, AK_Read);
         break;
       }
     }
   }
+
+
+  if (ExamineArgs) {
+    if (FunctionDecl *FD = Exp->getDirectCallee()) {
+      unsigned Fn = FD->getNumParams();
+      unsigned Cn = Exp->getNumArgs();
+      unsigned Skip = 0;
+
+      unsigned i = 0;
+      if (OperatorFun) {
+        if (isa<CXXMethodDecl>(FD)) {
+          // First arg in operator call is implicit self argument,
+          // and doesn't appear in the FunctionDecl.
+          Skip = 1;
+          Cn--;
+        } else {
+          // Ignore the first argument of operators; it's been checked above.
+          i = 1;
+        }
+      }
+      // Ignore default arguments
+      unsigned n = (Fn < Cn) ? Fn : Cn;
+
+      for (; i < n; ++i) {
+        ParmVarDecl* Pvd = FD->getParamDecl(i);
+        Expr* Arg = Exp->getArg(i+Skip);
+        QualType Qt = Pvd->getType();
+        if (Qt->isReferenceType())
+          checkAccess(Arg, AK_Read, POK_PassByRef);
+      }
+    }
+  }
+
   NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
   if(!D || !D->hasAttrs())
     return;
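
A hypothetical example of what the new argument loop above catches (guarded data passed by reference, checked as POK_PassByRef):

void takes_ref(int &x);

void pass_guarded_by_ref() {
  takes_ref(data);  // the loop sees a reference parameter and calls
                    // checkAccess(Arg, AK_Read, POK_PassByRef), so passing the
                    // guarded 'data' without holding 'mu' is diagnosed
}
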
@@ -2254,62 +1823,40 @@
 
   // Find locks in FSet2 that conflict or are not in FSet1, and warn.
   for (const auto &Fact : FSet2) {
-    const SExpr &FSet2Mutex = FactMan[Fact].MutID;
-    const LockData &LDat2 = FactMan[Fact].LDat;
-    FactSet::iterator I1 = FSet1.findLockIter(FactMan, FSet2Mutex);
+    const FactEntry *LDat1 = nullptr;
+    const FactEntry *LDat2 = &FactMan[Fact];
+    FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
+    if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];
 
-    if (I1 != FSet1.end()) {
-      const LockData* LDat1 = &FactMan[*I1].LDat;
-      if (LDat1->LKind != LDat2.LKind) {
-        Handler.handleExclusiveAndShared("mutex", FSet2Mutex.toString(),
-                                         LDat2.AcquireLoc, LDat1->AcquireLoc);
-        if (Modify && LDat1->LKind != LK_Exclusive) {
+    if (LDat1) {
+      if (LDat1->kind() != LDat2->kind()) {
+        Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
+                                         LDat2->loc(), LDat1->loc());
+        if (Modify && LDat1->kind() != LK_Exclusive) {
           // Take the exclusive lock, which is the one in FSet2.
-          *I1 = Fact;
+          *Iter1 = Fact;
         }
       }
-      else if (LDat1->Asserted && !LDat2.Asserted) {
+      else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
         // The non-asserted lock in FSet2 is the one we want to track.
-        *I1 = Fact;
+        *Iter1 = Fact;
       }
     } else {
-      if (LDat2.UnderlyingMutex.isValid()) {
-        if (FSet2.findLock(FactMan, LDat2.UnderlyingMutex)) {
-          // If this is a scoped lock that manages another mutex, and if the
-          // underlying mutex is still held, then warn about the underlying
-          // mutex.
-          Handler.handleMutexHeldEndOfScope("mutex",
-                                            LDat2.UnderlyingMutex.toString(),
-                                            LDat2.AcquireLoc, JoinLoc, LEK1);
-        }
-      }
-      else if (!LDat2.Managed && !FSet2Mutex.isUniversal() && !LDat2.Asserted)
-        Handler.handleMutexHeldEndOfScope("mutex", FSet2Mutex.toString(),
-                                          LDat2.AcquireLoc, JoinLoc, LEK1);
+      LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
+                                           Handler);
     }
   }
 
   // Find locks in FSet1 that are not in FSet2, and remove them.
   for (const auto &Fact : FSet1Orig) {
-    const SExpr &FSet1Mutex = FactMan[Fact].MutID;
-    const LockData &LDat1 = FactMan[Fact].LDat;
+    const FactEntry *LDat1 = &FactMan[Fact];
+    const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
 
-    if (!FSet2.findLock(FactMan, FSet1Mutex)) {
-      if (LDat1.UnderlyingMutex.isValid()) {
-        if (FSet1Orig.findLock(FactMan, LDat1.UnderlyingMutex)) {
-          // If this is a scoped lock that manages another mutex, and if the
-          // underlying mutex is still held, then warn about the underlying
-          // mutex.
-          Handler.handleMutexHeldEndOfScope("mutex",
-                                            LDat1.UnderlyingMutex.toString(),
-                                            LDat1.AcquireLoc, JoinLoc, LEK1);
-        }
-      }
-      else if (!LDat1.Managed && !FSet1Mutex.isUniversal() && !LDat1.Asserted)
-        Handler.handleMutexHeldEndOfScope("mutex", FSet1Mutex.toString(),
-                                          LDat1.AcquireLoc, JoinLoc, LEK2);
+    if (!LDat2) {
+      LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
+                                           Handler);
       if (Modify)
-        FSet1.removeLock(FactMan, FSet1Mutex);
+        FSet1.removeLock(FactMan, *LDat1);
     }
   }
 }
@@ -2348,6 +1895,8 @@
 
   CFG *CFGraph = walker.getGraph();
   const NamedDecl *D = walker.getDecl();
+  const FunctionDecl *CurrentFunction = dyn_cast<FunctionDecl>(D);
+  CurrentMethod = dyn_cast<CXXMethodDecl>(D);
 
   if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
     return;
@@ -2361,6 +1910,8 @@
   if (isa<CXXDestructorDecl>(D))
     return;  // Don't check inside destructors.
 
+  Handler.enterFunction(CurrentFunction);
+
   BlockInfo.resize(CFGraph->getNumBlockIDs(),
     CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
 
@@ -2379,9 +1930,9 @@
   // Fill in source locations for all CFGBlocks.
   findBlockLocations(CFGraph, SortedGraph, BlockInfo);
 
-  MutexIDList ExclusiveLocksAcquired;
-  MutexIDList SharedLocksAcquired;
-  MutexIDList LocksReleased;
+  CapExprSet ExclusiveLocksAcquired;
+  CapExprSet SharedLocksAcquired;
+  CapExprSet LocksReleased;
 
   // Add locks from exclusive_locks_required and shared_locks_required
   // to initial lockset. Also turn off checking for lock and unlock functions.
@@ -2391,8 +1942,8 @@
     FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
     const AttrVec &ArgAttrs = D->getAttrs();
 
-    MutexIDList ExclusiveLocksToAdd;
-    MutexIDList SharedLocksToAdd;
+    CapExprSet ExclusiveLocksToAdd;
+    CapExprSet SharedLocksToAdd;
     StringRef CapDiagKind = "mutex";
 
     SourceLocation Loc = D->getLocation();
@@ -2428,12 +1979,14 @@
     }
 
     // FIXME -- Loc can be wrong here.
-    for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
-      addLock(InitialLockset, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive),
-              CapDiagKind);
-    for (const auto &SharedLockToAdd : SharedLocksToAdd)
-      addLock(InitialLockset, SharedLockToAdd, LockData(Loc, LK_Shared),
-              CapDiagKind);
+    for (const auto &Mu : ExclusiveLocksToAdd)
+      addLock(InitialLockset,
+              llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc),
+              CapDiagKind, true);
+    for (const auto &Mu : SharedLocksToAdd)
+      addLock(InitialLockset,
+              llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc),
+              CapDiagKind, true);
   }
 
   for (const auto *CurrBlock : *SortedGraph) {
@@ -2602,11 +2155,11 @@
   // issue the appropriate warning.
   // FIXME: the location here is not quite right.
   for (const auto &Lock : ExclusiveLocksAcquired)
-    ExpectedExitSet.addLock(FactMan, Lock,
-                            LockData(D->getLocation(), LK_Exclusive));
+    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                         Lock, LK_Exclusive, D->getLocation()));
   for (const auto &Lock : SharedLocksAcquired)
-    ExpectedExitSet.addLock(FactMan, Lock,
-                            LockData(D->getLocation(), LK_Shared));
+    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                         Lock, LK_Shared, D->getLocation()));
   for (const auto &Lock : LocksReleased)
     ExpectedExitSet.removeLock(FactMan, Lock);
 
@@ -2616,13 +2169,10 @@
                    LEK_LockedAtEndOfFunction,
                    LEK_NotLockedAtEndOfFunction,
                    false);
+
+  Handler.leaveFunction(CurrentFunction);
 }
 
-} // end anonymous namespace
-
-
-namespace clang {
-namespace thread_safety {
 
 /// \brief Check a function's CFG for thread-safety violations.
 ///
@@ -2647,4 +2197,4 @@
   llvm_unreachable("Unknown AccessKind");
 }
 
-}} // end namespace clang::thread_safety
+}} // end namespace clang::threadSafety
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index da88b78..88a1cbf 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -63,11 +63,9 @@
 namespace til {
 
 // Return true if E is a variable that points to an incomplete Phi node.
-static bool isIncompleteVar(const SExpr *E) {
-  if (const auto *V = dyn_cast<Variable>(E)) {
-    if (const auto *Ph = dyn_cast<Phi>(V->definition()))
-      return Ph->status() == Phi::PH_Incomplete;
-  }
+static bool isIncompletePhi(const SExpr *E) {
+  if (const auto *Ph = dyn_cast<Phi>(E))
+    return Ph->status() == Phi::PH_Incomplete;
   return false;
 }
 
@@ -91,6 +89,124 @@
 }
 
 
+
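+// Returns true if E, the callee of a member call, refers to its object
+// through '->' rather than '.'.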
+inline bool isCalleeArrow(const Expr *E) {
+  const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
+  return ME ? ME->isArrow() : false;
+}
+
+
+/// \brief Translate a clang expression in an attribute to a til::SExpr.
+/// Constructs the context from D, DeclExp, and SelfDecl.
+///
+/// \param AttrExp The expression to translate.
+/// \param D       The declaration to which the attribute is attached.
+/// \param DeclExp An expression involving the Decl to which the attribute
+///                is attached.  E.g. the call to a function.
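+/// \param SelfDecl If non-null, the declaration to use as the "self" argument
+///                 when it cannot be recovered from DeclExp (e.g. for
+///                 constructors).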
+CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
+                                               const NamedDecl *D,
+                                               const Expr *DeclExp,
+                                               VarDecl *SelfDecl) {
+  // A raw attribute expression, with no substitutions, is translated directly.
+  if (!DeclExp)
+    return translateAttrExpr(AttrExp, nullptr);
+
+  CallingContext Ctx(nullptr, D);
+
+  // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
+  // for formal parameters when the attribute expression is translated below.
+  if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
+    Ctx.SelfArg   = ME->getBase();
+    Ctx.SelfArrow = ME->isArrow();
+  } else if (const CXXMemberCallExpr *CE =
+             dyn_cast<CXXMemberCallExpr>(DeclExp)) {
+    Ctx.SelfArg   = CE->getImplicitObjectArgument();
+    Ctx.SelfArrow = isCalleeArrow(CE->getCallee());
+    Ctx.NumArgs   = CE->getNumArgs();
+    Ctx.FunArgs   = CE->getArgs();
+  } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
+    Ctx.NumArgs = CE->getNumArgs();
+    Ctx.FunArgs = CE->getArgs();
+  } else if (const CXXConstructExpr *CE =
+             dyn_cast<CXXConstructExpr>(DeclExp)) {
+    Ctx.SelfArg = nullptr;  // Will be set below
+    Ctx.NumArgs = CE->getNumArgs();
+    Ctx.FunArgs = CE->getArgs();
+  } else if (D && isa<CXXDestructorDecl>(D)) {
+    // There's no such thing as a "destructor call" in the AST.
+    Ctx.SelfArg = DeclExp;
+  }
+
+  // Hack to handle constructors, where self cannot be recovered from
+  // the expression.
+  if (SelfDecl && !Ctx.SelfArg) {
+    DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
+                        SelfDecl->getLocation());
+    Ctx.SelfArg = &SelfDRE;
+
+    // If the attribute has no arguments, then assume the argument is "this".
+    if (!AttrExp)
+      return translateAttrExpr(Ctx.SelfArg, nullptr);
+    else  // For most attributes.
+      return translateAttrExpr(AttrExp, &Ctx);
+  }
+
+  // If the attribute has no arguments, then assume the argument is "this".
+  if (!AttrExp)
+    return translateAttrExpr(Ctx.SelfArg, nullptr);
+  else  // For most attributes.
+    return translateAttrExpr(AttrExp, &Ctx);
+}
+
+
+/// \brief Translate a clang expression in an attribute to a til::SExpr.
+// This assumes a CallingContext has already been created.
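+//
+// For example, the string literal "*" denotes a universal (wildcard)
+// capability that suppresses further checking, and a leading "!" (e.g. !mu)
+// produces a negative capability on mu.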
+CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
+                                               CallingContext *Ctx) {
+  if (!AttrExp)
+    return CapabilityExpr(nullptr, false);
+
+  if (auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
+    if (SLit->getString() == StringRef("*"))
+      // The "*" expr is a universal lock, which essentially turns off
+      // checks until it is removed from the lockset.
+      return CapabilityExpr(new (Arena) til::Wildcard(), false);
+    else
+      // Ignore other string literals for now.
+      return CapabilityExpr(nullptr, false);
+  }
+
+  bool Neg = false;
+  if (auto *OE = dyn_cast<CXXOperatorCallExpr>(AttrExp)) {
+    if (OE->getOperator() == OO_Exclaim) {
+      Neg = true;
+      AttrExp = OE->getArg(0);
+    }
+  }
+  else if (auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
+    if (UO->getOpcode() == UO_LNot) {
+      Neg = true;
+      AttrExp = UO->getSubExpr();
+    }
+  }
+
+  til::SExpr *E = translate(AttrExp, Ctx);
+
+  // Trap mutex expressions like nullptr, or 0.
+  // Any literal value is nonsense.
+  if (!E || isa<til::Literal>(E))
+    return CapabilityExpr(nullptr, false);
+
+  // Hack to deal with smart pointers -- strip off top-level pointer casts.
+  if (auto *CE = dyn_cast_or_null<til::Cast>(E)) {
+    if (CE->castOpcode() == til::CAST_objToPtr)
+      return CapabilityExpr(CE->expr(), Neg);
+  }
+  return CapabilityExpr(E, Neg);
+}
+
+
+
 // Translate a clang statement or expression to a TIL expression.
 // Also performs substitution of variables; Ctx provides the context.
 // Dispatches on the type of S.
@@ -125,9 +241,10 @@
   case Stmt::ArraySubscriptExprClass:
     return translateArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Ctx);
   case Stmt::ConditionalOperatorClass:
-    return translateConditionalOperator(cast<ConditionalOperator>(S), Ctx);
+    return translateAbstractConditionalOperator(
+             cast<ConditionalOperator>(S), Ctx);
   case Stmt::BinaryConditionalOperatorClass:
-    return translateBinaryConditionalOperator(
+    return translateAbstractConditionalOperator(
              cast<BinaryConditionalOperator>(S), Ctx);
 
   // We treat these as no-ops
@@ -162,6 +279,7 @@
 }
 
 
+
 til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
                                                CallingContext *Ctx) {
   const ValueDecl *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
@@ -197,17 +315,75 @@
 }
 
 
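+// Returns the clang ValueDecl associated with E, if there is one.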
+const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
+  if (auto *V = dyn_cast<til::Variable>(E))
+    return V->clangDecl();
+  if (auto *Ph = dyn_cast<til::Phi>(E))
+    return Ph->clangDecl();
+  if (auto *P = dyn_cast<til::Project>(E))
+    return P->clangDecl();
+  if (auto *L = dyn_cast<til::LiteralPtr>(E))
+    return L->clangDecl();
+  return nullptr;
+}
+
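+// Returns true if E refers to a declaration of C++ pointer type, or is a
+// cast from an object to a pointer (as produced for smart pointers).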
+bool hasCppPointerType(const til::SExpr *E) {
+  auto *VD = getValueDeclFromSExpr(E);
+  if (VD && VD->getType()->isPointerType())
+    return true;
+  if (auto *C = dyn_cast<til::Cast>(E))
+    return C->castOpcode() == til::CAST_objToPtr;
+
+  return false;
+}
+
+
+// Grab the very first declaration of virtual method D
+const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) {
+  while (true) {
+    D = D->getCanonicalDecl();
+    CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
+                                   E = D->end_overridden_methods();
+    if (I == E)
+      return D;  // Method does not override anything
+    D = *I;      // FIXME: this does not work with multiple inheritance.
+  }
+  return nullptr;
+}
+
 til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
                                               CallingContext *Ctx) {
-  til::SExpr *E = translate(ME->getBase(), Ctx);
-  E = new (Arena) til::SApply(E);
-  return new (Arena) til::Project(E, ME->getMemberDecl());
+  til::SExpr *BE = translate(ME->getBase(), Ctx);
+  til::SExpr *E  = new (Arena) til::SApply(BE);
+
+  const ValueDecl *D = ME->getMemberDecl();
+  if (auto *VD = dyn_cast<CXXMethodDecl>(D))
+    D = getFirstVirtualDecl(VD);
+
+  til::Project *P = new (Arena) til::Project(E, D);
+  if (hasCppPointerType(BE))
+    P->setArrow(true);
+  return P;
 }
 
 
 til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
-                                            CallingContext *Ctx) {
-  // TODO -- Lock returned
+                                            CallingContext *Ctx,
+                                            const Expr *SelfE) {
+  if (CapabilityExprMode) {
+    // Handle LOCK_RETURNED
+    const FunctionDecl *FD = CE->getDirectCallee()->getMostRecentDecl();
+    if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
+      CallingContext LRCallCtx(Ctx);
+      LRCallCtx.AttrDecl = CE->getDirectCallee();
+      LRCallCtx.SelfArg  = SelfE;
+      LRCallCtx.NumArgs  = CE->getNumArgs();
+      LRCallCtx.FunArgs  = CE->getArgs();
+      return const_cast<til::SExpr*>(
+          translateAttrExpr(At->getArg(), &LRCallCtx).sexpr());
+    }
+  }
+
   til::SExpr *E = translate(CE->getCallee(), Ctx);
   for (const auto *Arg : CE->arguments()) {
     til::SExpr *A = translate(Arg, Ctx);
@@ -219,12 +395,31 @@
 
 til::SExpr *SExprBuilder::translateCXXMemberCallExpr(
     const CXXMemberCallExpr *ME, CallingContext *Ctx) {
-  return translateCallExpr(cast<CallExpr>(ME), Ctx);
+  if (CapabilityExprMode) {
+    // Ignore calls to get() on smart pointers.
+    if (ME->getMethodDecl()->getNameAsString() == "get" &&
+        ME->getNumArgs() == 0) {
+      auto *E = translate(ME->getImplicitObjectArgument(), Ctx);
+      return new (Arena) til::Cast(til::CAST_objToPtr, E);
+      // return E;
+    }
+  }
+  return translateCallExpr(cast<CallExpr>(ME), Ctx,
+                           ME->getImplicitObjectArgument());
 }
 
 
 til::SExpr *SExprBuilder::translateCXXOperatorCallExpr(
     const CXXOperatorCallExpr *OCE, CallingContext *Ctx) {
+  if (CapabilityExprMode) {
+    // Ignore operator * and operator -> on smart pointers.
+    OverloadedOperatorKind k = OCE->getOperator();
+    if (k == OO_Star || k == OO_Arrow) {
+      auto *E = translate(OCE->getArg(0), Ctx);
+      return new (Arena) til::Cast(til::CAST_objToPtr, E);
+      // return E;
+    }
+  }
   return translateCallExpr(cast<CallExpr>(OCE), Ctx);
 }
 
@@ -238,8 +433,23 @@
   case UO_PreDec:
     return new (Arena) til::Undefined(UO);
 
+  case UO_AddrOf: {
+    if (CapabilityExprMode) {
+      // interpret &Graph::mu_ as an existential.
+      if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr())) {
+        if (DRE->getDecl()->isCXXInstanceMember()) {
+          // This is a pointer-to-member expression, e.g. &MyClass::mu_.
+          // We interpret this syntax specially, as a wildcard.
+          auto *W = new (Arena) til::Wildcard();
+          return new (Arena) til::Project(W, DRE->getDecl());
+        }
+      }
+    }
+    // otherwise, & is a no-op
+    return translate(UO->getSubExpr(), Ctx);
+  }
+
   // We treat these as no-ops
-  case UO_AddrOf:
   case UO_Deref:
   case UO_Plus:
     return translate(UO->getSubExpr(), Ctx);
@@ -360,7 +570,9 @@
         return E0;
     }
     til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
-    return new (Arena) til::Load(E0);
+    return E0;
+    // FIXME!! -- get Load working properly
+    // return new (Arena) til::Load(E0);
   }
   case CK_NoOp:
   case CK_DerivedToBase:
@@ -373,6 +585,8 @@
   default: {
     // FIXME: handle different kinds of casts.
     til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
+    if (CapabilityExprMode)
+      return E0;
     return new (Arena) til::Cast(til::CAST_none, E0);
   }
   }
@@ -389,15 +603,12 @@
 
 
 til::SExpr *
-SExprBuilder::translateConditionalOperator(const ConditionalOperator *C,
-                                           CallingContext *Ctx) {
-  return new (Arena) til::Undefined(C);
-}
-
-
-til::SExpr *SExprBuilder::translateBinaryConditionalOperator(
-    const BinaryConditionalOperator *C, CallingContext *Ctx) {
-  return new (Arena) til::Undefined(C);
+SExprBuilder::translateAbstractConditionalOperator(
+    const AbstractConditionalOperator *CO, CallingContext *Ctx) {
+  auto *C = translate(CO->getCond(), Ctx);
+  auto *T = translate(CO->getTrueExpr(), Ctx);
+  auto *E = translate(CO->getFalseExpr(), Ctx);
+  return new (Arena) til::IfThenElse(C, T, E);
 }
 
 
@@ -430,16 +641,14 @@
 // If E is trivial returns E.
 til::SExpr *SExprBuilder::addStatement(til::SExpr* E, const Stmt *S,
                                        const ValueDecl *VD) {
-  if (!E)
-    return nullptr;
-  if (til::ThreadSafetyTIL::isTrivial(E))
+  if (!E || !CurrentBB || E->block() || til::ThreadSafetyTIL::isTrivial(E))
     return E;
-
-  til::Variable *V = new (Arena) til::Variable(E, VD);
-  CurrentInstructions.push_back(V);
+  if (VD)
+    E = new (Arena) til::Variable(E, VD);
+  CurrentInstructions.push_back(E);
   if (S)
-    insertStmt(S, V);
-  return V;
+    insertStmt(S, E);
+  return E;
 }
 
 
@@ -496,11 +705,11 @@
   unsigned ArgIndex = CurrentBlockInfo->ProcessedPredecessors;
   assert(ArgIndex > 0 && ArgIndex < NPreds);
 
-  til::Variable *V = dyn_cast<til::Variable>(CurrentLVarMap[i].second);
-  if (V && V->getBlockID() == CurrentBB->blockID()) {
+  til::SExpr *CurrE = CurrentLVarMap[i].second;
+  if (CurrE->block() == CurrentBB) {
     // We already have a Phi node in the current block,
     // so just add the new variable to the Phi node.
-    til::Phi *Ph = dyn_cast<til::Phi>(V->definition());
+    til::Phi *Ph = dyn_cast<til::Phi>(CurrE);
     assert(Ph && "Expecting Phi node.");
     if (E)
       Ph->values()[ArgIndex] = E;
@@ -509,27 +718,26 @@
 
   // Make a new phi node: phi(..., E)
   // All phi args up to the current index are set to the current value.
-  til::SExpr *CurrE = CurrentLVarMap[i].second;
   til::Phi *Ph = new (Arena) til::Phi(Arena, NPreds);
   Ph->values().setValues(NPreds, nullptr);
   for (unsigned PIdx = 0; PIdx < ArgIndex; ++PIdx)
     Ph->values()[PIdx] = CurrE;
   if (E)
     Ph->values()[ArgIndex] = E;
+  Ph->setClangDecl(CurrentLVarMap[i].first);
   // If E is from a back-edge, or either E or CurrE are incomplete, then
   // mark this node as incomplete; we may need to remove it later.
-  if (!E || isIncompleteVar(E) || isIncompleteVar(CurrE)) {
+  if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE)) {
     Ph->setStatus(til::Phi::PH_Incomplete);
   }
 
   // Add Phi node to current block, and update CurrentLVarMap[i]
-  auto *Var = new (Arena) til::Variable(Ph, CurrentLVarMap[i].first);
-  CurrentArguments.push_back(Var);
+  CurrentArguments.push_back(Ph);
   if (Ph->status() == til::Phi::PH_Incomplete)
-    IncompleteArgs.push_back(Var);
+    IncompleteArgs.push_back(Ph);
 
   CurrentLVarMap.makeWritable();
-  CurrentLVarMap.elem(i).second = Var;
+  CurrentLVarMap.elem(i).second = Ph;
 }
 
 
@@ -603,15 +811,13 @@
   unsigned ArgIndex = BBInfo[Blk->getBlockID()].ProcessedPredecessors;
   assert(ArgIndex > 0 && ArgIndex < BB->numPredecessors());
 
-  for (til::Variable *V : BB->arguments()) {
-    til::Phi *Ph = dyn_cast_or_null<til::Phi>(V->definition());
+  for (til::SExpr *PE : BB->arguments()) {
+    til::Phi *Ph = dyn_cast_or_null<til::Phi>(PE);
     assert(Ph && "Expecting Phi Node.");
     assert(Ph->values()[ArgIndex] == nullptr && "Wrong index for back edge.");
-    assert(V->clangDecl() && "No local variable for Phi node.");
 
-    til::SExpr *E = lookupVarDecl(V->clangDecl());
+    til::SExpr *E = lookupVarDecl(Ph->clangDecl());
     assert(E && "Couldn't find local variable for Phi node.");
-
     Ph->values()[ArgIndex] = E;
   }
 }
@@ -631,7 +837,6 @@
     BB->reserveInstructions(B->size());
     BlockMap[B->getBlockID()] = BB;
   }
-  CallCtx.reset(new SExprBuilder::CallingContext(D));
 
   CurrentBB = lookupBlock(&Cfg->getEntry());
   auto Parms = isa<ObjCMethodDecl>(D) ? cast<ObjCMethodDecl>(D)->parameters()
@@ -691,13 +896,13 @@
   // Push those arguments onto the basic block.
   CurrentBB->arguments().reserve(
     static_cast<unsigned>(CurrentArguments.size()), Arena);
-  for (auto *V : CurrentArguments)
-    CurrentBB->addArgument(V);
+  for (auto *A : CurrentArguments)
+    CurrentBB->addArgument(A);
 }
 
 
 void SExprBuilder::handleStatement(const Stmt *S) {
-  til::SExpr *E = translate(S, CallCtx.get());
+  til::SExpr *E = translate(S, nullptr);
   addStatement(E, S);
 }
 
@@ -726,17 +931,16 @@
     til::BasicBlock *BB = *It ? lookupBlock(*It) : nullptr;
     // TODO: set index
     unsigned Idx = BB ? BB->findPredecessorIndex(CurrentBB) : 0;
-    til::SExpr *Tm = new (Arena) til::Goto(BB, Idx);
+    auto *Tm = new (Arena) til::Goto(BB, Idx);
     CurrentBB->setTerminator(Tm);
   }
   else if (N == 2) {
-    til::SExpr *C = translate(B->getTerminatorCondition(true), CallCtx.get());
+    til::SExpr *C = translate(B->getTerminatorCondition(true), nullptr);
     til::BasicBlock *BB1 = *It ? lookupBlock(*It) : nullptr;
     ++It;
     til::BasicBlock *BB2 = *It ? lookupBlock(*It) : nullptr;
-    unsigned Idx1 = BB1 ? BB1->findPredecessorIndex(CurrentBB) : 0;
-    unsigned Idx2 = BB2 ? BB2->findPredecessorIndex(CurrentBB) : 0;
-    til::SExpr *Tm = new (Arena) til::Branch(C, BB1, BB2, Idx1, Idx2);
+    // FIXME: make sure these aren't critical edges.
+    auto *Tm = new (Arena) til::Branch(C, BB1, BB2);
     CurrentBB->setTerminator(Tm);
   }
 }
@@ -763,10 +967,9 @@
 
 
 void SExprBuilder::exitCFG(const CFGBlock *Last) {
-  for (auto *V : IncompleteArgs) {
-    til::Phi *Ph = dyn_cast<til::Phi>(V->definition());
-    if (Ph && Ph->status() == til::Phi::PH_Incomplete)
-      simplifyIncompleteArg(V, Ph);
+  for (auto *Ph : IncompleteArgs) {
+    if (Ph->status() == til::Phi::PH_Incomplete)
+      simplifyIncompleteArg(Ph);
   }
 
   CurrentArguments.clear();
@@ -775,18 +978,15 @@
 }
 
 
-
-class TILPrinter : public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
-
-
+/*
 void printSCFG(CFGWalker &Walker) {
   llvm::BumpPtrAllocator Bpa;
   til::MemRegionRef Arena(&Bpa);
-  SExprBuilder builder(Arena);
-  til::SCFG *Cfg = builder.buildCFG(Walker);
-  TILPrinter::print(Cfg, llvm::errs());
+  SExprBuilder SxBuilder(Arena);
+  til::SCFG *Scfg = SxBuilder.buildCFG(Walker);
+  TILPrinter::print(Scfg, llvm::errs());
 }
-
+*/
 
 
 } // end namespace threadSafety
diff --git a/lib/Analysis/ThreadSafetyTIL.cpp b/lib/Analysis/ThreadSafetyTIL.cpp
index f67cbb9..ebe374e 100644
--- a/lib/Analysis/ThreadSafetyTIL.cpp
+++ b/lib/Analysis/ThreadSafetyTIL.cpp
@@ -48,12 +48,20 @@
 }
 
 
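+// Force evaluation of the future: compute the result and cache it.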
+SExpr* Future::force() {
+  Status = FS_evaluating;
+  Result = compute();
+  Status = FS_done;
+  return Result;
+}
+
+
 unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
   unsigned Idx = Predecessors.size();
   Predecessors.reserveCheck(1, Arena);
   Predecessors.push_back(Pred);
-  for (Variable *V : Args) {
-    if (Phi* Ph = dyn_cast<Phi>(V->definition())) {
+  for (SExpr *E : Args) {
+    if (Phi* Ph = dyn_cast<Phi>(E)) {
       Ph->values().reserveCheck(1, Arena);
       Ph->values().push_back(nullptr);
     }
@@ -61,93 +69,275 @@
   return Idx;
 }
 
+
 void BasicBlock::reservePredecessors(unsigned NumPreds) {
   Predecessors.reserve(NumPreds, Arena);
-  for (Variable *V : Args) {
-    if (Phi* Ph = dyn_cast<Phi>(V->definition())) {
+  for (SExpr *E : Args) {
+    if (Phi* Ph = dyn_cast<Phi>(E)) {
       Ph->values().reserve(NumPreds, Arena);
     }
   }
 }
 
-void BasicBlock::renumberVars() {
-  unsigned VID = 0;
-  for (Variable *V : Args) {
-    V->setID(BlockID, VID++);
-  }
-  for (Variable *V : Instrs) {
-    V->setID(BlockID, VID++);
-  }
-}
-
-void SCFG::renumberVars() {
-  for (BasicBlock *B : Blocks) {
-    B->renumberVars();
-  }
-}
-
-
-
 
 // If E is a variable, then trace back through any aliases or redundant
 // Phi nodes to find the canonical definition.
-SExpr *getCanonicalVal(SExpr *E) {
-  while (auto *V = dyn_cast<Variable>(E)) {
-    SExpr *D;
-    do {
-      if (V->kind() != Variable::VK_Let)
-        return V;
-      D = V->definition();
-      auto *V2 = dyn_cast<Variable>(D);
-      if (V2)
-        V = V2;
-      else
-        break;
-    } while (true);
-
-    if (ThreadSafetyTIL::isTrivial(D))
-      return D;
-
-    if (Phi *Ph = dyn_cast<Phi>(D)) {
-      if (Ph->status() == Phi::PH_Incomplete)
-        simplifyIncompleteArg(V, Ph);
-
+const SExpr *getCanonicalVal(const SExpr *E) {
+  while (true) {
+    if (auto *V = dyn_cast<Variable>(E)) {
+      if (V->kind() == Variable::VK_Let) {
+        E = V->definition();
+        continue;
+      }
+    }
+    if (const Phi *Ph = dyn_cast<Phi>(E)) {
       if (Ph->status() == Phi::PH_SingleVal) {
         E = Ph->values()[0];
         continue;
       }
     }
-    return V;
+    break;
   }
   return E;
 }
 
 
+// If E is a variable, then trace back through any aliases or redundant
+// Phi nodes to find the canonical definition.
+// The non-const version will simplify incomplete Phi nodes.
+SExpr *simplifyToCanonicalVal(SExpr *E) {
+  while (true) {
+    if (auto *V = dyn_cast<Variable>(E)) {
+      if (V->kind() != Variable::VK_Let)
+        return V;
+      // Eliminate redundant variables, e.g. x = y, or x = 5,
+      // but keep anything more complicated.
+      if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
+        E = V->definition();
+        continue;
+      }
+      return V;
+    }
+    if (auto *Ph = dyn_cast<Phi>(E)) {
+      if (Ph->status() == Phi::PH_Incomplete)
+        simplifyIncompleteArg(Ph);
+      // Eliminate redundant Phi nodes.
+      if (Ph->status() == Phi::PH_SingleVal) {
+        E = Ph->values()[0];
+        continue;
+      }
+    }
+    return E;
+  }
+}
+
+
 // Trace the arguments of an incomplete Phi node to see if they have the same
 // canonical definition.  If so, mark the Phi node as redundant.
 // getCanonicalVal() will recursively call simplifyIncompletePhi().
-void simplifyIncompleteArg(Variable *V, til::Phi *Ph) {
+void simplifyIncompleteArg(til::Phi *Ph) {
   assert(Ph && Ph->status() == Phi::PH_Incomplete);
 
   // eliminate infinite recursion -- assume that this node is not redundant.
   Ph->setStatus(Phi::PH_MultiVal);
 
-  SExpr *E0 = getCanonicalVal(Ph->values()[0]);
+  SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
   for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
-    SExpr *Ei = getCanonicalVal(Ph->values()[i]);
-    if (Ei == V)
+    SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
+    if (Ei == Ph)
       continue;  // Recursive reference to itself.  Don't count.
     if (Ei != E0) {
       return;    // Status is already set to MultiVal.
     }
   }
   Ph->setStatus(Phi::PH_SingleVal);
-  // Eliminate Redundant Phi node.
-  V->setDefinition(Ph->values()[0]);
 }
 
 
+// Renumbers the arguments and instructions to have unique, sequential IDs.
+int BasicBlock::renumberInstrs(int ID) {
+  for (auto *Arg : Args)
+    Arg->setID(this, ID++);
+  for (auto *Instr : Instrs)
+    Instr->setID(this, ID++);
+  TermInstr->setID(this, ID++);
+  return ID;
+}
+
+// Sorts the CFG's blocks using a reverse post-order depth-first traversal.
+// Each block will be written into the Blocks array in order, and its BlockID
+// will be set to the index in the array.  Sorting should start from the entry
+// block, and ID should be the total number of blocks.
+int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+  if (Visited) return ID;
+  Visited = true;
+  for (auto *Block : successors())
+    ID = Block->topologicalSort(Blocks, ID);
+  // set ID and update block array in place.
+  // We may lose pointers to unreachable blocks.
+  assert(ID > 0);
+  BlockID = --ID;
+  Blocks[BlockID] = this;
+  return ID;
+}
+
+// Performs a reverse topological traversal, starting from the exit block and
+// following back-edges.  The dominator is serialized before any predecessors,
+// which guarantees that all blocks are serialized after their dominator and
+// before their post-dominator (because it's a reverse topological traversal).
+// ID should be initially set to 0.
+//
+// This sort assumes that (1) dominators have been computed, (2) there are no
+// critical edges, and (3) the entry block is reachable from the exit block
+// and no blocks are accessible via traversal of back-edges from the exit that
+// weren't accessible via forward edges from the entry.
+int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+  // Visited is assumed to have been set to true by topologicalSort.  This
+  // pass clears it, so !Visited means the node has already been processed
+  // in the current pass.
+  if (!Visited) return ID;
+  Visited = false;
+  if (DominatorNode.Parent)
+    ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
+  for (auto *Pred : Predecessors)
+    ID = Pred->topologicalFinalSort(Blocks, ID);
+  assert(static_cast<size_t>(ID) < Blocks.size());
+  BlockID = ID++;
+  Blocks[BlockID] = this;
+  return ID;
+}
+
+// Computes the immediate dominator of the current block.  Assumes that all of
+// its predecessors have already computed their dominators.  This is achieved
+// by visiting the nodes in topological order.
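+// Candidates are intersected pairwise by walking whichever block has the
+// larger ID up the dominator tree until the two walks meet.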
+void BasicBlock::computeDominator() {
+  BasicBlock *Candidate = nullptr;
+  // Walk backwards from each predecessor to find the common dominator node.
+  for (auto *Pred : Predecessors) {
+    // Skip back-edges
+    if (Pred->BlockID >= BlockID) continue;
+    // If we don't have a candidate for the dominator yet, take this one.
+    if (Candidate == nullptr) {
+      Candidate = Pred;
+      continue;
+    }
+    // Walk the alternate and current candidate back to find a common ancestor.
+    auto *Alternate = Pred;
+    while (Alternate != Candidate) {
+      if (Candidate->BlockID > Alternate->BlockID)
+        Candidate = Candidate->DominatorNode.Parent;
+      else
+        Alternate = Alternate->DominatorNode.Parent;
+    }
+  }
+  DominatorNode.Parent = Candidate;
+  DominatorNode.SizeOfSubTree = 1;
+}
+
+// Computes the immediate post-dominator of the current block.  Assumes that all
+// of its successors have already computed their post-dominators.  This is
+// achieved by visiting the nodes in reverse topological order.
+void BasicBlock::computePostDominator() {
+  BasicBlock *Candidate = nullptr;
+  // Walk forward from each successor to find the common post-dominator node.
+  for (auto *Succ : successors()) {
+    // Skip back-edges
+    if (Succ->BlockID <= BlockID) continue;
+    // If we don't have a candidate for the post-dominator yet, take this one.
+    if (Candidate == nullptr) {
+      Candidate = Succ;
+      continue;
+    }
+    // Walk the alternate and current candidate back to find a common ancestor.
+    auto *Alternate = Succ;
+    while (Alternate != Candidate) {
+      if (Candidate->BlockID < Alternate->BlockID)
+        Candidate = Candidate->PostDominatorNode.Parent;
+      else
+        Alternate = Alternate->PostDominatorNode.Parent;
+    }
+  }
+  PostDominatorNode.Parent = Candidate;
+  PostDominatorNode.SizeOfSubTree = 1;
+}
+
+
+// Renumber instructions in all blocks
+void SCFG::renumberInstrs() {
+  int InstrID = 0;
+  for (auto *Block : Blocks)
+    InstrID = Block->renumberInstrs(InstrID);
+}
+
+
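+// Adds the size of B's subtree to its parent's subtree size, recording B's
+// provisional NodeID as its offset within the parent (fixed up later by
+// computeNodeID).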
+static inline void computeNodeSize(BasicBlock *B,
+                                   BasicBlock::TopologyNode BasicBlock::*TN) {
+  BasicBlock::TopologyNode *N = &(B->*TN);
+  if (N->Parent) {
+    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
+    // Initially set ID relative to the (as yet uncomputed) parent ID
+    N->NodeID = P->SizeOfSubTree;
+    P->SizeOfSubTree += N->SizeOfSubTree;
+  }
+}
+
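+// Converts the provisional NodeID recorded by computeNodeSize into a final ID
+// by adding the parent's NodeID.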
+static inline void computeNodeID(BasicBlock *B,
+                                 BasicBlock::TopologyNode BasicBlock::*TN) {
+  BasicBlock::TopologyNode *N = &(B->*TN);
+  if (N->Parent) {
+    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
+    N->NodeID += P->NodeID;    // Fix NodeIDs relative to starting node.
+  }
+}
+
+
+// Normalizes a CFG.  Normalization has a few major components:
+// 1) Removing unreachable blocks.
+// 2) Computing dominators and post-dominators
+// 3) Topologically sorting the blocks into the "Blocks" array.
+void SCFG::computeNormalForm() {
+  // Topologically sort the blocks starting from the entry block.
+  int NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
+  if (NumUnreachableBlocks > 0) {
+    // If there were unreachable blocks shift everything down, and delete them.
+    for (size_t I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
+      size_t NI = I - NumUnreachableBlocks;
+      Blocks[NI] = Blocks[I];
+      Blocks[NI]->BlockID = NI;
+      // FIXME: clean up predecessor pointers to unreachable blocks?
+    }
+    Blocks.drop(NumUnreachableBlocks);
+  }
+
+  // Compute dominators.
+  for (auto *Block : Blocks)
+    Block->computeDominator();
+
+  // Once dominators have been computed, the final sort may be performed.
+  int NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
+  assert(static_cast<size_t>(NumBlocks) == Blocks.size());
+  (void) NumBlocks;
+
+  // Renumber the instructions now that we have a final sort.
+  renumberInstrs();
+
+  // Compute post-dominators and compute the sizes of each node in the
+  // dominator tree.
+  for (auto *Block : Blocks.reverse()) {
+    Block->computePostDominator();
+    computeNodeSize(Block, &BasicBlock::DominatorNode);
+  }
+  // Compute the sizes of each node in the post-dominator tree and assign IDs in
+  // the dominator tree.
+  for (auto *Block : Blocks) {
+    computeNodeID(Block, &BasicBlock::DominatorNode);
+    computeNodeSize(Block, &BasicBlock::PostDominatorNode);
+  }
+  // Assign IDs in the post-dominator tree.
+  for (auto *Block : Blocks.reverse()) {
+    computeNodeID(Block, &BasicBlock::PostDominatorNode);
+  }
+}
+
 }  // end namespace til
 }  // end namespace threadSafety
 }  // end namespace clang
-
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index f5c786a..94f59f1 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -360,13 +360,28 @@
   // The result of a ?: could also be an lvalue.
   E = E->IgnoreParens();
   if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
-    const Expr *TrueExpr = CO->getTrueExpr();
-    if (!isa<OpaqueValueExpr>(TrueExpr))
-      classify(TrueExpr, C);
+    classify(CO->getTrueExpr(), C);
     classify(CO->getFalseExpr(), C);
     return;
   }
 
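+  // A GNU binary conditional (x ?: y) can also yield an lvalue; classify
+  // its false branch.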
+  if (const BinaryConditionalOperator *BCO =
+          dyn_cast<BinaryConditionalOperator>(E)) {
+    classify(BCO->getFalseExpr(), C);
+    return;
+  }
+
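+  // An opaque value is classified through its underlying source expression.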
+  if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+    classify(OVE->getSourceExpr(), C);
+    return;
+  }
+
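+  // The result of a comma expression is its right-hand side.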
+  if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+    if (BO->getOpcode() == BO_Comma)
+      classify(BO->getRHS(), C);
+    return;
+  }
+
   FindVarResult Var = findVar(E, DC);
   if (const DeclRefExpr *DRE = Var.getDeclRefExpr())
     Classification[DRE] = std::max(Classification[DRE], C);
@@ -401,6 +416,16 @@
 }
 
 void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
+  // Classify arguments to std::move as used.
+  if (CE->getNumArgs() == 1) {
+    if (FunctionDecl *FD = CE->getDirectCallee()) {
+      if (FD->getIdentifier() && FD->getIdentifier()->isStr("move")) {
+        classify(CE->getArg(0), Use);
+        return;
+      }
+    }
+  }
+
   // If a value is passed by const reference to a function, we should not assume
   // that it is initialized by the call, and we conservatively do not assume
   // that it is used.